xref: /linux/net/bluetooth/hci_core.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 
31 #include <linux/rfkill.h>
32 
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35 
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39 
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43 
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47 
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50 
51 /* ---- HCI notifications ---- */
52 
/* Forward an HCI device event (up/down/register/unregister) to the raw
 * HCI socket layer so monitoring sockets see device state changes. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57 
58 /* ---- HCI requests ---- */
59 
/* Called from the event path when an HCI command completes.
 *
 * During HCI_INIT, a completion that does not match the last queued init
 * command is normally ignored; the exception is a spontaneous Reset
 * Complete from some CSR controllers, which forces a resend of the
 * command that was actually in flight. Outside init (or on a match) the
 * synchronous waiter in __hci_request() is woken with @result.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		/* NOTE(review): hdev->sent_cmd is dereferenced without a
		 * NULL check — presumably a command is always in flight
		 * while HCI_INIT is set; confirm against the event path. */
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only act on a spurious reset complete while a different
		 * command was actually pending; everything else is ignored. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a copy of the interrupted command at the head of
		 * the command queue and kick the command worker. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Wake the synchronous request waiter, if one is pending */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
97 
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
99 {
100 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
101 
102 	if (hdev->req_status == HCI_REQ_PEND) {
103 		hdev->req_result = err;
104 		hdev->req_status = HCI_REQ_CANCELED;
105 		wake_up_interruptible(&hdev->req_wait_q);
106 	}
107 }
108 
/* Execute request and wait for completion.
 *
 * The wait-queue entry is registered and the task state set to
 * TASK_INTERRUPTIBLE *before* req() sends its commands, so a completion
 * arriving while req() runs cannot be missed. The waiter is woken by
 * hci_req_complete()/hci_req_cancel(); @timeout is in jiffies.
 *
 * Returns 0 on success, a negative errno translated from the HCI status
 * on failure, -EINTR on signal, or -ETIMEDOUT on timeout. Caller must
 * hold the request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied; map its HCI status to an errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here */
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: schedule_timeout() expired */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
152 
153 static int hci_request(struct hci_dev *hdev,
154 		       void (*req)(struct hci_dev *hdev, unsigned long opt),
155 		       unsigned long opt, __u32 timeout)
156 {
157 	int ret;
158 
159 	if (!test_bit(HCI_UP, &hdev->flags))
160 		return -ENETDOWN;
161 
162 	/* Serialize all requests */
163 	hci_req_lock(hdev);
164 	ret = __hci_request(hdev, req, opt, timeout);
165 	hci_req_unlock(hdev);
166 
167 	return ret;
168 }
169 
/* Request callback: issue an HCI Reset to the controller. HCI_RESET is
 * set so the event path knows a reset is in progress. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
178 
/* Queue the BR/EDR controller init sequence: read static controller
 * properties, then apply a few default settings. All commands are
 * queued here; completions arrive asynchronously via the event path. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers do packet-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys (BDADDR_ANY + delete_all wildcard) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
224 
/* Queue the init sequence for an AMP (alternate MAC/PHY) controller:
 * only version and AMP info are needed; flow control is block based. */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
235 
/* Request callback for device bring-up: flush driver-supplied vendor
 * commands to the command queue, optionally reset the controller, then
 * run the transport-specific init sequence. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	/* Transport-specific init */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
273 
/* Request callback for LE bring-up: query the LE ACL buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
281 
282 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
283 {
284 	__u8 scan = opt;
285 
286 	BT_DBG("%s %x", hdev->name, scan);
287 
288 	/* Inquiry and Page scans */
289 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
290 }
291 
292 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
293 {
294 	__u8 auth = opt;
295 
296 	BT_DBG("%s %x", hdev->name, auth);
297 
298 	/* Authentication */
299 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
300 }
301 
302 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
303 {
304 	__u8 encrypt = opt;
305 
306 	BT_DBG("%s %x", hdev->name, encrypt);
307 
308 	/* Encryption */
309 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
310 }
311 
312 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
313 {
314 	__le16 policy = cpu_to_le16(opt);
315 
316 	BT_DBG("%s %x", hdev->name, policy);
317 
318 	/* Default link policy */
319 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
320 }
321 
322 /* Get HCI device by index.
323  * Device is held on return. */
324 struct hci_dev *hci_dev_get(int index)
325 {
326 	struct hci_dev *hdev = NULL, *d;
327 
328 	BT_DBG("%d", index);
329 
330 	if (index < 0)
331 		return NULL;
332 
333 	read_lock(&hci_dev_list_lock);
334 	list_for_each_entry(d, &hci_dev_list, list) {
335 		if (d->id == index) {
336 			hdev = hci_dev_hold(d);
337 			break;
338 		}
339 	}
340 	read_unlock(&hci_dev_list_lock);
341 	return hdev;
342 }
343 
344 /* ---- Inquiry support ---- */
345 
346 bool hci_discovery_active(struct hci_dev *hdev)
347 {
348 	struct discovery_state *discov = &hdev->discovery;
349 
350 	switch (discov->state) {
351 	case DISCOVERY_FINDING:
352 	case DISCOVERY_RESOLVING:
353 		return true;
354 
355 	default:
356 		return false;
357 	}
358 }
359 
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the transitions userspace cares about. No-op if the state
 * is unchanged. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped discovering" event is sent. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
385 
386 static void inquiry_cache_flush(struct hci_dev *hdev)
387 {
388 	struct discovery_state *cache = &hdev->discovery;
389 	struct inquiry_entry *p, *n;
390 
391 	list_for_each_entry_safe(p, n, &cache->all, all) {
392 		list_del(&p->all);
393 		kfree(p);
394 	}
395 
396 	INIT_LIST_HEAD(&cache->unknown);
397 	INIT_LIST_HEAD(&cache->resolve);
398 }
399 
400 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
401 					       bdaddr_t *bdaddr)
402 {
403 	struct discovery_state *cache = &hdev->discovery;
404 	struct inquiry_entry *e;
405 
406 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
407 
408 	list_for_each_entry(e, &cache->all, all) {
409 		if (!bacmp(&e->data.bdaddr, bdaddr))
410 			return e;
411 	}
412 
413 	return NULL;
414 }
415 
416 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
417 						       bdaddr_t *bdaddr)
418 {
419 	struct discovery_state *cache = &hdev->discovery;
420 	struct inquiry_entry *e;
421 
422 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423 
424 	list_for_each_entry(e, &cache->unknown, list) {
425 		if (!bacmp(&e->data.bdaddr, bdaddr))
426 			return e;
427 	}
428 
429 	return NULL;
430 }
431 
/* Look up an entry on the name-resolve sub-list. Passing BDADDR_ANY
 * acts as a wildcard: the first entry whose name_state matches @state
 * is returned; otherwise the entry matching @bdaddr is returned. */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
450 
/* Re-insert @ie into the resolve list, keeping the list ordered so
 * that name resolution is attempted for the strongest-signal devices
 * first (smaller |rssi| means a stronger signal here). Entries already
 * NAME_PENDING stay at the front regardless of RSSI. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last entry that should still precede @ie */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
469 
/* Insert or refresh an inquiry-result entry in the cache.
 *
 * @name_known: caller already knows the remote name (e.g. from EIR).
 * @ssp: out-parameter, set true if either the new data or the cached
 *       entry indicates Secure Simple Pairing support.
 *
 * Returns true if the entry's name is known (or pending), false if a
 * remote name request is still needed or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support seen earlier still counts */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting for name resolution: re-sort
		 * the resolve list so stronger devices go first. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: take the entry off whichever sub-list
	 * (unknown/resolve) it was queued on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
525 
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info (the HCI ioctl wire format). Returns the number
 * of entries written. Caller must hold hci_dev_lock() and size @buf
 * for @num entries. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
553 
/* Request callback: start an inquiry using the parameters packed into
 * @opt (a pointer to struct hci_inquiry_req from hci_inquiry()).
 * Skipped if an inquiry is already running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
570 
/* HCIINQUIRY ioctl handler: run an inquiry (unless fresh cached results
 * exist) and copy the results back to userspace after the request
 * structure itself. Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only start a new inquiry if the cache is stale, empty or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per the HCI spec; 2000ms per unit
	 * leaves headroom over the controller's own timeout. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the result array */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
637 
638 /* ---- HCI ioctl helpers ---- */
639 
/* Bring an HCI device up: open the transport, run the controller init
 * sequence (unless the device is raw), and announce HCI_DEV_UP. On init
 * failure the transport is torn down again. Returns 0 or a negative
 * errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Refuse to power up while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		/* NOTE(review): a failure from the BR/EDR init request is
		 * overwritten here when the LE init request succeeds —
		 * confirm this is the intended behavior. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Extra reference held while the device stays up;
		 * dropped in hci_dev_do_close(). */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
730 
/* Take an HCI device down: cancel pending work, flush all queues and
 * workers, optionally reset the controller, then close the transport.
 * The teardown order matters (workers before queues, queues before
 * close) so nothing touches the device after hdev->close(). Always
 * returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any synchronous request still waiting */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: just stop the command timer */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Auto-off closes are not reported as power-downs to mgmt */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
817 
818 int hci_dev_close(__u16 dev)
819 {
820 	struct hci_dev *hdev;
821 	int err;
822 
823 	hdev = hci_dev_get(dev);
824 	if (!hdev)
825 		return -ENODEV;
826 
827 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
828 		cancel_delayed_work(&hdev->power_off);
829 
830 	err = hci_dev_do_close(hdev);
831 
832 	hci_dev_put(hdev);
833 	return err;
834 }
835 
/* HCIDEVRESET ioctl handler: flush queues, connections and the inquiry
 * cache, reset the packet counters and (for non-raw devices) issue a
 * controller reset. A no-op if the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore full command credit and clear per-link-type counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
873 
874 int hci_dev_reset_stat(__u16 dev)
875 {
876 	struct hci_dev *hdev;
877 	int ret = 0;
878 
879 	hdev = hci_dev_get(dev);
880 	if (!hdev)
881 		return -ENODEV;
882 
883 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
884 
885 	hci_dev_put(hdev);
886 
887 	return ret;
888 }
889 
/* Dispatcher for the HCISET* ioctls: run the matching request against
 * the device named in the hci_dev_req copied from userspace, or update
 * the in-kernel settings directly where no HCI command is needed. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Settings below only change kernel state, no HCI traffic */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16s: [0] = packet count, [1] = mtu */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
964 
/* HCIGETDEVLIST ioctl handler: return the id and flags of up to
 * dev_num registered devices. Also cancels pending auto-off and marks
 * non-mgmt devices pairable, mirroring hci_get_dev_info(). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the allocation at two pages' worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access keeps the device powered */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1011 
1012 int hci_get_dev_info(void __user *arg)
1013 {
1014 	struct hci_dev *hdev;
1015 	struct hci_dev_info di;
1016 	int err = 0;
1017 
1018 	if (copy_from_user(&di, arg, sizeof(di)))
1019 		return -EFAULT;
1020 
1021 	hdev = hci_dev_get(di.dev_id);
1022 	if (!hdev)
1023 		return -ENODEV;
1024 
1025 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1026 		cancel_delayed_work_sync(&hdev->power_off);
1027 
1028 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1029 		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1030 
1031 	strcpy(di.name, hdev->name);
1032 	di.bdaddr   = hdev->bdaddr;
1033 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1034 	di.flags    = hdev->flags;
1035 	di.pkt_type = hdev->pkt_type;
1036 	di.acl_mtu  = hdev->acl_mtu;
1037 	di.acl_pkts = hdev->acl_pkts;
1038 	di.sco_mtu  = hdev->sco_mtu;
1039 	di.sco_pkts = hdev->sco_pkts;
1040 	di.link_policy = hdev->link_policy;
1041 	di.link_mode   = hdev->link_mode;
1042 
1043 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1044 	memcpy(&di.features, &hdev->features, sizeof(di.features));
1045 
1046 	if (copy_to_user(arg, &di, sizeof(di)))
1047 		err = -EFAULT;
1048 
1049 	hci_dev_put(hdev);
1050 
1051 	return err;
1052 }
1053 
1054 /* ---- Interface to HCI drivers ---- */
1055 
1056 static int hci_rfkill_set_block(void *data, bool blocked)
1057 {
1058 	struct hci_dev *hdev = data;
1059 
1060 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1061 
1062 	if (!blocked)
1063 		return 0;
1064 
1065 	hci_dev_do_close(hdev);
1066 
1067 	return 0;
1068 }
1069 
/* rfkill operations registered for each HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1073 
/* Work handler: power the adapter on. If the device is in the
 * auto-off state a delayed power-off is scheduled; completing initial
 * setup announces the new index to the management interface. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1089 
/* Delayed-work handler: power the adapter off (auto-off timeout or
 * mgmt request). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1099 
/* Delayed-work handler: end the discoverable period by dropping back
 * to page-scan only and clearing the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1117 
1118 int hci_uuids_clear(struct hci_dev *hdev)
1119 {
1120 	struct list_head *p, *n;
1121 
1122 	list_for_each_safe(p, n, &hdev->uuids) {
1123 		struct bt_uuid *uuid;
1124 
1125 		uuid = list_entry(p, struct bt_uuid, list);
1126 
1127 		list_del(p);
1128 		kfree(uuid);
1129 	}
1130 
1131 	return 0;
1132 }
1133 
1134 int hci_link_keys_clear(struct hci_dev *hdev)
1135 {
1136 	struct list_head *p, *n;
1137 
1138 	list_for_each_safe(p, n, &hdev->link_keys) {
1139 		struct link_key *key;
1140 
1141 		key = list_entry(p, struct link_key, list);
1142 
1143 		list_del(p);
1144 		kfree(key);
1145 	}
1146 
1147 	return 0;
1148 }
1149 
/* Free every stored SMP long term key. Always returns 0. */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1161 
1162 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1163 {
1164 	struct link_key *k;
1165 
1166 	list_for_each_entry(k, &hdev->link_keys, list)
1167 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1168 			return k;
1169 
1170 	return NULL;
1171 }
1172 
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and both sides' authentication requirements.
 * (auth values: 0x00/0x01 = no bonding, 0x02/0x03 = dedicated bonding;
 * NOTE(review): inferred from the checks below — confirm against the
 * Core spec's IO capability request definitions.) */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1208 
1209 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1210 {
1211 	struct smp_ltk *k;
1212 
1213 	list_for_each_entry(k, &hdev->long_term_keys, list) {
1214 		if (k->ediv != ediv ||
1215 		    memcmp(rand, k->rand, sizeof(k->rand)))
1216 			continue;
1217 
1218 		return k;
1219 	}
1220 
1221 	return NULL;
1222 }
1223 
1224 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1225 				     u8 addr_type)
1226 {
1227 	struct smp_ltk *k;
1228 
1229 	list_for_each_entry(k, &hdev->long_term_keys, list)
1230 		if (addr_type == k->bdaddr_type &&
1231 		    bacmp(bdaddr, &k->bdaddr) == 0)
1232 			return k;
1233 
1234 	return NULL;
1235 }
1236 
/* Store (or update) a BR/EDR link key for bdaddr. When new_key is set,
 * userspace is notified via mgmt and the key's persistence is evaluated.
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if present, otherwise
	 * allocate a fresh one and link it into the device's key list */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* For a changed combination key keep recording the original key
	 * type so its security properties are preserved */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys get flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1289 
1290 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1291 		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1292 		ediv, u8 rand[8])
1293 {
1294 	struct smp_ltk *key, *old_key;
1295 
1296 	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1297 		return 0;
1298 
1299 	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1300 	if (old_key)
1301 		key = old_key;
1302 	else {
1303 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1304 		if (!key)
1305 			return -ENOMEM;
1306 		list_add(&key->list, &hdev->long_term_keys);
1307 	}
1308 
1309 	bacpy(&key->bdaddr, bdaddr);
1310 	key->bdaddr_type = addr_type;
1311 	memcpy(key->val, tk, sizeof(key->val));
1312 	key->authenticated = authenticated;
1313 	key->ediv = ediv;
1314 	key->enc_size = enc_size;
1315 	key->type = type;
1316 	memcpy(key->rand, rand, sizeof(key->rand));
1317 
1318 	if (!new_key)
1319 		return 0;
1320 
1321 	if (type & HCI_SMP_LTK)
1322 		mgmt_new_ltk(hdev, key, 1);
1323 
1324 	return 0;
1325 }
1326 
1327 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1328 {
1329 	struct link_key *key;
1330 
1331 	key = hci_find_link_key(hdev, bdaddr);
1332 	if (!key)
1333 		return -ENOENT;
1334 
1335 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1336 
1337 	list_del(&key->list);
1338 	kfree(key);
1339 
1340 	return 0;
1341 }
1342 
1343 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1344 {
1345 	struct smp_ltk *k, *tmp;
1346 
1347 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1348 		if (bacmp(bdaddr, &k->bdaddr))
1349 			continue;
1350 
1351 		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1352 
1353 		list_del(&k->list);
1354 		kfree(k);
1355 	}
1356 
1357 	return 0;
1358 }
1359 
/* HCI command timer function. Fires when a sent HCI command has not
 * been answered by the controller in time. Logs the stalled opcode (if
 * one is recorded) and unblocks the command queue so processing can
 * continue. */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one more command to be sent and kick the cmd worker */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1377 
1378 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1379 					  bdaddr_t *bdaddr)
1380 {
1381 	struct oob_data *data;
1382 
1383 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1384 		if (bacmp(bdaddr, &data->bdaddr) == 0)
1385 			return data;
1386 
1387 	return NULL;
1388 }
1389 
1390 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1391 {
1392 	struct oob_data *data;
1393 
1394 	data = hci_find_remote_oob_data(hdev, bdaddr);
1395 	if (!data)
1396 		return -ENOENT;
1397 
1398 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1399 
1400 	list_del(&data->list);
1401 	kfree(data);
1402 
1403 	return 0;
1404 }
1405 
1406 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1407 {
1408 	struct oob_data *data, *n;
1409 
1410 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1411 		list_del(&data->list);
1412 		kfree(data);
1413 	}
1414 
1415 	return 0;
1416 }
1417 
1418 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1419 			    u8 *randomizer)
1420 {
1421 	struct oob_data *data;
1422 
1423 	data = hci_find_remote_oob_data(hdev, bdaddr);
1424 
1425 	if (!data) {
1426 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1427 		if (!data)
1428 			return -ENOMEM;
1429 
1430 		bacpy(&data->bdaddr, bdaddr);
1431 		list_add(&data->list, &hdev->remote_oob_data);
1432 	}
1433 
1434 	memcpy(data->hash, hash, sizeof(data->hash));
1435 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1436 
1437 	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1438 
1439 	return 0;
1440 }
1441 
1442 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1443 {
1444 	struct bdaddr_list *b;
1445 
1446 	list_for_each_entry(b, &hdev->blacklist, list)
1447 		if (bacmp(bdaddr, &b->bdaddr) == 0)
1448 			return b;
1449 
1450 	return NULL;
1451 }
1452 
1453 int hci_blacklist_clear(struct hci_dev *hdev)
1454 {
1455 	struct list_head *p, *n;
1456 
1457 	list_for_each_safe(p, n, &hdev->blacklist) {
1458 		struct bdaddr_list *b;
1459 
1460 		b = list_entry(p, struct bdaddr_list, list);
1461 
1462 		list_del(p);
1463 		kfree(b);
1464 	}
1465 
1466 	return 0;
1467 }
1468 
1469 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1470 {
1471 	struct bdaddr_list *entry;
1472 
1473 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1474 		return -EBADF;
1475 
1476 	if (hci_blacklist_lookup(hdev, bdaddr))
1477 		return -EEXIST;
1478 
1479 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1480 	if (!entry)
1481 		return -ENOMEM;
1482 
1483 	bacpy(&entry->bdaddr, bdaddr);
1484 
1485 	list_add(&entry->list, &hdev->blacklist);
1486 
1487 	return mgmt_device_blocked(hdev, bdaddr, type);
1488 }
1489 
1490 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1491 {
1492 	struct bdaddr_list *entry;
1493 
1494 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1495 		return hci_blacklist_clear(hdev);
1496 
1497 	entry = hci_blacklist_lookup(hdev, bdaddr);
1498 	if (!entry)
1499 		return -ENOENT;
1500 
1501 	list_del(&entry->list);
1502 	kfree(entry);
1503 
1504 	return mgmt_device_unblocked(hdev, bdaddr, type);
1505 }
1506 
1507 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1508 {
1509 	struct le_scan_params *param =  (struct le_scan_params *) opt;
1510 	struct hci_cp_le_set_scan_param cp;
1511 
1512 	memset(&cp, 0, sizeof(cp));
1513 	cp.type = param->type;
1514 	cp.interval = cpu_to_le16(param->interval);
1515 	cp.window = cpu_to_le16(param->window);
1516 
1517 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1518 }
1519 
1520 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1521 {
1522 	struct hci_cp_le_set_scan_enable cp;
1523 
1524 	memset(&cp, 0, sizeof(cp));
1525 	cp.enable = 1;
1526 	cp.filter_dup = 1;
1527 
1528 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1529 }
1530 
/* Synchronously start an LE scan: set the scan parameters, enable
 * scanning, and schedule the automatic disable after `timeout` ms.
 * Returns -EINPROGRESS if a scan is already active, or the error from
 * the underlying HCI requests. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Each HCI request below waits up to 3 seconds for completion */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be set before scanning is enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arrange for the scan to be disabled once the timeout expires */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1564 
/* Cancel a running LE scan. Returns -EALREADY if no scan is active.
 * The disable command is only sent here when the delayed disable work
 * was still pending (i.e. it had not already fired and sent it). */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	/* cancel_delayed_work() returns true only if the work was still
	 * queued, in which case we must send the disable ourselves */
	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1582 
1583 static void le_scan_disable_work(struct work_struct *work)
1584 {
1585 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1586 					    le_scan_disable.work);
1587 	struct hci_cp_le_set_scan_enable cp;
1588 
1589 	BT_DBG("%s", hdev->name);
1590 
1591 	memset(&cp, 0, sizeof(cp));
1592 
1593 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1594 }
1595 
1596 static void le_scan_work(struct work_struct *work)
1597 {
1598 	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1599 	struct le_scan_params *param = &hdev->le_scan_params;
1600 
1601 	BT_DBG("%s", hdev->name);
1602 
1603 	hci_do_le_scan(hdev, param->type, param->interval, param->window,
1604 		       param->timeout);
1605 }
1606 
1607 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1608 		int timeout)
1609 {
1610 	struct le_scan_params *param = &hdev->le_scan_params;
1611 
1612 	BT_DBG("%s", hdev->name);
1613 
1614 	if (work_busy(&hdev->le_scan))
1615 		return -EINPROGRESS;
1616 
1617 	param->type = type;
1618 	param->interval = interval;
1619 	param->window = window;
1620 	param->timeout = timeout;
1621 
1622 	queue_work(system_long_wq, &hdev->le_scan);
1623 
1624 	return 0;
1625 }
1626 
/* Alloc HCI device. Allocates and initializes a new hci_dev with default
 * packet types, lists, workers and the command timer. Returns NULL on
 * allocation failure; the caller registers it with hci_register_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default packet types, link policy and IO capability */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	/* Default sniff mode interval limits */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	/* Workers for RX, command TX, data TX, power on and LE scan */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Detects HCI commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1680 
/* Free HCI device. Purges the driver init queue and drops the device
 * reference; the actual memory is released by the device release
 * callback once the last reference is gone. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1690 
/* Register HCI device. Assigns an index, creates the per-device
 * workqueue, sysfs entries and rfkill switch, then schedules power on.
 * Returns the assigned id on success or a negative errno; on failure
 * everything set up so far is unwound. */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* open/close callbacks are mandatory for every driver */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority workqueue for this device */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is tolerated */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Release the index and unlink from the global device list */
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1769 
/* Unregister HCI device. Unlinks the device, shuts it down, notifies
 * mgmt and listeners, tears down rfkill/sysfs/workqueue, clears all
 * stored state and finally drops the registration reference and id. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away before tearing anything down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Clear all stored keys, UUIDs, OOB data and the blacklist */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1825 
/* Suspend HCI device. Only notifies registered listeners; no device
 * state is changed here. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1833 
/* Resume HCI device. Only notifies registered listeners; no device
 * state is changed here. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1841 
/* Receive frame from HCI drivers. Takes ownership of skb: it is either
 * queued on the device RX queue for the RX worker, or freed with -ENXIO
 * when the device is missing or neither up nor initializing. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1864 
/* Incrementally reassemble an HCI packet of the given type from a byte
 * stream. Partial state is kept in hdev->reassembly[index]; a complete
 * frame is handed to hci_recv_frame(). Returns the number of unconsumed
 * bytes (belonging to the next frame) or a negative errno. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	/* No packet in progress: allocate a buffer sized for the worst
	 * case of this packet type and expect its header first */
	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify the buffer can hold the payload */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1973 
1974 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1975 {
1976 	int rem = 0;
1977 
1978 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1979 		return -EILSEQ;
1980 
1981 	while (count) {
1982 		rem = hci_reassembly(hdev, type, data, count, type - 1);
1983 		if (rem < 0)
1984 			return rem;
1985 
1986 		data += (count - rem);
1987 		count = rem;
1988 	}
1989 
1990 	return rem;
1991 }
1992 EXPORT_SYMBOL(hci_recv_fragment);
1993 
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream (e.g. a UART driver).
 * When no packet is in progress the first byte of the stream is the
 * packet type indicator; otherwise the type of the in-progress packet
 * is reused. Returns leftover byte count or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Consume the type indicator byte */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2028 
2029 /* ---- Interface to upper protocols ---- */
2030 
2031 int hci_register_cb(struct hci_cb *cb)
2032 {
2033 	BT_DBG("%p name %s", cb, cb->name);
2034 
2035 	write_lock(&hci_cb_list_lock);
2036 	list_add(&cb->list, &hci_cb_list);
2037 	write_unlock(&hci_cb_list_lock);
2038 
2039 	return 0;
2040 }
2041 EXPORT_SYMBOL(hci_register_cb);
2042 
2043 int hci_unregister_cb(struct hci_cb *cb)
2044 {
2045 	BT_DBG("%p name %s", cb, cb->name);
2046 
2047 	write_lock(&hci_cb_list_lock);
2048 	list_del(&cb->list);
2049 	write_unlock(&hci_cb_list_lock);
2050 
2051 	return 0;
2052 }
2053 EXPORT_SYMBOL(hci_unregister_cb);
2054 
/* Hand a fully built packet to the driver. Copies are delivered to the
 * monitor channel and (in promiscuous mode) to raw sockets before the
 * skb is passed to the driver's send callback. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2082 
/* Send HCI command. Builds a command packet with the given opcode and
 * parameters, queues it on the command queue and wakes the command
 * worker. Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: little-endian opcode plus parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2118 
2119 /* Get data from the previously sent command */
2120 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2121 {
2122 	struct hci_command_hdr *hdr;
2123 
2124 	if (!hdev->sent_cmd)
2125 		return NULL;
2126 
2127 	hdr = (void *) hdev->sent_cmd->data;
2128 
2129 	if (hdr->opcode != cpu_to_le16(opcode))
2130 		return NULL;
2131 
2132 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2133 
2134 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2135 }
2136 
2137 /* Send ACL data */
2138 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2139 {
2140 	struct hci_acl_hdr *hdr;
2141 	int len = skb->len;
2142 
2143 	skb_push(skb, HCI_ACL_HDR_SIZE);
2144 	skb_reset_transport_header(skb);
2145 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2146 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2147 	hdr->dlen   = cpu_to_le16(len);
2148 }
2149 
/* Queue an ACL packet (possibly fragmented via skb frag_list) for
 * transmission. All fragments of one packet are queued atomically so
 * they cannot be interleaved with other traffic; continuation fragments
 * carry ACL_CONT instead of ACL_START. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Fragments are sent individually, so the head skb's length
	 * must cover only its own linear data */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as
		 * its own skb below */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2196 
2197 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2198 {
2199 	struct hci_conn *conn = chan->conn;
2200 	struct hci_dev *hdev = conn->hdev;
2201 
2202 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2203 
2204 	skb->dev = (void *) hdev;
2205 
2206 	hci_queue_acl(conn, &chan->data_q, skb, flags);
2207 
2208 	queue_work(hdev->workqueue, &hdev->tx_work);
2209 }
2210 
/* Send SCO data. Prepends the SCO header, queues the packet on the
 * connection's data queue and wakes the TX worker. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	/* Make room for and copy in the SCO header */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2232 
2233 /* ---- HCI TX task (outgoing data) ---- */
2234 
2235 /* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest outstanding packets (least-sent fairness), and compute its TX
 * quota from the controller's free buffer count. Returns NULL and a
 * zero quote when nothing is ready to send. */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest packets in flight */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type; LE
		 * falls back to the ACL count on shared-buffer devices */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the budget evenly, but always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2295 
/* Handle a link TX timeout: disconnect every connection of the given
 * type that still has unacknowledged packets outstanding. */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2316 
/* Pick the best channel to service next for the given link type:
 * highest skb priority wins; among equal priorities the channel whose
 * connection has the fewest packets in flight is chosen. The TX quota
 * is derived from the controller's free buffer count. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Channel priority is the priority of its head skb */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority resets the fairness state */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, least-sent wins */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer count for this link type; LE falls
	 * back to the ACL count on shared-buffer devices */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the budget evenly, but always allow at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2395 
/* Anti-starvation pass after a scheduling round: channels that were not
 * serviced (sent == 0) get the head of their queue promoted to
 * HCI_PRIO_MAX - 1, while serviced channels get their sent counter
 * reset for the next round. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel was serviced: reset and move on */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved: bump the head skb's priority */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2445 
2446 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2447 {
2448 	/* Calculate count of blocks used by this packet */
2449 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2450 }
2451 
2452 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2453 {
2454 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2455 		/* ACL tx timeout must be longer than maximum
2456 		 * link supervision timeout (40.9 seconds) */
2457 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2458 				       HCI_ACL_TX_TIMEOUT))
2459 			hci_link_tx_to(hdev, ACL_LINK);
2460 	}
2461 }
2462 
/* Packet-based ACL scheduler: drain channel queues while controller
 * buffers (acl_cnt) remain, respecting per-channel quotas and stopping
 * a channel's burst when the head skb's priority drops. */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; account for it */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance priorities of starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2500 
2501 static void hci_sched_acl_blk(struct hci_dev *hdev)
2502 {
2503 	unsigned int cnt = hdev->block_cnt;
2504 	struct hci_chan *chan;
2505 	struct sk_buff *skb;
2506 	int quote;
2507 
2508 	__check_timeout(hdev, cnt);
2509 
2510 	while (hdev->block_cnt > 0 &&
2511 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2512 		u32 priority = (skb_peek(&chan->data_q))->priority;
2513 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2514 			int blocks;
2515 
2516 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2517 			       skb->len, skb->priority);
2518 
2519 			/* Stop if priority has changed */
2520 			if (skb->priority < priority)
2521 				break;
2522 
2523 			skb = skb_dequeue(&chan->data_q);
2524 
2525 			blocks = __get_blocks(hdev, skb);
2526 			if (blocks > hdev->block_cnt)
2527 				return;
2528 
2529 			hci_conn_enter_active_mode(chan->conn,
2530 						   bt_cb(skb)->force_active);
2531 
2532 			hci_send_frame(skb);
2533 			hdev->acl_last_tx = jiffies;
2534 
2535 			hdev->block_cnt -= blocks;
2536 			quote -= blocks;
2537 
2538 			chan->sent += blocks;
2539 			chan->conn->sent += blocks;
2540 		}
2541 	}
2542 
2543 	if (cnt != hdev->block_cnt)
2544 		hci_prio_recalculate(hdev, ACL_LINK);
2545 }
2546 
2547 static void hci_sched_acl(struct hci_dev *hdev)
2548 {
2549 	BT_DBG("%s", hdev->name);
2550 
2551 	if (!hci_conn_num(hdev, ACL_LINK))
2552 		return;
2553 
2554 	switch (hdev->flow_ctl_mode) {
2555 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
2556 		hci_sched_acl_pkt(hdev);
2557 		break;
2558 
2559 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2560 		hci_sched_acl_blk(hdev);
2561 		break;
2562 	}
2563 }
2564 
2565 /* Schedule SCO */
2566 static void hci_sched_sco(struct hci_dev *hdev)
2567 {
2568 	struct hci_conn *conn;
2569 	struct sk_buff *skb;
2570 	int quote;
2571 
2572 	BT_DBG("%s", hdev->name);
2573 
2574 	if (!hci_conn_num(hdev, SCO_LINK))
2575 		return;
2576 
2577 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2578 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2579 			BT_DBG("skb %p len %d", skb, skb->len);
2580 			hci_send_frame(skb);
2581 
2582 			conn->sent++;
2583 			if (conn->sent == ~0)
2584 				conn->sent = 0;
2585 		}
2586 	}
2587 }
2588 
2589 static void hci_sched_esco(struct hci_dev *hdev)
2590 {
2591 	struct hci_conn *conn;
2592 	struct sk_buff *skb;
2593 	int quote;
2594 
2595 	BT_DBG("%s", hdev->name);
2596 
2597 	if (!hci_conn_num(hdev, ESCO_LINK))
2598 		return;
2599 
2600 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2601 						     &quote))) {
2602 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2603 			BT_DBG("skb %p len %d", skb, skb->len);
2604 			hci_send_frame(skb);
2605 
2606 			conn->sent++;
2607 			if (conn->sent == ~0)
2608 				conn->sent = 0;
2609 		}
2610 	}
2611 }
2612 
/* LE scheduler: same priority/quote loop as the ACL packet scheduler,
 * but credits come from le_cnt when the controller has a dedicated LE
 * buffer pool (le_pkts != 0), falling back to the shared ACL pool
 * otherwise.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE credits if available, else shared ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credits to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head packet when we picked this channel */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool we used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: give starved channels a priority boost */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2663 
2664 static void hci_tx_work(struct work_struct *work)
2665 {
2666 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2667 	struct sk_buff *skb;
2668 
2669 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2670 	       hdev->sco_cnt, hdev->le_cnt);
2671 
2672 	/* Schedule queues and send stuff to HCI driver */
2673 
2674 	hci_sched_acl(hdev);
2675 
2676 	hci_sched_sco(hdev);
2677 
2678 	hci_sched_esco(hdev);
2679 
2680 	hci_sched_le(hdev);
2681 
2682 	/* Send next queued raw (unknown type) packet */
2683 	while ((skb = skb_dequeue(&hdev->raw_q)))
2684 		hci_send_frame(skb);
2685 }
2686 
2687 /* ----- HCI RX task (incoming data processing) ----- */
2688 
2689 /* ACL data packet */
2690 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2691 {
2692 	struct hci_acl_hdr *hdr = (void *) skb->data;
2693 	struct hci_conn *conn;
2694 	__u16 handle, flags;
2695 
2696 	skb_pull(skb, HCI_ACL_HDR_SIZE);
2697 
2698 	handle = __le16_to_cpu(hdr->handle);
2699 	flags  = hci_flags(handle);
2700 	handle = hci_handle(handle);
2701 
2702 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2703 	       handle, flags);
2704 
2705 	hdev->stat.acl_rx++;
2706 
2707 	hci_dev_lock(hdev);
2708 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2709 	hci_dev_unlock(hdev);
2710 
2711 	if (conn) {
2712 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2713 
2714 		hci_dev_lock(hdev);
2715 		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2716 		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2717 			mgmt_device_connected(hdev, &conn->dst, conn->type,
2718 					      conn->dst_type, 0, NULL, 0,
2719 					      conn->dev_class);
2720 		hci_dev_unlock(hdev);
2721 
2722 		/* Send to upper protocol */
2723 		l2cap_recv_acldata(conn, skb, flags);
2724 		return;
2725 	} else {
2726 		BT_ERR("%s ACL packet for unknown connection handle %d",
2727 		       hdev->name, handle);
2728 	}
2729 
2730 	kfree_skb(skb);
2731 }
2732 
2733 /* SCO data packet */
2734 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2735 {
2736 	struct hci_sco_hdr *hdr = (void *) skb->data;
2737 	struct hci_conn *conn;
2738 	__u16 handle;
2739 
2740 	skb_pull(skb, HCI_SCO_HDR_SIZE);
2741 
2742 	handle = __le16_to_cpu(hdr->handle);
2743 
2744 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2745 
2746 	hdev->stat.sco_rx++;
2747 
2748 	hci_dev_lock(hdev);
2749 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2750 	hci_dev_unlock(hdev);
2751 
2752 	if (conn) {
2753 		/* Send to upper protocol */
2754 		sco_recv_scodata(conn, skb);
2755 		return;
2756 	} else {
2757 		BT_ERR("%s SCO packet for unknown connection handle %d",
2758 		       hdev->name, handle);
2759 	}
2760 
2761 	kfree_skb(skb);
2762 }
2763 
/* RX work handler: drain the device's receive queue, mirroring every
 * frame to the monitor (and to sockets in promiscuous mode), then
 * dispatch it by packet type unless the device state forbids it.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space handles everything; drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; each handler consumes the skb */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2818 
/* CMD work handler: send the next queued HCI command if a command
 * credit is available, keeping a clone in hdev->sent_cmd so the
 * completion handler can match the response, and (re)arming the
 * command timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2850 
2851 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2852 {
2853 	/* General inquiry access code (GIAC) */
2854 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2855 	struct hci_cp_inquiry cp;
2856 
2857 	BT_DBG("%s", hdev->name);
2858 
2859 	if (test_bit(HCI_INQUIRY, &hdev->flags))
2860 		return -EINPROGRESS;
2861 
2862 	inquiry_cache_flush(hdev);
2863 
2864 	memset(&cp, 0, sizeof(cp));
2865 	memcpy(&cp.lap, lap, sizeof(cp.lap));
2866 	cp.length  = length;
2867 
2868 	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2869 }
2870 
2871 int hci_cancel_inquiry(struct hci_dev *hdev)
2872 {
2873 	BT_DBG("%s", hdev->name);
2874 
2875 	if (!test_bit(HCI_INQUIRY, &hdev->flags))
2876 		return -EALREADY;
2877 
2878 	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2879 }
2880 
2881 u8 bdaddr_to_le(u8 bdaddr_type)
2882 {
2883 	switch (bdaddr_type) {
2884 	case BDADDR_LE_PUBLIC:
2885 		return ADDR_LE_DEV_PUBLIC;
2886 
2887 	default:
2888 		/* Fallback to LE Random address type */
2889 		return ADDR_LE_DEV_RANDOM;
2890 	}
2891 }
2892