xref: /linux/net/bluetooth/hci_core.c (revision 2d87650a3bf1b80f7d0d150ee1af3f8a89e5b7aa)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <asm/unaligned.h>
33 
34 #include <net/bluetooth/bluetooth.h>
35 #include <net/bluetooth/hci_core.h>
36 
/* Deferred-work handlers for the receive, command and transmit queues;
 * definitions appear later in this file.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
51 
52 /* ---- HCI notifications ---- */
53 
/* Forward a device lifecycle event to the HCI socket layer so that
 * monitoring sockets are informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 
59 /* ---- HCI debugfs entries ---- */
60 
61 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 			     size_t count, loff_t *ppos)
63 {
64 	struct hci_dev *hdev = file->private_data;
65 	char buf[3];
66 
67 	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
68 	buf[1] = '\n';
69 	buf[2] = '\0';
70 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
71 }
72 
73 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74 			      size_t count, loff_t *ppos)
75 {
76 	struct hci_dev *hdev = file->private_data;
77 	struct sk_buff *skb;
78 	char buf[32];
79 	size_t buf_size = min(count, (sizeof(buf)-1));
80 	bool enable;
81 	int err;
82 
83 	if (!test_bit(HCI_UP, &hdev->flags))
84 		return -ENETDOWN;
85 
86 	if (copy_from_user(buf, user_buf, buf_size))
87 		return -EFAULT;
88 
89 	buf[buf_size] = '\0';
90 	if (strtobool(buf, &enable))
91 		return -EINVAL;
92 
93 	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
94 		return -EALREADY;
95 
96 	hci_req_lock(hdev);
97 	if (enable)
98 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 				     HCI_CMD_TIMEOUT);
100 	else
101 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 				     HCI_CMD_TIMEOUT);
103 	hci_req_unlock(hdev);
104 
105 	if (IS_ERR(skb))
106 		return PTR_ERR(skb);
107 
108 	err = -bt_to_errno(skb->data[0]);
109 	kfree_skb(skb);
110 
111 	if (err < 0)
112 		return err;
113 
114 	change_bit(HCI_DUT_MODE, &hdev->dev_flags);
115 
116 	return count;
117 }
118 
/* debugfs file operations for the "dut_mode" entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
125 
126 static int features_show(struct seq_file *f, void *ptr)
127 {
128 	struct hci_dev *hdev = f->private;
129 	u8 p;
130 
131 	hci_dev_lock(hdev);
132 	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
133 		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
134 			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
135 			   hdev->features[p][0], hdev->features[p][1],
136 			   hdev->features[p][2], hdev->features[p][3],
137 			   hdev->features[p][4], hdev->features[p][5],
138 			   hdev->features[p][6], hdev->features[p][7]);
139 	}
140 	if (lmp_le_capable(hdev))
141 		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
142 			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
143 			   hdev->le_features[0], hdev->le_features[1],
144 			   hdev->le_features[2], hdev->le_features[3],
145 			   hdev->le_features[4], hdev->le_features[5],
146 			   hdev->le_features[6], hdev->le_features[7]);
147 	hci_dev_unlock(hdev);
148 
149 	return 0;
150 }
151 
/* Bind the seq_file show callback to the hci_dev stashed in i_private. */
static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

/* debugfs file operations for the "features" entry */
static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
163 
164 static int blacklist_show(struct seq_file *f, void *p)
165 {
166 	struct hci_dev *hdev = f->private;
167 	struct bdaddr_list *b;
168 
169 	hci_dev_lock(hdev);
170 	list_for_each_entry(b, &hdev->blacklist, list)
171 		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
172 	hci_dev_unlock(hdev);
173 
174 	return 0;
175 }
176 
/* Bind the seq_file show callback to the hci_dev stashed in i_private. */
static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

/* debugfs file operations for the "blacklist" entry */
static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
188 
189 static int uuids_show(struct seq_file *f, void *p)
190 {
191 	struct hci_dev *hdev = f->private;
192 	struct bt_uuid *uuid;
193 
194 	hci_dev_lock(hdev);
195 	list_for_each_entry(uuid, &hdev->uuids, list) {
196 		u8 i, val[16];
197 
198 		/* The Bluetooth UUID values are stored in big endian,
199 		 * but with reversed byte order. So convert them into
200 		 * the right order for the %pUb modifier.
201 		 */
202 		for (i = 0; i < 16; i++)
203 			val[i] = uuid->uuid[15 - i];
204 
205 		seq_printf(f, "%pUb\n", val);
206 	}
207 	hci_dev_unlock(hdev);
208 
209 	return 0;
210 }
211 
/* Bind the seq_file show callback to the hci_dev stashed in i_private. */
static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

/* debugfs file operations for the "uuids" entry */
static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
223 
224 static int inquiry_cache_show(struct seq_file *f, void *p)
225 {
226 	struct hci_dev *hdev = f->private;
227 	struct discovery_state *cache = &hdev->discovery;
228 	struct inquiry_entry *e;
229 
230 	hci_dev_lock(hdev);
231 
232 	list_for_each_entry(e, &cache->all, all) {
233 		struct inquiry_data *data = &e->data;
234 		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
235 			   &data->bdaddr,
236 			   data->pscan_rep_mode, data->pscan_period_mode,
237 			   data->pscan_mode, data->dev_class[2],
238 			   data->dev_class[1], data->dev_class[0],
239 			   __le16_to_cpu(data->clock_offset),
240 			   data->rssi, data->ssp_mode, e->timestamp);
241 	}
242 
243 	hci_dev_unlock(hdev);
244 
245 	return 0;
246 }
247 
/* Bind the seq_file show callback to the hci_dev stashed in i_private. */
static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

/* debugfs file operations for the "inquiry_cache" entry */
static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
259 
260 static int link_keys_show(struct seq_file *f, void *ptr)
261 {
262 	struct hci_dev *hdev = f->private;
263 	struct list_head *p, *n;
264 
265 	hci_dev_lock(hdev);
266 	list_for_each_safe(p, n, &hdev->link_keys) {
267 		struct link_key *key = list_entry(p, struct link_key, list);
268 		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
269 			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
270 	}
271 	hci_dev_unlock(hdev);
272 
273 	return 0;
274 }
275 
/* Bind the seq_file show callback to the hci_dev stashed in i_private. */
static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

/* debugfs file operations for the "link_keys" entry */
static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
287 
288 static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
289 				   size_t count, loff_t *ppos)
290 {
291 	struct hci_dev *hdev = file->private_data;
292 	char buf[3];
293 
294 	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
295 	buf[1] = '\n';
296 	buf[2] = '\0';
297 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
298 }
299 
300 static const struct file_operations use_debug_keys_fops = {
301 	.open		= simple_open,
302 	.read		= use_debug_keys_read,
303 	.llseek		= default_llseek,
304 };
305 
306 static int dev_class_show(struct seq_file *f, void *ptr)
307 {
308 	struct hci_dev *hdev = f->private;
309 
310 	hci_dev_lock(hdev);
311 	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
312 		   hdev->dev_class[1], hdev->dev_class[0]);
313 	hci_dev_unlock(hdev);
314 
315 	return 0;
316 }
317 
/* Bind the seq_file show callback to the hci_dev stashed in i_private. */
static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

/* debugfs file operations for the "dev_class" entry */
static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
329 
/* Read-only debugfs accessor for the controller voice setting. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No setter: the attribute is read-only (NULL write callback). */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
343 
/* debugfs setter for the auto accept delay; no range validation is
 * applied to the value.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs getter for the auto accept delay. */
static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
368 
369 static int ssp_debug_mode_set(void *data, u64 val)
370 {
371 	struct hci_dev *hdev = data;
372 	struct sk_buff *skb;
373 	__u8 mode;
374 	int err;
375 
376 	if (val != 0 && val != 1)
377 		return -EINVAL;
378 
379 	if (!test_bit(HCI_UP, &hdev->flags))
380 		return -ENETDOWN;
381 
382 	hci_req_lock(hdev);
383 	mode = val;
384 	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
385 			     &mode, HCI_CMD_TIMEOUT);
386 	hci_req_unlock(hdev);
387 
388 	if (IS_ERR(skb))
389 		return PTR_ERR(skb);
390 
391 	err = -bt_to_errno(skb->data[0]);
392 	kfree_skb(skb);
393 
394 	if (err < 0)
395 		return err;
396 
397 	hci_dev_lock(hdev);
398 	hdev->ssp_debug_mode = val;
399 	hci_dev_unlock(hdev);
400 
401 	return 0;
402 }
403 
404 static int ssp_debug_mode_get(void *data, u64 *val)
405 {
406 	struct hci_dev *hdev = data;
407 
408 	hci_dev_lock(hdev);
409 	*val = hdev->ssp_debug_mode;
410 	hci_dev_unlock(hdev);
411 
412 	return 0;
413 }
414 
415 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
416 			ssp_debug_mode_set, "%llu\n");
417 
418 static int idle_timeout_set(void *data, u64 val)
419 {
420 	struct hci_dev *hdev = data;
421 
422 	if (val != 0 && (val < 500 || val > 3600000))
423 		return -EINVAL;
424 
425 	hci_dev_lock(hdev);
426 	hdev->idle_timeout = val;
427 	hci_dev_unlock(hdev);
428 
429 	return 0;
430 }
431 
432 static int idle_timeout_get(void *data, u64 *val)
433 {
434 	struct hci_dev *hdev = data;
435 
436 	hci_dev_lock(hdev);
437 	*val = hdev->idle_timeout;
438 	hci_dev_unlock(hdev);
439 
440 	return 0;
441 }
442 
443 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
444 			idle_timeout_set, "%llu\n");
445 
446 static int sniff_min_interval_set(void *data, u64 val)
447 {
448 	struct hci_dev *hdev = data;
449 
450 	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
451 		return -EINVAL;
452 
453 	hci_dev_lock(hdev);
454 	hdev->sniff_min_interval = val;
455 	hci_dev_unlock(hdev);
456 
457 	return 0;
458 }
459 
460 static int sniff_min_interval_get(void *data, u64 *val)
461 {
462 	struct hci_dev *hdev = data;
463 
464 	hci_dev_lock(hdev);
465 	*val = hdev->sniff_min_interval;
466 	hci_dev_unlock(hdev);
467 
468 	return 0;
469 }
470 
471 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
472 			sniff_min_interval_set, "%llu\n");
473 
474 static int sniff_max_interval_set(void *data, u64 val)
475 {
476 	struct hci_dev *hdev = data;
477 
478 	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
479 		return -EINVAL;
480 
481 	hci_dev_lock(hdev);
482 	hdev->sniff_max_interval = val;
483 	hci_dev_unlock(hdev);
484 
485 	return 0;
486 }
487 
488 static int sniff_max_interval_get(void *data, u64 *val)
489 {
490 	struct hci_dev *hdev = data;
491 
492 	hci_dev_lock(hdev);
493 	*val = hdev->sniff_max_interval;
494 	hci_dev_unlock(hdev);
495 
496 	return 0;
497 }
498 
499 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
500 			sniff_max_interval_set, "%llu\n");
501 
/* debugfs: print the LE static address of the controller. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

/* Bind the seq_file show callback to the hci_dev stashed in i_private. */
static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

/* debugfs file operations for the "static_address" entry */
static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
524 
525 static int own_address_type_set(void *data, u64 val)
526 {
527 	struct hci_dev *hdev = data;
528 
529 	if (val != 0 && val != 1)
530 		return -EINVAL;
531 
532 	hci_dev_lock(hdev);
533 	hdev->own_addr_type = val;
534 	hci_dev_unlock(hdev);
535 
536 	return 0;
537 }
538 
539 static int own_address_type_get(void *data, u64 *val)
540 {
541 	struct hci_dev *hdev = data;
542 
543 	hci_dev_lock(hdev);
544 	*val = hdev->own_addr_type;
545 	hci_dev_unlock(hdev);
546 
547 	return 0;
548 }
549 
550 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
551 			own_address_type_set, "%llu\n");
552 
553 static int long_term_keys_show(struct seq_file *f, void *ptr)
554 {
555 	struct hci_dev *hdev = f->private;
556 	struct list_head *p, *n;
557 
558 	hci_dev_lock(hdev);
559 	list_for_each_safe(p, n, &hdev->link_keys) {
560 		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
561 		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\\n",
562 			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
563 			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
564 			   8, ltk->rand, 16, ltk->val);
565 	}
566 	hci_dev_unlock(hdev);
567 
568 	return 0;
569 }
570 
/* Bind the seq_file show callback to the hci_dev stashed in i_private. */
static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

/* debugfs file operations for the "long_term_keys" entry */
static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
582 
583 static int conn_min_interval_set(void *data, u64 val)
584 {
585 	struct hci_dev *hdev = data;
586 
587 	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
588 		return -EINVAL;
589 
590 	hci_dev_lock(hdev);
591 	hdev->le_conn_min_interval = val;
592 	hci_dev_unlock(hdev);
593 
594 	return 0;
595 }
596 
597 static int conn_min_interval_get(void *data, u64 *val)
598 {
599 	struct hci_dev *hdev = data;
600 
601 	hci_dev_lock(hdev);
602 	*val = hdev->le_conn_min_interval;
603 	hci_dev_unlock(hdev);
604 
605 	return 0;
606 }
607 
608 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
609 			conn_min_interval_set, "%llu\n");
610 
611 static int conn_max_interval_set(void *data, u64 val)
612 {
613 	struct hci_dev *hdev = data;
614 
615 	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
616 		return -EINVAL;
617 
618 	hci_dev_lock(hdev);
619 	hdev->le_conn_max_interval = val;
620 	hci_dev_unlock(hdev);
621 
622 	return 0;
623 }
624 
625 static int conn_max_interval_get(void *data, u64 *val)
626 {
627 	struct hci_dev *hdev = data;
628 
629 	hci_dev_lock(hdev);
630 	*val = hdev->le_conn_max_interval;
631 	hci_dev_unlock(hdev);
632 
633 	return 0;
634 }
635 
636 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
637 			conn_max_interval_set, "%llu\n");
638 
639 /* ---- HCI requests ---- */
640 
/* Completion callback for synchronous requests: record the result and
 * wake the sleeper in __hci_req_sync()/__hci_cmd_sync_ev(). The status
 * update must happen before the wakeup so the waiter sees HCI_REQ_DONE.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
651 
/* Abort a pending synchronous request with the given error and wake
 * the sleeping waiter; a no-op when no request is pending.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
662 
/* Take ownership of the last received event (hdev->recv_evt) and return
 * it if it matches: either the explicitly requested event code, or a
 * Command Complete for the given opcode. On any mismatch or truncated
 * event the skb is freed and ERR_PTR(-ENODATA) is returned. The caller
 * owns the returned skb; the event header (and, for Command Complete,
 * the hci_ev_cmd_complete header) has already been pulled off.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach the stored event under the lock so no one else frees it */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* A non-zero event means the caller waits for that specific event
	 * instead of a Command Complete.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
717 
/* Send a single HCI command and sleep until the matching response
 * arrives or the timeout expires. When @event is non-zero the caller
 * waits for that event instead of a Command Complete. Returns the
 * response skb (caller must free) or an ERR_PTR. Must be called with
 * the request lock held (see hci_req_lock callers).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Mark pending before running so the completion can observe it */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
771 
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * default Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
778 
/* Execute request and wait for completion. */
/* Build a request via @func, run it and sleep until it completes, is
 * canceled, or @timeout expires. Returns 0 on success or a negative
 * errno. Caller must hold the request lock (see hci_req_sync).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				      unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Mark pending before running so the completion can observe it */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
842 
843 static int hci_req_sync(struct hci_dev *hdev,
844 			void (*req)(struct hci_request *req,
845 				    unsigned long opt),
846 			unsigned long opt, __u32 timeout)
847 {
848 	int ret;
849 
850 	if (!test_bit(HCI_UP, &hdev->flags))
851 		return -ENETDOWN;
852 
853 	/* Serialize all requests */
854 	hci_req_lock(hdev);
855 	ret = __hci_req_sync(hdev, req, opt, timeout);
856 	hci_req_unlock(hdev);
857 
858 	return ret;
859 }
860 
/* Queue an HCI_OP_RESET and flag the device as resetting. */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
869 
/* Stage-one init for BR/EDR controllers: packet-based flow control and
 * the basic identity queries.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
883 
/* Stage-one init for AMP controllers: block-based flow control and the
 * AMP-specific capability queries.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
909 
910 static void hci_init1_req(struct hci_request *req, unsigned long opt)
911 {
912 	struct hci_dev *hdev = req->hdev;
913 
914 	BT_DBG("%s %ld", hdev->name, opt);
915 
916 	/* Reset */
917 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
918 		hci_reset_req(req, 0);
919 
920 	switch (hdev->dev_type) {
921 	case HCI_BREDR:
922 		bredr_init(req);
923 		break;
924 
925 	case HCI_AMP:
926 		amp_init(req);
927 		break;
928 
929 	default:
930 		BT_ERR("Unknown device type %d", hdev->dev_type);
931 		break;
932 	}
933 }
934 
/* Stage-two setup for BR/EDR capable controllers: basic capability
 * queries, event filter reset and connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 in baseband slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
976 
/* Stage-two setup for LE capable controllers: LE capability queries and
 * implicit LE enablement for single-mode (LE-only) devices.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1000 
1001 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1002 {
1003 	if (lmp_ext_inq_capable(hdev))
1004 		return 0x02;
1005 
1006 	if (lmp_inq_rssi_capable(hdev))
1007 		return 0x01;
1008 
1009 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1010 	    hdev->lmp_subver == 0x0757)
1011 		return 0x01;
1012 
1013 	if (hdev->manufacturer == 15) {
1014 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1015 			return 0x01;
1016 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1017 			return 0x01;
1018 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1019 			return 0x01;
1020 	}
1021 
1022 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1023 	    hdev->lmp_subver == 0x1805)
1024 		return 0x01;
1025 
1026 	return 0x00;
1027 }
1028 
1029 static void hci_setup_inquiry_mode(struct hci_request *req)
1030 {
1031 	u8 mode;
1032 
1033 	mode = hci_get_inquiry_mode(req->hdev);
1034 
1035 	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1036 }
1037 
/* Build and queue the Set Event Mask command (and the LE event mask for
 * LE capable controllers), enabling only the events the controller's
 * feature set can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE controllers additionally get an LE event mask */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1118 
/* Stage-two initialization: run BR/EDR and LE setup as supported, then
 * configure event masks, SSP/EIR, inquiry mode, extended features and
 * authentication.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any extended inquiry response */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1182 
1183 static void hci_setup_link_policy(struct hci_request *req)
1184 {
1185 	struct hci_dev *hdev = req->hdev;
1186 	struct hci_cp_write_def_link_policy cp;
1187 	u16 link_policy = 0;
1188 
1189 	if (lmp_rswitch_capable(hdev))
1190 		link_policy |= HCI_LP_RSWITCH;
1191 	if (lmp_hold_capable(hdev))
1192 		link_policy |= HCI_LP_HOLD;
1193 	if (lmp_sniff_capable(hdev))
1194 		link_policy |= HCI_LP_SNIFF;
1195 	if (lmp_park_capable(hdev))
1196 		link_policy |= HCI_LP_PARK;
1197 
1198 	cp.policy = cpu_to_le16(link_policy);
1199 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1200 }
1201 
/* Synchronize the controller's LE Host Supported setting with the
 * current HCI_LE_ENABLED flag, sending Write LE Host Supported only
 * when the value would actually change.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Skip the command when the controller already reports the
	 * desired LE host support state.
	 */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1222 
/* Build and send event mask page 2, enabling only the Connectionless
 * Slave Broadcast related events matching the controller's CSB
 * master/slave capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1250 
/* Third stage init request: commands that depend on the information
 * gathered during stage two (supported commands bitmap, max_page,
 * LE capabilities).
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Only set the default link policy when the Write Default Link
	 * Policy command is listed as supported.
	 */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1302 
/* Fourth stage init request: optional features that rely on the
 * extended command/feature information from the earlier stages.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
1315 
/* Run the staged controller initialization (init1 for all controller
 * types, init2-4 for BR/EDR/LE only) and, during the initial setup
 * phase, create the debugfs entries matching the controller's
 * capabilities. Returns 0 on success or a negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries valid for every controller */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing specific entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
	}

	/* Sniff mode specific entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				   hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
	}

	return 0;
}
1413 
1414 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1415 {
1416 	__u8 scan = opt;
1417 
1418 	BT_DBG("%s %x", req->hdev->name, scan);
1419 
1420 	/* Inquiry and Page scans */
1421 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1422 }
1423 
1424 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1425 {
1426 	__u8 auth = opt;
1427 
1428 	BT_DBG("%s %x", req->hdev->name, auth);
1429 
1430 	/* Authentication */
1431 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1432 }
1433 
1434 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1435 {
1436 	__u8 encrypt = opt;
1437 
1438 	BT_DBG("%s %x", req->hdev->name, encrypt);
1439 
1440 	/* Encryption */
1441 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1442 }
1443 
1444 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1445 {
1446 	__le16 policy = cpu_to_le16(opt);
1447 
1448 	BT_DBG("%s %x", req->hdev->name, policy);
1449 
1450 	/* Default link policy */
1451 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1452 }
1453 
1454 /* Get HCI device by index.
1455  * Device is held on return. */
1456 struct hci_dev *hci_dev_get(int index)
1457 {
1458 	struct hci_dev *hdev = NULL, *d;
1459 
1460 	BT_DBG("%d", index);
1461 
1462 	if (index < 0)
1463 		return NULL;
1464 
1465 	read_lock(&hci_dev_list_lock);
1466 	list_for_each_entry(d, &hci_dev_list, list) {
1467 		if (d->id == index) {
1468 			hdev = hci_dev_hold(d);
1469 			break;
1470 		}
1471 	}
1472 	read_unlock(&hci_dev_list_lock);
1473 	return hdev;
1474 }
1475 
1476 /* ---- Inquiry support ---- */
1477 
1478 bool hci_discovery_active(struct hci_dev *hdev)
1479 {
1480 	struct discovery_state *discov = &hdev->discovery;
1481 
1482 	switch (discov->state) {
1483 	case DISCOVERY_FINDING:
1484 	case DISCOVERY_RESOLVING:
1485 		return true;
1486 
1487 	default:
1488 		return false;
1489 	}
1490 }
1491 
/* Transition the discovery state machine, emitting mgmt Discovering
 * events on the transitions user space needs to see.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually got
		 * going, so no "stopped" event is sent in that case.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1517 
1518 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1519 {
1520 	struct discovery_state *cache = &hdev->discovery;
1521 	struct inquiry_entry *p, *n;
1522 
1523 	list_for_each_entry_safe(p, n, &cache->all, all) {
1524 		list_del(&p->all);
1525 		kfree(p);
1526 	}
1527 
1528 	INIT_LIST_HEAD(&cache->unknown);
1529 	INIT_LIST_HEAD(&cache->resolve);
1530 }
1531 
1532 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1533 					       bdaddr_t *bdaddr)
1534 {
1535 	struct discovery_state *cache = &hdev->discovery;
1536 	struct inquiry_entry *e;
1537 
1538 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1539 
1540 	list_for_each_entry(e, &cache->all, all) {
1541 		if (!bacmp(&e->data.bdaddr, bdaddr))
1542 			return e;
1543 	}
1544 
1545 	return NULL;
1546 }
1547 
1548 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1549 						       bdaddr_t *bdaddr)
1550 {
1551 	struct discovery_state *cache = &hdev->discovery;
1552 	struct inquiry_entry *e;
1553 
1554 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1555 
1556 	list_for_each_entry(e, &cache->unknown, list) {
1557 		if (!bacmp(&e->data.bdaddr, bdaddr))
1558 			return e;
1559 	}
1560 
1561 	return NULL;
1562 }
1563 
1564 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1565 						       bdaddr_t *bdaddr,
1566 						       int state)
1567 {
1568 	struct discovery_state *cache = &hdev->discovery;
1569 	struct inquiry_entry *e;
1570 
1571 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1572 
1573 	list_for_each_entry(e, &cache->resolve, list) {
1574 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1575 			return e;
1576 		if (!bacmp(&e->data.bdaddr, bdaddr))
1577 			return e;
1578 	}
1579 
1580 	return NULL;
1581 }
1582 
/* Re-insert @ie into the name resolve list, keeping the list ordered
 * by ascending abs(rssi) so entries with the strongest signal are
 * resolved first. Entries whose resolution is already in flight
 * (NAME_PENDING) always stay at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk forward past pending entries and anything with a stronger
	 * signal; @pos ends up on the last node that sorts before @ie.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1601 
/* Add a fresh inquiry result to the discovery cache, or refresh the
 * existing entry for the same address.
 *
 * Returns false when the remote name is still completely unknown
 * (NAME_NOT_KNOWN) or the entry could not be allocated, true
 * otherwise. If @ssp is non-NULL it is set to whether SSP support was
 * seen in this or any earlier response from the device.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A new inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP support seen in an earlier response */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* A changed RSSI can alter the entry's position in the
		 * name resolution order.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* If the name just became known, take the entry off whichever
	 * pending list (unknown/resolve) it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1659 
1660 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1661 {
1662 	struct discovery_state *cache = &hdev->discovery;
1663 	struct inquiry_info *info = (struct inquiry_info *) buf;
1664 	struct inquiry_entry *e;
1665 	int copied = 0;
1666 
1667 	list_for_each_entry(e, &cache->all, all) {
1668 		struct inquiry_data *data = &e->data;
1669 
1670 		if (copied >= num)
1671 			break;
1672 
1673 		bacpy(&info->bdaddr, &data->bdaddr);
1674 		info->pscan_rep_mode	= data->pscan_rep_mode;
1675 		info->pscan_period_mode	= data->pscan_period_mode;
1676 		info->pscan_mode	= data->pscan_mode;
1677 		memcpy(info->dev_class, data->dev_class, 3);
1678 		info->clock_offset	= data->clock_offset;
1679 
1680 		info++;
1681 		copied++;
1682 	}
1683 
1684 	BT_DBG("cache %p, copied %d", cache, copied);
1685 	return copied;
1686 }
1687 
1688 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1689 {
1690 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1691 	struct hci_dev *hdev = req->hdev;
1692 	struct hci_cp_inquiry cp;
1693 
1694 	BT_DBG("%s", hdev->name);
1695 
1696 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1697 		return;
1698 
1699 	/* Start Inquiry */
1700 	memcpy(&cp.lap, &ir->lap, 3);
1701 	cp.length  = ir->length;
1702 	cp.num_rsp = ir->num_rsp;
1703 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1704 }
1705 
/* Bit-wait action passed to wait_on_bit(): sleep once and report
 * whether a pending signal should abort the wait (non-zero aborts).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1711 
1712 int hci_inquiry(void __user *arg)
1713 {
1714 	__u8 __user *ptr = arg;
1715 	struct hci_inquiry_req ir;
1716 	struct hci_dev *hdev;
1717 	int err = 0, do_inquiry = 0, max_rsp;
1718 	long timeo;
1719 	__u8 *buf;
1720 
1721 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1722 		return -EFAULT;
1723 
1724 	hdev = hci_dev_get(ir.dev_id);
1725 	if (!hdev)
1726 		return -ENODEV;
1727 
1728 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1729 		err = -EBUSY;
1730 		goto done;
1731 	}
1732 
1733 	if (hdev->dev_type != HCI_BREDR) {
1734 		err = -EOPNOTSUPP;
1735 		goto done;
1736 	}
1737 
1738 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1739 		err = -EOPNOTSUPP;
1740 		goto done;
1741 	}
1742 
1743 	hci_dev_lock(hdev);
1744 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1745 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1746 		hci_inquiry_cache_flush(hdev);
1747 		do_inquiry = 1;
1748 	}
1749 	hci_dev_unlock(hdev);
1750 
1751 	timeo = ir.length * msecs_to_jiffies(2000);
1752 
1753 	if (do_inquiry) {
1754 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1755 				   timeo);
1756 		if (err < 0)
1757 			goto done;
1758 
1759 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1760 		 * cleared). If it is interrupted by a signal, return -EINTR.
1761 		 */
1762 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1763 				TASK_INTERRUPTIBLE))
1764 			return -EINTR;
1765 	}
1766 
1767 	/* for unlimited number of responses we will use buffer with
1768 	 * 255 entries
1769 	 */
1770 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1771 
1772 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
1773 	 * copy it to the user space.
1774 	 */
1775 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1776 	if (!buf) {
1777 		err = -ENOMEM;
1778 		goto done;
1779 	}
1780 
1781 	hci_dev_lock(hdev);
1782 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1783 	hci_dev_unlock(hdev);
1784 
1785 	BT_DBG("num_rsp %d", ir.num_rsp);
1786 
1787 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1788 		ptr += sizeof(ir);
1789 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1790 				 ir.num_rsp))
1791 			err = -EFAULT;
1792 	} else
1793 		err = -EFAULT;
1794 
1795 	kfree(buf);
1796 
1797 done:
1798 	hci_dev_put(hdev);
1799 	return err;
1800 }
1801 
/* Bring the controller up: open the transport, run the vendor setup
 * callback and the staged HCI initialization. Takes the request lock
 * for the duration. Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport to the controller */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor specific setup, only run during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user channel owners drive the controller
		 * themselves and skip the staged HCI init.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1903 
1904 /* ---- HCI ioctl helpers ---- */
1905 
1906 int hci_dev_open(__u16 dev)
1907 {
1908 	struct hci_dev *hdev;
1909 	int err;
1910 
1911 	hdev = hci_dev_get(dev);
1912 	if (!hdev)
1913 		return -ENODEV;
1914 
1915 	/* We need to ensure that no other power on/off work is pending
1916 	 * before proceeding to call hci_dev_do_open. This is
1917 	 * particularly important if the setup procedure has not yet
1918 	 * completed.
1919 	 */
1920 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1921 		cancel_delayed_work(&hdev->power_off);
1922 
1923 	/* After this call it is guaranteed that the setup procedure
1924 	 * has finished. This means that error conditions like RFKILL
1925 	 * or no valid public or static random address apply.
1926 	 */
1927 	flush_workqueue(hdev->req_workqueue);
1928 
1929 	err = hci_dev_do_open(hdev);
1930 
1931 	hci_dev_put(hdev);
1932 
1933 	return err;
1934 }
1935 
/* Power down a controller: stop pending work, flush queues and caches,
 * optionally send an HCI Reset and close the transport. The inverse of
 * hci_dev_do_open(). Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: only the command timer needs stopping */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* An active discoverable timeout does not survive power off */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2033 
2034 int hci_dev_close(__u16 dev)
2035 {
2036 	struct hci_dev *hdev;
2037 	int err;
2038 
2039 	hdev = hci_dev_get(dev);
2040 	if (!hdev)
2041 		return -ENODEV;
2042 
2043 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2044 		err = -EBUSY;
2045 		goto done;
2046 	}
2047 
2048 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2049 		cancel_delayed_work(&hdev->power_off);
2050 
2051 	err = hci_dev_do_close(hdev);
2052 
2053 done:
2054 	hci_dev_put(hdev);
2055 	return err;
2056 }
2057 
/* HCIDEVRESET ioctl helper: flush all pending traffic and cached state
 * of a running controller and, unless it is in raw mode, issue an HCI
 * Reset. The device stays up afterwards.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the command counter and per-link-type packet quotas */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2102 
2103 int hci_dev_reset_stat(__u16 dev)
2104 {
2105 	struct hci_dev *hdev;
2106 	int ret = 0;
2107 
2108 	hdev = hci_dev_get(dev);
2109 	if (!hdev)
2110 		return -ENODEV;
2111 
2112 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2113 		ret = -EBUSY;
2114 		goto done;
2115 	}
2116 
2117 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2118 
2119 done:
2120 	hci_dev_put(hdev);
2121 	return ret;
2122 }
2123 
/* Handler for the HCISET* ioctls: run the matching HCI request or
 * update the corresponding hdev field from dr.dev_opt. Returns 0 on
 * success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two 16-bit values: the packet
	 * count in the first __u16 and the MTU in the second.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2214 
/* HCIGETDEVLIST ioctl helper: copy the ids and flags of up to the
 * requested number of registered controllers to user space.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays within two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing the devices counts as usage: cancel a pending
		 * automatic power-off.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not controlled via mgmt default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2261 
/* HCIGETDEVINFO ioctl helper: fill a hci_dev_info snapshot for one
 * controller and copy it back to user space.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as usage: cancel a pending
	 * automatic power-off.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not controlled via mgmt default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble carries the bus type, the next two bits the device
	 * type.
	 */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controllers report their LE buffer settings in
		 * the ACL fields; SCO does not apply.
		 */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2310 
2311 /* ---- Interface to HCI drivers ---- */
2312 
2313 static int hci_rfkill_set_block(void *data, bool blocked)
2314 {
2315 	struct hci_dev *hdev = data;
2316 
2317 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2318 
2319 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2320 		return -EBUSY;
2321 
2322 	if (blocked) {
2323 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
2324 		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2325 			hci_dev_do_close(hdev);
2326 	} else {
2327 		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2328 	}
2329 
2330 	return 0;
2331 }
2332 
/* Only the block/unblock callback is needed; rfkill handles the rest */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2336 
/* Work handler that brings a controller up (queued at registration
 * time and from power-on requests).
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* rfkilled, or a BR/EDR device without any usable address */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off automatically unless user space shows
		 * interest in the device before the timeout expires.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2368 
2369 static void hci_power_off(struct work_struct *work)
2370 {
2371 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2372 					    power_off.work);
2373 
2374 	BT_DBG("%s", hdev->name);
2375 
2376 	hci_dev_do_close(hdev);
2377 }
2378 
2379 static void hci_discov_off(struct work_struct *work)
2380 {
2381 	struct hci_dev *hdev;
2382 
2383 	hdev = container_of(work, struct hci_dev, discov_off.work);
2384 
2385 	BT_DBG("%s", hdev->name);
2386 
2387 	mgmt_discoverable_timeout(hdev);
2388 }
2389 
2390 int hci_uuids_clear(struct hci_dev *hdev)
2391 {
2392 	struct bt_uuid *uuid, *tmp;
2393 
2394 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2395 		list_del(&uuid->list);
2396 		kfree(uuid);
2397 	}
2398 
2399 	return 0;
2400 }
2401 
2402 int hci_link_keys_clear(struct hci_dev *hdev)
2403 {
2404 	struct list_head *p, *n;
2405 
2406 	list_for_each_safe(p, n, &hdev->link_keys) {
2407 		struct link_key *key;
2408 
2409 		key = list_entry(p, struct link_key, list);
2410 
2411 		list_del(p);
2412 		kfree(key);
2413 	}
2414 
2415 	return 0;
2416 }
2417 
2418 int hci_smp_ltks_clear(struct hci_dev *hdev)
2419 {
2420 	struct smp_ltk *k, *tmp;
2421 
2422 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2423 		list_del(&k->list);
2424 		kfree(k);
2425 	}
2426 
2427 	return 0;
2428 }
2429 
2430 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2431 {
2432 	struct link_key *k;
2433 
2434 	list_for_each_entry(k, &hdev->link_keys, list)
2435 		if (bacmp(bdaddr, &k->bdaddr) == 0)
2436 			return k;
2437 
2438 	return NULL;
2439 }
2440 
2441 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2442 			       u8 key_type, u8 old_key_type)
2443 {
2444 	/* Legacy key */
2445 	if (key_type < 0x03)
2446 		return true;
2447 
2448 	/* Debug keys are insecure so don't store them persistently */
2449 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2450 		return false;
2451 
2452 	/* Changed combination key and there's no previous one */
2453 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2454 		return false;
2455 
2456 	/* Security mode 3 case */
2457 	if (!conn)
2458 		return true;
2459 
2460 	/* Neither local nor remote side had no-bonding as requirement */
2461 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2462 		return true;
2463 
2464 	/* Local side had dedicated bonding as requirement */
2465 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2466 		return true;
2467 
2468 	/* Remote side had dedicated bonding as requirement */
2469 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2470 		return true;
2471 
2472 	/* If none of the above criteria match, then don't store the key
2473 	 * persistently */
2474 	return false;
2475 }
2476 
2477 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2478 {
2479 	struct smp_ltk *k;
2480 
2481 	list_for_each_entry(k, &hdev->long_term_keys, list) {
2482 		if (k->ediv != ediv ||
2483 		    memcmp(rand, k->rand, sizeof(k->rand)))
2484 			continue;
2485 
2486 		return k;
2487 	}
2488 
2489 	return NULL;
2490 }
2491 
2492 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2493 				     u8 addr_type)
2494 {
2495 	struct smp_ltk *k;
2496 
2497 	list_for_each_entry(k, &hdev->long_term_keys, list)
2498 		if (addr_type == k->bdaddr_type &&
2499 		    bacmp(bdaddr, &k->bdaddr) == 0)
2500 			return k;
2501 
2502 	return NULL;
2503 }
2504 
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL (security mode 3 pairing). If @new_key is set the
 * key is reported to the management interface, which also determines
 * whether the connection should flush the key on disconnect.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff means "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are discarded when the link goes down */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2557 
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.
 *
 * Only STK and LTK key types are accepted; any other type is silently
 * ignored. If @new_key is set and the key is an LTK, it is reported
 * to the management interface. Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are transient and not reported to user space */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
2594 
2595 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2596 {
2597 	struct link_key *key;
2598 
2599 	key = hci_find_link_key(hdev, bdaddr);
2600 	if (!key)
2601 		return -ENOENT;
2602 
2603 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2604 
2605 	list_del(&key->list);
2606 	kfree(key);
2607 
2608 	return 0;
2609 }
2610 
2611 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2612 {
2613 	struct smp_ltk *k, *tmp;
2614 
2615 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2616 		if (bacmp(bdaddr, &k->bdaddr))
2617 			continue;
2618 
2619 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2620 
2621 		list_del(&k->list);
2622 		kfree(k);
2623 	}
2624 
2625 	return 0;
2626 }
2627 
2628 /* HCI command timer function */
2629 static void hci_cmd_timeout(unsigned long arg)
2630 {
2631 	struct hci_dev *hdev = (void *) arg;
2632 
2633 	if (hdev->sent_cmd) {
2634 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2635 		u16 opcode = __le16_to_cpu(sent->opcode);
2636 
2637 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2638 	} else {
2639 		BT_ERR("%s command tx timeout", hdev->name);
2640 	}
2641 
2642 	atomic_set(&hdev->cmd_cnt, 1);
2643 	queue_work(hdev->workqueue, &hdev->cmd_work);
2644 }
2645 
2646 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2647 					  bdaddr_t *bdaddr)
2648 {
2649 	struct oob_data *data;
2650 
2651 	list_for_each_entry(data, &hdev->remote_oob_data, list)
2652 		if (bacmp(bdaddr, &data->bdaddr) == 0)
2653 			return data;
2654 
2655 	return NULL;
2656 }
2657 
2658 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2659 {
2660 	struct oob_data *data;
2661 
2662 	data = hci_find_remote_oob_data(hdev, bdaddr);
2663 	if (!data)
2664 		return -ENOENT;
2665 
2666 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2667 
2668 	list_del(&data->list);
2669 	kfree(data);
2670 
2671 	return 0;
2672 }
2673 
2674 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2675 {
2676 	struct oob_data *data, *n;
2677 
2678 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2679 		list_del(&data->list);
2680 		kfree(data);
2681 	}
2682 
2683 	return 0;
2684 }
2685 
2686 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2687 			    u8 *randomizer)
2688 {
2689 	struct oob_data *data;
2690 
2691 	data = hci_find_remote_oob_data(hdev, bdaddr);
2692 
2693 	if (!data) {
2694 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
2695 		if (!data)
2696 			return -ENOMEM;
2697 
2698 		bacpy(&data->bdaddr, bdaddr);
2699 		list_add(&data->list, &hdev->remote_oob_data);
2700 	}
2701 
2702 	memcpy(data->hash, hash, sizeof(data->hash));
2703 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2704 
2705 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2706 
2707 	return 0;
2708 }
2709 
2710 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2711 					 bdaddr_t *bdaddr, u8 type)
2712 {
2713 	struct bdaddr_list *b;
2714 
2715 	list_for_each_entry(b, &hdev->blacklist, list) {
2716 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2717 			return b;
2718 	}
2719 
2720 	return NULL;
2721 }
2722 
2723 int hci_blacklist_clear(struct hci_dev *hdev)
2724 {
2725 	struct list_head *p, *n;
2726 
2727 	list_for_each_safe(p, n, &hdev->blacklist) {
2728 		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2729 
2730 		list_del(p);
2731 		kfree(b);
2732 	}
2733 
2734 	return 0;
2735 }
2736 
2737 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2738 {
2739 	struct bdaddr_list *entry;
2740 
2741 	if (!bacmp(bdaddr, BDADDR_ANY))
2742 		return -EBADF;
2743 
2744 	if (hci_blacklist_lookup(hdev, bdaddr, type))
2745 		return -EEXIST;
2746 
2747 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2748 	if (!entry)
2749 		return -ENOMEM;
2750 
2751 	bacpy(&entry->bdaddr, bdaddr);
2752 	entry->bdaddr_type = type;
2753 
2754 	list_add(&entry->list, &hdev->blacklist);
2755 
2756 	return mgmt_device_blocked(hdev, bdaddr, type);
2757 }
2758 
2759 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2760 {
2761 	struct bdaddr_list *entry;
2762 
2763 	if (!bacmp(bdaddr, BDADDR_ANY))
2764 		return hci_blacklist_clear(hdev);
2765 
2766 	entry = hci_blacklist_lookup(hdev, bdaddr, type);
2767 	if (!entry)
2768 		return -ENOENT;
2769 
2770 	list_del(&entry->list);
2771 	kfree(entry);
2772 
2773 	return mgmt_device_unblocked(hdev, bdaddr, type);
2774 }
2775 
2776 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2777 {
2778 	if (status) {
2779 		BT_ERR("Failed to start inquiry: status %d", status);
2780 
2781 		hci_dev_lock(hdev);
2782 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2783 		hci_dev_unlock(hdev);
2784 		return;
2785 	}
2786 }
2787 
/* Request-complete callback for the LE scan disable request. For
 * LE-only discovery it marks discovery as stopped; for interleaved
 * discovery it chains the BR/EDR inquiry phase.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale cache entries before the inquiry phase */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2830 
/* Delayed work that issues the HCI command to stop an LE scan once
 * the scan duration has elapsed. Completion is handled in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	/* memset keeps all other command parameters zeroed */
	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2851 
/* Alloc HCI device
 *
 * Allocates and initializes a struct hci_dev with conservative
 * defaults; the controller's real capabilities are read later during
 * device init. Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Minimal packet types every controller must support */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Default sniff mode interval bounds */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE scan and connection parameters */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Timer that catches commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2911 
/* Free HCI device
 *
 * Drops the device reference; the memory is released by the driver
 * core release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2919 
/* Register HCI device
 *
 * Assigns an index, creates the per-device workqueues, sysfs/debugfs
 * entries and the optional rfkill switch, publishes the device on the
 * global list and kicks off the initial power-on. Returns the device
 * id on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* Drivers must provide at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* High-priority single-threaded (max_active 1) workqueues */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: failure to register it is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	/* New devices start in setup state and power off automatically
	 * unless user space claims them in time (see hci_power_on()).
	 */
	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Kick off the initial power-on sequence asynchronously */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3017 
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, shuts it down, tears down workqueues/sysfs/debugfs/rfkill,
 * frees all stored keys and drops the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so pending work can bail out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled driver packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Free all stored keys, blacklist entries and OOB data */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3078 
/* Suspend HCI device
 *
 * Only notifies interested parties; no device state changes here.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3086 
/* Resume HCI device
 *
 * Only notifies interested parties; no device state changes here.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3094 
3095 /* Receive frame from HCI drivers */
3096 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3097 {
3098 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3099 		      && !test_bit(HCI_INIT, &hdev->flags))) {
3100 		kfree_skb(skb);
3101 		return -ENXIO;
3102 	}
3103 
3104 	/* Incoming skb */
3105 	bt_cb(skb)->incoming = 1;
3106 
3107 	/* Time stamp */
3108 	__net_timestamp(skb);
3109 
3110 	skb_queue_tail(&hdev->rx_q, skb);
3111 	queue_work(hdev->workqueue, &hdev->rx_work);
3112 
3113 	return 0;
3114 }
3115 EXPORT_SYMBOL(hci_recv_frame);
3116 
/* Incrementally reassemble one HCI packet of @type from the @count
 * bytes at @data, using hdev->reassembly[@index] as the per-slot
 * in-progress buffer.
 *
 * Returns the number of input bytes NOT consumed (>= 0) or a negative
 * errno. A completed packet is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for
		 * the largest packet of this type and expect the
		 * fixed-size header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy no more than the bytes still expected */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and sanity-check it against the buffer size.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3224 
/* Feed @count bytes of packet @type into the per-type reassembly slot
 * until everything is consumed.
 *
 * Returns 0 once all bytes are consumed or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		/* The reassembly slot index is derived from the type */
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
3244 
/* Dedicated reassembly slot for byte-stream transports */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into the stream reassembly slot. The first
 * byte of each new frame carries the packet type indicator.
 * Returns 0 once all bytes are consumed or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3279 
3280 /* ---- Interface to upper protocols ---- */
3281 
/* Register an upper-protocol callback set with the HCI core. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3293 
/* Unregister a previously registered upper-protocol callback set. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3305 
/* Hand one frame to the driver for transmission, after mirroring it
 * to the monitor channel and (in promiscuous mode) to raw sockets.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3327 
/* Initialize an HCI request: empty command queue, no error recorded. */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
3334 
/* Submit a built request: splice its commands onto the device command
 * queue with @complete attached to the final command.
 *
 * Returns 0, the build error previously recorded in req->err, or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3366 
/* Build an skb containing one HCI command: header followed by @plen
 * bytes of @param. Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Opcode goes on the wire little-endian */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
3391 
/* Send HCI command
 *
 * Builds and queues a single stand-alone command for transmission.
 * Returns 0 on success or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3416 
/* Queue a command to an asynchronous HCI request.
 *
 * NOTE(review): @event appears to name a specific HCI event that
 * completes this command instead of the usual Command Complete/Status
 * — confirm against the event processing code.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		/* Record the failure so hci_req_run() can report it */
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3447 
/* Queue a command with no special completion event; see
 * hci_req_add_ev().
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3453 
/* Get data from the previously sent command
 *
 * Returns a pointer to the parameter bytes of the last sent command,
 * or NULL if no command is pending or its opcode does not match.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Only hand out the data when the caller asks for the right
	 * opcode (stored little-endian on the wire).
	 */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	/* Parameters start right after the command header */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3471 
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	/* Prepend the ACL header in front of the payload */
	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and packet-boundary flags share one field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
3484 
/* Split an (optionally fragmented) skb into ACL packets and queue them
 * on @queue; fragments are queued atomically so they stay contiguous.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb->len to the linear part; the fragments hanging off
	 * frag_list are handled individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		/* AMP controllers address data by the logical channel
		 * handle instead of the connection handle.
		 */
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, even on AMP where the first fragment
			 * used chan->handle — confirm this is intentional.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3542 
/* Queue ACL data on a channel and kick the TX work to transmit it */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	/* Add ACL header(s) and queue on the channel's data queue */
	hci_queue_acl(chan, &chan->data_q, skb, flags);

	/* Actual transmission is deferred to hci_tx_work() */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3553 
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header on the stack and copy it in front of the
	 * payload. NOTE(review): hdr.dlen holds skb->len in a single byte,
	 * so lengths above 255 would be truncated — presumably SCO payloads
	 * never exceed that; confirm against callers.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Transmission is deferred to hci_tx_work() */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3574 
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler: pick the connection of the given link type
 * that has queued data and the fewest outstanding packets, and compute
 * its fair share (*quote) of the controller's free buffers.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest sent-but-not-yet-
		 * completed packets.
		 */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Select the controller buffer budget that applies to
		 * this link type.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE buffer pool the ACL
			 * buffers are shared.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the budget evenly among eligible connections, but
		 * always allow at least one packet.
		 */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3637 
3638 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3639 {
3640 	struct hci_conn_hash *h = &hdev->conn_hash;
3641 	struct hci_conn *c;
3642 
3643 	BT_ERR("%s link tx timeout", hdev->name);
3644 
3645 	rcu_read_lock();
3646 
3647 	/* Kill stalled connections */
3648 	list_for_each_entry_rcu(c, &h->list, list) {
3649 		if (c->type == type && c->sent) {
3650 			BT_ERR("%s killing stalled connection %pMR",
3651 			       hdev->name, &c->dst);
3652 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3653 		}
3654 	}
3655 
3656 	rcu_read_unlock();
3657 }
3658 
/* HCI channel scheduler: among all channels of the given link type with
 * queued data, pick one whose head-of-queue packet has the highest
 * priority; ties are broken in favour of the connection with the fewest
 * outstanding packets. *quote receives the channel's share of the free
 * controller buffers.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head-of-queue priority counts */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a strictly higher priority: restart the
			 * fairness bookkeeping at this level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal-priority channels prefer the one on
			 * the connection with the fewest outstanding
			 * packets.
			 */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the controller buffer budget for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Without a dedicated LE buffer pool the ACL buffers are
		 * shared.
		 */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the budget evenly among the channels found at the winning
	 * priority, but always allow at least one packet.
	 */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3740 
/* Anti-starvation pass run after a scheduling round: reset the per-round
 * counter of every channel that sent something, and promote the
 * head-of-queue packet of every channel that sent nothing to just below
 * the maximum priority so it gets picked next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel did send this round: just reset the
			 * counter, no promotion needed.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3790 
3791 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3792 {
3793 	/* Calculate count of blocks used by this packet */
3794 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3795 }
3796 
3797 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3798 {
3799 	if (!test_bit(HCI_RAW, &hdev->flags)) {
3800 		/* ACL tx timeout must be longer than maximum
3801 		 * link supervision timeout (40.9 seconds) */
3802 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3803 				       HCI_ACL_TX_TIMEOUT))
3804 			hci_link_tx_to(hdev, ACL_LINK);
3805 	}
3806 }
3807 
/* Packet-based ACL scheduler: drain channel queues while the controller
 * has free ACL buffers, stopping a channel early if a lower-priority
 * packet reaches its queue head.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; track per-channel
			 * and per-connection outstanding counts.
			 */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3845 
/* Block-based ACL scheduler: like hci_sched_acl_pkt() but accounts for
 * controller buffer space in data blocks instead of whole packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links, everything else ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): a packet needing more blocks than
			 * are currently free is dropped here without
			 * kfree_skb() and the priority recalculation below
			 * is skipped — looks like a leak; verify.
			 */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* The quote is in blocks as well */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If anything was sent, rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3899 
3900 static void hci_sched_acl(struct hci_dev *hdev)
3901 {
3902 	BT_DBG("%s", hdev->name);
3903 
3904 	/* No ACL link over BR/EDR controller */
3905 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3906 		return;
3907 
3908 	/* No AMP link over AMP controller */
3909 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3910 		return;
3911 
3912 	switch (hdev->flow_ctl_mode) {
3913 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
3914 		hci_sched_acl_pkt(hdev);
3915 		break;
3916 
3917 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3918 		hci_sched_acl_blk(hdev);
3919 		break;
3920 	}
3921 }
3922 
3923 /* Schedule SCO */
3924 static void hci_sched_sco(struct hci_dev *hdev)
3925 {
3926 	struct hci_conn *conn;
3927 	struct sk_buff *skb;
3928 	int quote;
3929 
3930 	BT_DBG("%s", hdev->name);
3931 
3932 	if (!hci_conn_num(hdev, SCO_LINK))
3933 		return;
3934 
3935 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3936 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3937 			BT_DBG("skb %p len %d", skb, skb->len);
3938 			hci_send_frame(hdev, skb);
3939 
3940 			conn->sent++;
3941 			if (conn->sent == ~0)
3942 				conn->sent = 0;
3943 		}
3944 	}
3945 }
3946 
3947 static void hci_sched_esco(struct hci_dev *hdev)
3948 {
3949 	struct hci_conn *conn;
3950 	struct sk_buff *skb;
3951 	int quote;
3952 
3953 	BT_DBG("%s", hdev->name);
3954 
3955 	if (!hci_conn_num(hdev, ESCO_LINK))
3956 		return;
3957 
3958 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3959 						     &quote))) {
3960 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3961 			BT_DBG("skb %p len %d", skb, skb->len);
3962 			hci_send_frame(hdev, skb);
3963 
3964 			conn->sent++;
3965 			if (conn->sent == ~0)
3966 				conn->sent = 0;
3967 		}
3968 	}
3969 }
3970 
/* LE scheduler: like hci_sched_acl_pkt() but with its own timeout check
 * and a buffer budget that falls back to the ACL pool on controllers
 * without dedicated LE buffers.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Without a dedicated LE buffer pool (le_pkts == 0) the ACL
	 * buffers are shared.
	 * NOTE(review): hci_chan_sent() makes the same decision based on
	 * le_mtu rather than le_pkts — verify the two never disagree.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4021 
/* TX work: run all per-link-type schedulers, then flush the raw queue */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* In user channel mode the schedulers are bypassed; only the raw
	 * queue below is serviced.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4042 
4043 /* ----- HCI RX task (incoming data processing) ----- */
4044 
4045 /* ACL data packet */
4046 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4047 {
4048 	struct hci_acl_hdr *hdr = (void *) skb->data;
4049 	struct hci_conn *conn;
4050 	__u16 handle, flags;
4051 
4052 	skb_pull(skb, HCI_ACL_HDR_SIZE);
4053 
4054 	handle = __le16_to_cpu(hdr->handle);
4055 	flags  = hci_flags(handle);
4056 	handle = hci_handle(handle);
4057 
4058 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4059 	       handle, flags);
4060 
4061 	hdev->stat.acl_rx++;
4062 
4063 	hci_dev_lock(hdev);
4064 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4065 	hci_dev_unlock(hdev);
4066 
4067 	if (conn) {
4068 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4069 
4070 		/* Send to upper protocol */
4071 		l2cap_recv_acldata(conn, skb, flags);
4072 		return;
4073 	} else {
4074 		BT_ERR("%s ACL packet for unknown connection handle %d",
4075 		       hdev->name, handle);
4076 	}
4077 
4078 	kfree_skb(skb);
4079 }
4080 
4081 /* SCO data packet */
4082 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4083 {
4084 	struct hci_sco_hdr *hdr = (void *) skb->data;
4085 	struct hci_conn *conn;
4086 	__u16 handle;
4087 
4088 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4089 
4090 	handle = __le16_to_cpu(hdr->handle);
4091 
4092 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4093 
4094 	hdev->stat.sco_rx++;
4095 
4096 	hci_dev_lock(hdev);
4097 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4098 	hci_dev_unlock(hdev);
4099 
4100 	if (conn) {
4101 		/* Send to upper protocol */
4102 		sco_recv_scodata(conn, skb);
4103 		return;
4104 	} else {
4105 		BT_ERR("%s SCO packet for unknown connection handle %d",
4106 		       hdev->name, handle);
4107 	}
4108 
4109 	kfree_skb(skb);
4110 }
4111 
4112 static bool hci_req_is_complete(struct hci_dev *hdev)
4113 {
4114 	struct sk_buff *skb;
4115 
4116 	skb = skb_peek(&hdev->cmd_q);
4117 	if (!skb)
4118 		return true;
4119 
4120 	return bt_cb(skb)->req.start;
4121 }
4122 
/* Re-queue a clone of the most recently sent command so it gets sent
 * again; used to recover from spontaneous controller resets.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	/* Never re-send HCI_Reset */
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Put the command back at the front of the queue and re-kick
	 * the command work.
	 */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4144 
/* Handle completion of a command with the given opcode and status. When
 * the command was the last of an asynchronous request (or it failed),
 * invoke the request's completion callback and discard the request's
 * remaining queued commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A start marker means this command opens the *next*
		 * request; put it back and stop.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4210 
/* RX work: drain the receive queue, mirroring each packet to the
 * monitor (and to sockets in promiscuous mode) before dispatching it to
 * the matching protocol handler.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user channel mode the kernel does not process
		 * packets itself.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
4266 
/* Command work: send the next queued HCI command if the controller has
 * room for one (cmd_cnt > 0), keeping a clone in hdev->sent_cmd until it
 * completes.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previously sent command before replacing it */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the command data and its request
		 * metadata remain available until completion.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No command timeout while a reset is pending;
			 * otherwise (re)arm the timer.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
4298