/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed, the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
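
/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * controller is hci0; both attributes parse Y/N input via strtobool()):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/vendor_diag
 */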

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
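	/* hdev->commands[] mirrors the bitmask returned by the Read Local
	 * Supported Commands command: byte N holds octet N of the
	 * Supported Commands table in the Core specification, so checks
	 * like commands[14] & 0x20 test a single command's support bit.
	 */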
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout: 0x7d00 slots * 0.625 ms = 20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
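	/* events[] is the little-endian Set Event Mask parameter: byte n
	 * bit b enables event mask bit (n * 8 + b) as numbered in the
	 * Core spec, e.g. events[0] bit 4 is Disconnection Complete.
	 */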

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * also be available. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to at least 1.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If the Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If the Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If the controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * This means it is possible for a controller to run through its
	 * setup phase and then discover missing settings. If that is the
	 * case, this function will not be called; it will only be called
	 * during the config phase.
	 *
	 * So only when in the setup phase or config phase, create the
	 * debugfs entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
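
/* Caller sketch (hypothetical): every successful hci_dev_get() must be
 * balanced with hci_dev_put() once the reference is no longer needed:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */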

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
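/* Re-insert ie so the resolve list stays sorted by signal strength:
 * entries with a smaller |rssi| (i.e. a stronger signal) come first,
 * while entries whose name resolution is already pending keep their
 * position at the head of the list.
 */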
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR
		 * via the done label so the device reference is dropped.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
	 * and copy it to user space afterwards.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure has
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

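/* Called after a non-mgmt (legacy ioctl) scan mode change to keep the
 * HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with the SCAN_PAGE
 * and SCAN_INQUIRY bits that were just written to the controller.
 */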
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

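	/* For HCISETACLMTU and HCISETSCOMTU, dev_opt carries two
	 * consecutive __u16 values: the packet count first, then the
	 * MTU, in host memory order.
	 */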
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When auto-off is configured, the transport is running,
		 * but in that case still report the device as down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

1946 	/* When auto-off is configured it means the transport is
1947 	 * running, but in that case the device should still be
1948 	 * reported as down.
1949 	 */
1950 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1951 		flags = hdev->flags & ~BIT(HCI_UP);
1952 	else
1953 		flags = hdev->flags;
1954 
1955 	strcpy(di.name, hdev->name);
1956 	di.bdaddr   = hdev->bdaddr;
1957 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1958 	di.flags    = flags;
1959 	di.pkt_type = hdev->pkt_type;
1960 	if (lmp_bredr_capable(hdev)) {
1961 		di.acl_mtu  = hdev->acl_mtu;
1962 		di.acl_pkts = hdev->acl_pkts;
1963 		di.sco_mtu  = hdev->sco_mtu;
1964 		di.sco_pkts = hdev->sco_pkts;
1965 	} else {
1966 		di.acl_mtu  = hdev->le_mtu;
1967 		di.acl_pkts = hdev->le_pkts;
1968 		di.sco_mtu  = 0;
1969 		di.sco_pkts = 0;
1970 	}
1971 	di.link_policy = hdev->link_policy;
1972 	di.link_mode   = hdev->link_mode;
1973 
1974 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1975 	memcpy(&di.features, &hdev->features, sizeof(di.features));
1976 
1977 	if (copy_to_user(arg, &di, sizeof(di)))
1978 		err = -EFAULT;
1979 
1980 	hci_dev_put(hdev);
1981 
1982 	return err;
1983 }
1984 
1985 /* ---- Interface to HCI drivers ---- */
1986 
1987 static int hci_rfkill_set_block(void *data, bool blocked)
1988 {
1989 	struct hci_dev *hdev = data;
1990 
1991 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1992 
1993 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1994 		return -EBUSY;
1995 
1996 	if (blocked) {
1997 		hci_dev_set_flag(hdev, HCI_RFKILLED);
1998 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1999 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2000 			hci_dev_do_close(hdev);
2001 	} else {
2002 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2003 	}
2004 
2005 	return 0;
2006 }
2007 
2008 static const struct rfkill_ops hci_rfkill_ops = {
2009 	.set_block = hci_rfkill_set_block,
2010 };
2011 
2012 static void hci_power_on(struct work_struct *work)
2013 {
2014 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2015 	int err;
2016 
2017 	BT_DBG("%s", hdev->name);
2018 
2019 	if (test_bit(HCI_UP, &hdev->flags) &&
2020 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2021 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2022 		cancel_delayed_work(&hdev->power_off);
2023 		hci_req_sync_lock(hdev);
2024 		err = __hci_req_hci_power_on(hdev);
2025 		hci_req_sync_unlock(hdev);
2026 		mgmt_power_on(hdev, err);
2027 		return;
2028 	}
2029 
2030 	err = hci_dev_do_open(hdev);
2031 	if (err < 0) {
2032 		hci_dev_lock(hdev);
2033 		mgmt_set_powered_failed(hdev, err);
2034 		hci_dev_unlock(hdev);
2035 		return;
2036 	}
2037 
2038 	/* During the HCI setup phase, a few error conditions are
2039 	 * ignored and they need to be checked now. If they are still
2040 	 * valid, it is important to turn the device back off.
2041 	 */
2042 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2043 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2044 	    (hdev->dev_type == HCI_PRIMARY &&
2045 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2046 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2047 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2048 		hci_dev_do_close(hdev);
2049 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2050 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2051 				   HCI_AUTO_OFF_TIMEOUT);
2052 	}
2053 
2054 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2055 		/* For unconfigured devices, set the HCI_RAW flag
2056 		 * so that userspace can easily identify them.
2057 		 */
2058 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2059 			set_bit(HCI_RAW, &hdev->flags);
2060 
2061 		/* For fully configured devices, this will send
2062 		 * the Index Added event. For unconfigured devices,
2063 		 * it will send the Unconfigured Index Added event.
2064 		 *
2065 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2066 		 * and no event will be sent.
2067 		 */
2068 		mgmt_index_added(hdev);
2069 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2070 		/* When the controller is now configured, then it
2071 		 * is important to clear the HCI_RAW flag.
2072 		 */
2073 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2074 			clear_bit(HCI_RAW, &hdev->flags);
2075 
2076 		/* Powering on the controller with HCI_CONFIG set only
2077 		 * happens with the transition from unconfigured to
2078 		 * configured. This will send the Index Added event.
2079 		 */
2080 		mgmt_index_added(hdev);
2081 	}
2082 }
2083 
2084 static void hci_power_off(struct work_struct *work)
2085 {
2086 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2087 					    power_off.work);
2088 
2089 	BT_DBG("%s", hdev->name);
2090 
2091 	hci_dev_do_close(hdev);
2092 }
2093 
2094 static void hci_error_reset(struct work_struct *work)
2095 {
2096 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2097 
2098 	BT_DBG("%s", hdev->name);
2099 
2100 	if (hdev->hw_error)
2101 		hdev->hw_error(hdev, hdev->hw_error_code);
2102 	else
2103 		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2104 		       hdev->hw_error_code);
2105 
2106 	if (hci_dev_do_close(hdev))
2107 		return;
2108 
2109 	hci_dev_do_open(hdev);
2110 }
2111 
2112 void hci_uuids_clear(struct hci_dev *hdev)
2113 {
2114 	struct bt_uuid *uuid, *tmp;
2115 
2116 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2117 		list_del(&uuid->list);
2118 		kfree(uuid);
2119 	}
2120 }
2121 
2122 void hci_link_keys_clear(struct hci_dev *hdev)
2123 {
2124 	struct link_key *key;
2125 
2126 	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2127 		list_del_rcu(&key->list);
2128 		kfree_rcu(key, rcu);
2129 	}
2130 }
2131 
2132 void hci_smp_ltks_clear(struct hci_dev *hdev)
2133 {
2134 	struct smp_ltk *k;
2135 
2136 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2137 		list_del_rcu(&k->list);
2138 		kfree_rcu(k, rcu);
2139 	}
2140 }
2141 
2142 void hci_smp_irks_clear(struct hci_dev *hdev)
2143 {
2144 	struct smp_irk *k;
2145 
2146 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2147 		list_del_rcu(&k->list);
2148 		kfree_rcu(k, rcu);
2149 	}
2150 }
2151 
2152 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2153 {
2154 	struct link_key *k;
2155 
2156 	rcu_read_lock();
2157 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2158 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2159 			rcu_read_unlock();
2160 			return k;
2161 		}
2162 	}
2163 	rcu_read_unlock();
2164 
2165 	return NULL;
2166 }
2167 
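/* Decide whether a BR/EDR link key is worth storing persistently.
 * Legacy keys (type < 0x03) always are, debug keys never are, and
 * for the remaining types the decision depends on the authentication
 * requirements both sides used for the connection.
 */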
2168 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2169 			       u8 key_type, u8 old_key_type)
2170 {
2171 	/* Legacy key */
2172 	if (key_type < 0x03)
2173 		return true;
2174 
2175 	/* Debug keys are insecure so don't store them persistently */
2176 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2177 		return false;
2178 
2179 	/* Changed combination key and there's no previous one */
2180 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2181 		return false;
2182 
2183 	/* Security mode 3 case */
2184 	if (!conn)
2185 		return true;
2186 
2187 	/* BR/EDR key derived using SC from an LE link */
2188 	if (conn->type == LE_LINK)
2189 		return true;
2190 
2191 	/* Neither local nor remote side had no-bonding as requirement */
2192 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2193 		return true;
2194 
2195 	/* Local side had dedicated bonding as requirement */
2196 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2197 		return true;
2198 
2199 	/* Remote side had dedicated bonding as requirement */
2200 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2201 		return true;
2202 
2203 	/* If none of the above criteria match, then don't store the key
2204 	 * persistently */
2205 	return false;
2206 }
2207 
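/* An SMP_LTK type key is used when the local side is master; all
 * other LTK types belong to the slave role.
 */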
2208 static u8 ltk_role(u8 type)
2209 {
2210 	if (type == SMP_LTK)
2211 		return HCI_ROLE_MASTER;
2212 
2213 	return HCI_ROLE_SLAVE;
2214 }
2215 
2216 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2217 			     u8 addr_type, u8 role)
2218 {
2219 	struct smp_ltk *k;
2220 
2221 	rcu_read_lock();
2222 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2223 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2224 			continue;
2225 
2226 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2227 			rcu_read_unlock();
2228 			return k;
2229 		}
2230 	}
2231 	rcu_read_unlock();
2232 
2233 	return NULL;
2234 }
2235 
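/* Look up an IRK by Resolvable Private Address. The cached RPA of
 * each IRK is checked first; only when that fails is the more
 * expensive cryptographic resolution attempted, and a successful
 * match updates the cached RPA.
 */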
2236 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2237 {
2238 	struct smp_irk *irk;
2239 
2240 	rcu_read_lock();
2241 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2242 		if (!bacmp(&irk->rpa, rpa)) {
2243 			rcu_read_unlock();
2244 			return irk;
2245 		}
2246 	}
2247 
2248 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2249 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2250 			bacpy(&irk->rpa, rpa);
2251 			rcu_read_unlock();
2252 			return irk;
2253 		}
2254 	}
2255 	rcu_read_unlock();
2256 
2257 	return NULL;
2258 }
2259 
2260 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2261 				     u8 addr_type)
2262 {
2263 	struct smp_irk *irk;
2264 
2265 	/* Identity Address must be public or static random */
2266 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2267 		return NULL;
2268 
2269 	rcu_read_lock();
2270 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2271 		if (addr_type == irk->addr_type &&
2272 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2273 			rcu_read_unlock();
2274 			return irk;
2275 		}
2276 	}
2277 	rcu_read_unlock();
2278 
2279 	return NULL;
2280 }
2281 
2282 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2283 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2284 				  u8 pin_len, bool *persistent)
2285 {
2286 	struct link_key *key, *old_key;
2287 	u8 old_key_type;
2288 
2289 	old_key = hci_find_link_key(hdev, bdaddr);
2290 	if (old_key) {
2291 		old_key_type = old_key->type;
2292 		key = old_key;
2293 	} else {
2294 		old_key_type = conn ? conn->key_type : 0xff;
2295 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2296 		if (!key)
2297 			return NULL;
2298 		list_add_rcu(&key->list, &hdev->link_keys);
2299 	}
2300 
2301 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2302 
2303 	/* Some buggy controller combinations generate a changed
2304 	 * combination key for legacy pairing even when there's no
2305 	 * previous key */
2306 	if (type == HCI_LK_CHANGED_COMBINATION &&
2307 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2308 		type = HCI_LK_COMBINATION;
2309 		if (conn)
2310 			conn->key_type = type;
2311 	}
2312 
2313 	bacpy(&key->bdaddr, bdaddr);
2314 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2315 	key->pin_len = pin_len;
2316 
2317 	if (type == HCI_LK_CHANGED_COMBINATION)
2318 		key->type = old_key_type;
2319 	else
2320 		key->type = type;
2321 
2322 	if (persistent)
2323 		*persistent = hci_persistent_key(hdev, conn, type,
2324 						 old_key_type);
2325 
2326 	return key;
2327 }
2328 
2329 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2330 			    u8 addr_type, u8 type, u8 authenticated,
2331 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2332 {
2333 	struct smp_ltk *key, *old_key;
2334 	u8 role = ltk_role(type);
2335 
2336 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2337 	if (old_key)
2338 		key = old_key;
2339 	else {
2340 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2341 		if (!key)
2342 			return NULL;
2343 		list_add_rcu(&key->list, &hdev->long_term_keys);
2344 	}
2345 
2346 	bacpy(&key->bdaddr, bdaddr);
2347 	key->bdaddr_type = addr_type;
2348 	memcpy(key->val, tk, sizeof(key->val));
2349 	key->authenticated = authenticated;
2350 	key->ediv = ediv;
2351 	key->rand = rand;
2352 	key->enc_size = enc_size;
2353 	key->type = type;
2354 
2355 	return key;
2356 }
2357 
2358 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2359 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2360 {
2361 	struct smp_irk *irk;
2362 
2363 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2364 	if (!irk) {
2365 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2366 		if (!irk)
2367 			return NULL;
2368 
2369 		bacpy(&irk->bdaddr, bdaddr);
2370 		irk->addr_type = addr_type;
2371 
2372 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2373 	}
2374 
2375 	memcpy(irk->val, val, 16);
2376 	bacpy(&irk->rpa, rpa);
2377 
2378 	return irk;
2379 }
2380 
2381 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2382 {
2383 	struct link_key *key;
2384 
2385 	key = hci_find_link_key(hdev, bdaddr);
2386 	if (!key)
2387 		return -ENOENT;
2388 
2389 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2390 
2391 	list_del_rcu(&key->list);
2392 	kfree_rcu(key, rcu);
2393 
2394 	return 0;
2395 }
2396 
2397 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2398 {
2399 	struct smp_ltk *k;
2400 	int removed = 0;
2401 
2402 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2403 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2404 			continue;
2405 
2406 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2407 
2408 		list_del_rcu(&k->list);
2409 		kfree_rcu(k, rcu);
2410 		removed++;
2411 	}
2412 
2413 	return removed ? 0 : -ENOENT;
2414 }
2415 
2416 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2417 {
2418 	struct smp_irk *k;
2419 
2420 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2421 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2422 			continue;
2423 
2424 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2425 
2426 		list_del_rcu(&k->list);
2427 		kfree_rcu(k, rcu);
2428 	}
2429 }
2430 
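/* Check whether a pairing key exists for the given address. For LE
 * addresses any matching IRK is used first to translate a Resolvable
 * Private Address into its identity address before the long term
 * keys are searched.
 */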
2431 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2432 {
2433 	struct smp_ltk *k;
2434 	struct smp_irk *irk;
2435 	u8 addr_type;
2436 
2437 	if (type == BDADDR_BREDR) {
2438 		if (hci_find_link_key(hdev, bdaddr))
2439 			return true;
2440 		return false;
2441 	}
2442 
2443 	/* Convert to the HCI address type that struct smp_ltk uses */
2444 	if (type == BDADDR_LE_PUBLIC)
2445 		addr_type = ADDR_LE_DEV_PUBLIC;
2446 	else
2447 		addr_type = ADDR_LE_DEV_RANDOM;
2448 
2449 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2450 	if (irk) {
2451 		bdaddr = &irk->bdaddr;
2452 		addr_type = irk->addr_type;
2453 	}
2454 
2455 	rcu_read_lock();
2456 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2457 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2458 			rcu_read_unlock();
2459 			return true;
2460 		}
2461 	}
2462 	rcu_read_unlock();
2463 
2464 	return false;
2465 }
2466 
2467 /* HCI command timer function */
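/* Fires when a sent command did not complete in time: log the stuck
 * opcode and force the command counter back to one so that the
 * command queue can make progress again.
 */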
2468 static void hci_cmd_timeout(struct work_struct *work)
2469 {
2470 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2471 					    cmd_timer.work);
2472 
2473 	if (hdev->sent_cmd) {
2474 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2475 		u16 opcode = __le16_to_cpu(sent->opcode);
2476 
2477 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2478 	} else {
2479 		BT_ERR("%s command tx timeout", hdev->name);
2480 	}
2481 
2482 	atomic_set(&hdev->cmd_cnt, 1);
2483 	queue_work(hdev->workqueue, &hdev->cmd_work);
2484 }
2485 
2486 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2487 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2488 {
2489 	struct oob_data *data;
2490 
2491 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2492 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2493 			continue;
2494 		if (data->bdaddr_type != bdaddr_type)
2495 			continue;
2496 		return data;
2497 	}
2498 
2499 	return NULL;
2500 }
2501 
2502 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2503 			       u8 bdaddr_type)
2504 {
2505 	struct oob_data *data;
2506 
2507 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2508 	if (!data)
2509 		return -ENOENT;
2510 
2511 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2512 
2513 	list_del(&data->list);
2514 	kfree(data);
2515 
2516 	return 0;
2517 }
2518 
2519 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2520 {
2521 	struct oob_data *data, *n;
2522 
2523 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2524 		list_del(&data->list);
2525 		kfree(data);
2526 	}
2527 }
2528 
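/* Store remote OOB data, reusing any existing entry for the same
 * address. The present field encodes which value sets are valid:
 * 0x01 for P-192 only, 0x02 for P-256 only and 0x03 for both.
 */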
2529 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2530 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2531 			    u8 *hash256, u8 *rand256)
2532 {
2533 	struct oob_data *data;
2534 
2535 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2536 	if (!data) {
2537 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2538 		if (!data)
2539 			return -ENOMEM;
2540 
2541 		bacpy(&data->bdaddr, bdaddr);
2542 		data->bdaddr_type = bdaddr_type;
2543 		list_add(&data->list, &hdev->remote_oob_data);
2544 	}
2545 
2546 	if (hash192 && rand192) {
2547 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2548 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2549 		if (hash256 && rand256)
2550 			data->present = 0x03;
2551 	} else {
2552 		memset(data->hash192, 0, sizeof(data->hash192));
2553 		memset(data->rand192, 0, sizeof(data->rand192));
2554 		if (hash256 && rand256)
2555 			data->present = 0x02;
2556 		else
2557 			data->present = 0x00;
2558 	}
2559 
2560 	if (hash256 && rand256) {
2561 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2562 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2563 	} else {
2564 		memset(data->hash256, 0, sizeof(data->hash256));
2565 		memset(data->rand256, 0, sizeof(data->rand256));
2566 		if (hash192 && rand192)
2567 			data->present = 0x01;
2568 	}
2569 
2570 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2571 
2572 	return 0;
2573 }
2574 
2575 /* This function requires the caller holds hdev->lock */
2576 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2577 {
2578 	struct adv_info *adv_instance;
2579 
2580 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2581 		if (adv_instance->instance == instance)
2582 			return adv_instance;
2583 	}
2584 
2585 	return NULL;
2586 }
2587 
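/* Return the advertising instance that follows the given one,
 * wrapping around to the head of the list after the last entry.
 */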
2588 /* This function requires the caller holds hdev->lock */
2589 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2590 {
2591 	struct adv_info *cur_instance;
2592 
2593 	cur_instance = hci_find_adv_instance(hdev, instance);
2594 	if (!cur_instance)
2595 		return NULL;
2596 
2597 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2598 					    struct adv_info, list))
2599 		return list_first_entry(&hdev->adv_instances,
2600 						 struct adv_info, list);
2601 	else
2602 		return list_next_entry(cur_instance, list);
2603 }
2604 
2605 /* This function requires the caller holds hdev->lock */
2606 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2607 {
2608 	struct adv_info *adv_instance;
2609 
2610 	adv_instance = hci_find_adv_instance(hdev, instance);
2611 	if (!adv_instance)
2612 		return -ENOENT;
2613 
2614 	BT_DBG("%s removing instance %d", hdev->name, instance);
2615 
2616 	if (hdev->cur_adv_instance == instance) {
2617 		if (hdev->adv_instance_timeout) {
2618 			cancel_delayed_work(&hdev->adv_instance_expire);
2619 			hdev->adv_instance_timeout = 0;
2620 		}
2621 		hdev->cur_adv_instance = 0x00;
2622 	}
2623 
2624 	list_del(&adv_instance->list);
2625 	kfree(adv_instance);
2626 
2627 	hdev->adv_instance_cnt--;
2628 
2629 	return 0;
2630 }
2631 
2632 /* This function requires the caller holds hdev->lock */
2633 void hci_adv_instances_clear(struct hci_dev *hdev)
2634 {
2635 	struct adv_info *adv_instance, *n;
2636 
2637 	if (hdev->adv_instance_timeout) {
2638 		cancel_delayed_work(&hdev->adv_instance_expire);
2639 		hdev->adv_instance_timeout = 0;
2640 	}
2641 
2642 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2643 		list_del(&adv_instance->list);
2644 		kfree(adv_instance);
2645 	}
2646 
2647 	hdev->adv_instance_cnt = 0;
2648 	hdev->cur_adv_instance = 0x00;
2649 }
2650 
2651 /* This function requires the caller holds hdev->lock */
2652 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2653 			 u16 adv_data_len, u8 *adv_data,
2654 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2655 			 u16 timeout, u16 duration)
2656 {
2657 	struct adv_info *adv_instance;
2658 
2659 	adv_instance = hci_find_adv_instance(hdev, instance);
2660 	if (adv_instance) {
2661 		memset(adv_instance->adv_data, 0,
2662 		       sizeof(adv_instance->adv_data));
2663 		memset(adv_instance->scan_rsp_data, 0,
2664 		       sizeof(adv_instance->scan_rsp_data));
2665 	} else {
2666 		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2667 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2668 			return -EOVERFLOW;
2669 
2670 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2671 		if (!adv_instance)
2672 			return -ENOMEM;
2673 
2674 		adv_instance->pending = true;
2675 		adv_instance->instance = instance;
2676 		list_add(&adv_instance->list, &hdev->adv_instances);
2677 		hdev->adv_instance_cnt++;
2678 	}
2679 
2680 	adv_instance->flags = flags;
2681 	adv_instance->adv_data_len = adv_data_len;
2682 	adv_instance->scan_rsp_len = scan_rsp_len;
2683 
2684 	if (adv_data_len)
2685 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2686 
2687 	if (scan_rsp_len)
2688 		memcpy(adv_instance->scan_rsp_data,
2689 		       scan_rsp_data, scan_rsp_len);
2690 
2691 	adv_instance->timeout = timeout;
2692 	adv_instance->remaining_time = timeout;
2693 
2694 	if (duration == 0)
2695 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2696 	else
2697 		adv_instance->duration = duration;
2698 
2699 	BT_DBG("%s for instance %d", hdev->name, instance);
2700 
2701 	return 0;
2702 }
2703 
2704 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2705 					 bdaddr_t *bdaddr, u8 type)
2706 {
2707 	struct bdaddr_list *b;
2708 
2709 	list_for_each_entry(b, bdaddr_list, list) {
2710 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2711 			return b;
2712 	}
2713 
2714 	return NULL;
2715 }
2716 
2717 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2718 {
2719 	struct bdaddr_list *b, *n;
2720 
2721 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2722 		list_del(&b->list);
2723 		kfree(b);
2724 	}
2725 }
2726 
2727 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2728 {
2729 	struct bdaddr_list *entry;
2730 
2731 	if (!bacmp(bdaddr, BDADDR_ANY))
2732 		return -EBADF;
2733 
2734 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2735 		return -EEXIST;
2736 
2737 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2738 	if (!entry)
2739 		return -ENOMEM;
2740 
2741 	bacpy(&entry->bdaddr, bdaddr);
2742 	entry->bdaddr_type = type;
2743 
2744 	list_add(&entry->list, list);
2745 
2746 	return 0;
2747 }
2748 
2749 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2750 {
2751 	struct bdaddr_list *entry;
2752 
2753 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2754 		hci_bdaddr_list_clear(list);
2755 		return 0;
2756 	}
2757 
2758 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2759 	if (!entry)
2760 		return -ENOENT;
2761 
2762 	list_del(&entry->list);
2763 	kfree(entry);
2764 
2765 	return 0;
2766 }
2767 
2768 /* This function requires the caller holds hdev->lock */
2769 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2770 					       bdaddr_t *addr, u8 addr_type)
2771 {
2772 	struct hci_conn_params *params;
2773 
2774 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2775 		if (bacmp(&params->addr, addr) == 0 &&
2776 		    params->addr_type == addr_type) {
2777 			return params;
2778 		}
2779 	}
2780 
2781 	return NULL;
2782 }
2783 
2784 /* This function requires the caller holds hdev->lock */
2785 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2786 						  bdaddr_t *addr, u8 addr_type)
2787 {
2788 	struct hci_conn_params *param;
2789 
2790 	list_for_each_entry(param, list, action) {
2791 		if (bacmp(&param->addr, addr) == 0 &&
2792 		    param->addr_type == addr_type)
2793 			return param;
2794 	}
2795 
2796 	return NULL;
2797 }
2798 
2799 /* This function requires the caller holds hdev->lock */
2800 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2801 					    bdaddr_t *addr, u8 addr_type)
2802 {
2803 	struct hci_conn_params *params;
2804 
2805 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2806 	if (params)
2807 		return params;
2808 
2809 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2810 	if (!params) {
2811 		BT_ERR("Out of memory");
2812 		return NULL;
2813 	}
2814 
2815 	bacpy(&params->addr, addr);
2816 	params->addr_type = addr_type;
2817 
2818 	list_add(&params->list, &hdev->le_conn_params);
2819 	INIT_LIST_HEAD(&params->action);
2820 
2821 	params->conn_min_interval = hdev->le_conn_min_interval;
2822 	params->conn_max_interval = hdev->le_conn_max_interval;
2823 	params->conn_latency = hdev->le_conn_latency;
2824 	params->supervision_timeout = hdev->le_supv_timeout;
2825 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2826 
2827 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2828 
2829 	return params;
2830 }
2831 
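/* Release a connection parameter entry, dropping any references it
 * still holds on an active connection.
 */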
2832 static void hci_conn_params_free(struct hci_conn_params *params)
2833 {
2834 	if (params->conn) {
2835 		hci_conn_drop(params->conn);
2836 		hci_conn_put(params->conn);
2837 	}
2838 
2839 	list_del(&params->action);
2840 	list_del(&params->list);
2841 	kfree(params);
2842 }
2843 
2844 /* This function requires the caller holds hdev->lock */
2845 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2846 {
2847 	struct hci_conn_params *params;
2848 
2849 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2850 	if (!params)
2851 		return;
2852 
2853 	hci_conn_params_free(params);
2854 
2855 	hci_update_background_scan(hdev);
2856 
2857 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2858 }
2859 
2860 /* This function requires the caller holds hdev->lock */
2861 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2862 {
2863 	struct hci_conn_params *params, *tmp;
2864 
2865 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2866 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2867 			continue;
2868 
2869 		/* If trying to establish a one-time connection to a disabled
2870 		 * device, leave the params, but mark them as just once.
2871 		 */
2872 		if (params->explicit_connect) {
2873 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2874 			continue;
2875 		}
2876 
2877 		list_del(&params->list);
2878 		kfree(params);
2879 	}
2880 
2881 	BT_DBG("All LE disabled connection parameters were removed");
2882 }
2883 
2884 /* This function requires the caller holds hdev->lock */
2885 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2886 {
2887 	struct hci_conn_params *params, *tmp;
2888 
2889 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2890 		hci_conn_params_free(params);
2891 
2892 	BT_DBG("All LE connection parameters were removed");
2893 }
2894 
2895 /* Copy the Identity Address of the controller.
2896  *
2897  * If the controller has a public BD_ADDR, then by default use that one.
2898  * If this is an LE-only controller without a public address, default to
2899  * the static random address.
2900  *
2901  * For debugging purposes it is possible to force controllers with a
2902  * public address to use the static random address instead.
2903  *
2904  * In case BR/EDR has been disabled on a dual-mode controller and
2905  * userspace has configured a static address, then that address
2906  * becomes the identity address instead of the public BR/EDR address.
2907  */
2908 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2909 			       u8 *bdaddr_type)
2910 {
2911 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2912 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2913 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2914 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2915 		bacpy(bdaddr, &hdev->static_addr);
2916 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2917 	} else {
2918 		bacpy(bdaddr, &hdev->bdaddr);
2919 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2920 	}
2921 }
2922 
2923 /* Alloc HCI device */
2924 struct hci_dev *hci_alloc_dev(void)
2925 {
2926 	struct hci_dev *hdev;
2927 
2928 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2929 	if (!hdev)
2930 		return NULL;
2931 
2932 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2933 	hdev->esco_type = (ESCO_HV1);
2934 	hdev->link_mode = (HCI_LM_ACCEPT);
2935 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2936 	hdev->io_capability = 0x03;	/* No Input No Output */
2937 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2938 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2939 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2940 	hdev->adv_instance_cnt = 0;
2941 	hdev->cur_adv_instance = 0x00;
2942 	hdev->adv_instance_timeout = 0;
2943 
2944 	hdev->sniff_max_interval = 800;
2945 	hdev->sniff_min_interval = 80;
2946 
2947 	hdev->le_adv_channel_map = 0x07;
2948 	hdev->le_adv_min_interval = 0x0800;
2949 	hdev->le_adv_max_interval = 0x0800;
2950 	hdev->le_scan_interval = 0x0060;
2951 	hdev->le_scan_window = 0x0030;
2952 	hdev->le_conn_min_interval = 0x0028;
2953 	hdev->le_conn_max_interval = 0x0038;
2954 	hdev->le_conn_latency = 0x0000;
2955 	hdev->le_supv_timeout = 0x002a;
2956 	hdev->le_def_tx_len = 0x001b;
2957 	hdev->le_def_tx_time = 0x0148;
2958 	hdev->le_max_tx_len = 0x001b;
2959 	hdev->le_max_tx_time = 0x0148;
2960 	hdev->le_max_rx_len = 0x001b;
2961 	hdev->le_max_rx_time = 0x0148;
2962 
2963 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2964 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2965 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2966 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2967 
2968 	mutex_init(&hdev->lock);
2969 	mutex_init(&hdev->req_lock);
2970 
2971 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2972 	INIT_LIST_HEAD(&hdev->blacklist);
2973 	INIT_LIST_HEAD(&hdev->whitelist);
2974 	INIT_LIST_HEAD(&hdev->uuids);
2975 	INIT_LIST_HEAD(&hdev->link_keys);
2976 	INIT_LIST_HEAD(&hdev->long_term_keys);
2977 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2978 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2979 	INIT_LIST_HEAD(&hdev->le_white_list);
2980 	INIT_LIST_HEAD(&hdev->le_conn_params);
2981 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2982 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2983 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2984 	INIT_LIST_HEAD(&hdev->adv_instances);
2985 
2986 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2987 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2988 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2989 	INIT_WORK(&hdev->power_on, hci_power_on);
2990 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2991 
2992 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2993 
2994 	skb_queue_head_init(&hdev->rx_q);
2995 	skb_queue_head_init(&hdev->cmd_q);
2996 	skb_queue_head_init(&hdev->raw_q);
2997 
2998 	init_waitqueue_head(&hdev->req_wait_q);
2999 
3000 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3001 
3002 	hci_request_setup(hdev);
3003 
3004 	hci_init_sysfs(hdev);
3005 	discovery_init(hdev);
3006 
3007 	return hdev;
3008 }
3009 EXPORT_SYMBOL(hci_alloc_dev);
3010 
3011 /* Free HCI device */
3012 void hci_free_dev(struct hci_dev *hdev)
3013 {
3014 	/* will free via device release */
3015 	put_device(&hdev->dev);
3016 }
3017 EXPORT_SYMBOL(hci_free_dev);
3018 
3019 /* Register HCI device */
3020 int hci_register_dev(struct hci_dev *hdev)
3021 {
3022 	int id, error;
3023 
3024 	if (!hdev->open || !hdev->close || !hdev->send)
3025 		return -EINVAL;
3026 
3027 	/* Do not allow HCI_AMP devices to register at index 0,
3028 	 * so the index can be used as the AMP controller ID.
3029 	 */
3030 	switch (hdev->dev_type) {
3031 	case HCI_PRIMARY:
3032 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3033 		break;
3034 	case HCI_AMP:
3035 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3036 		break;
3037 	default:
3038 		return -EINVAL;
3039 	}
3040 
3041 	if (id < 0)
3042 		return id;
3043 
3044 	sprintf(hdev->name, "hci%d", id);
3045 	hdev->id = id;
3046 
3047 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3048 
3049 	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3050 					  WQ_MEM_RECLAIM, 1, hdev->name);
3051 	if (!hdev->workqueue) {
3052 		error = -ENOMEM;
3053 		goto err;
3054 	}
3055 
3056 	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3057 					      WQ_MEM_RECLAIM, 1, hdev->name);
3058 	if (!hdev->req_workqueue) {
3059 		destroy_workqueue(hdev->workqueue);
3060 		error = -ENOMEM;
3061 		goto err;
3062 	}
3063 
3064 	if (!IS_ERR_OR_NULL(bt_debugfs))
3065 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3066 
3067 	dev_set_name(&hdev->dev, "%s", hdev->name);
3068 
3069 	error = device_add(&hdev->dev);
3070 	if (error < 0)
3071 		goto err_wqueue;
3072 
3073 	hci_leds_init(hdev);
3074 
3075 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3076 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3077 				    hdev);
3078 	if (hdev->rfkill) {
3079 		if (rfkill_register(hdev->rfkill) < 0) {
3080 			rfkill_destroy(hdev->rfkill);
3081 			hdev->rfkill = NULL;
3082 		}
3083 	}
3084 
3085 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3086 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3087 
3088 	hci_dev_set_flag(hdev, HCI_SETUP);
3089 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3090 
3091 	if (hdev->dev_type == HCI_PRIMARY) {
3092 		/* Assume BR/EDR support until proven otherwise (such as
3093 	 * through reading supported features during init).
3094 		 */
3095 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3096 	}
3097 
3098 	write_lock(&hci_dev_list_lock);
3099 	list_add(&hdev->list, &hci_dev_list);
3100 	write_unlock(&hci_dev_list_lock);
3101 
3102 	/* Devices that are marked for raw-only usage are unconfigured
3103 	 * and should not be included in normal operation.
3104 	 */
3105 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3106 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3107 
3108 	hci_sock_dev_event(hdev, HCI_DEV_REG);
3109 	hci_dev_hold(hdev);
3110 
3111 	queue_work(hdev->req_workqueue, &hdev->power_on);
3112 
3113 	return id;
3114 
3115 err_wqueue:
3116 	destroy_workqueue(hdev->workqueue);
3117 	destroy_workqueue(hdev->req_workqueue);
3118 err:
3119 	ida_simple_remove(&hci_index_ida, hdev->id);
3120 
3121 	return error;
3122 }
3123 EXPORT_SYMBOL(hci_register_dev);
3124 
3125 /* Unregister HCI device */
3126 void hci_unregister_dev(struct hci_dev *hdev)
3127 {
3128 	int id;
3129 
3130 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3131 
3132 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3133 
3134 	id = hdev->id;
3135 
3136 	write_lock(&hci_dev_list_lock);
3137 	list_del(&hdev->list);
3138 	write_unlock(&hci_dev_list_lock);
3139 
3140 	cancel_work_sync(&hdev->power_on);
3141 
3142 	hci_dev_do_close(hdev);
3143 
3144 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3145 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3146 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3147 		hci_dev_lock(hdev);
3148 		mgmt_index_removed(hdev);
3149 		hci_dev_unlock(hdev);
3150 	}
3151 
3152 	/* mgmt_index_removed should take care of emptying the
3153 	 * pending list */
3154 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3155 
3156 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3157 
3158 	if (hdev->rfkill) {
3159 		rfkill_unregister(hdev->rfkill);
3160 		rfkill_destroy(hdev->rfkill);
3161 	}
3162 
3163 	device_del(&hdev->dev);
3164 
3165 	debugfs_remove_recursive(hdev->debugfs);
3166 	kfree_const(hdev->hw_info);
3167 	kfree_const(hdev->fw_info);
3168 
3169 	destroy_workqueue(hdev->workqueue);
3170 	destroy_workqueue(hdev->req_workqueue);
3171 
3172 	hci_dev_lock(hdev);
3173 	hci_bdaddr_list_clear(&hdev->blacklist);
3174 	hci_bdaddr_list_clear(&hdev->whitelist);
3175 	hci_uuids_clear(hdev);
3176 	hci_link_keys_clear(hdev);
3177 	hci_smp_ltks_clear(hdev);
3178 	hci_smp_irks_clear(hdev);
3179 	hci_remote_oob_data_clear(hdev);
3180 	hci_adv_instances_clear(hdev);
3181 	hci_bdaddr_list_clear(&hdev->le_white_list);
3182 	hci_conn_params_clear_all(hdev);
3183 	hci_discovery_filter_clear(hdev);
3184 	hci_dev_unlock(hdev);
3185 
3186 	hci_dev_put(hdev);
3187 
3188 	ida_simple_remove(&hci_index_ida, id);
3189 }
3190 EXPORT_SYMBOL(hci_unregister_dev);
3191 
3192 /* Suspend HCI device */
3193 int hci_suspend_dev(struct hci_dev *hdev)
3194 {
3195 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3196 	return 0;
3197 }
3198 EXPORT_SYMBOL(hci_suspend_dev);
3199 
3200 /* Resume HCI device */
3201 int hci_resume_dev(struct hci_dev *hdev)
3202 {
3203 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3204 	return 0;
3205 }
3206 EXPORT_SYMBOL(hci_resume_dev);
3207 
3208 /* Reset HCI device */
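/* Inject a synthetic HCI Hardware Error event (event code, length
 * 0x01, error code 0x00) into the RX path so the stack reacts as if
 * the controller itself had reported the error.
 */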
3209 int hci_reset_dev(struct hci_dev *hdev)
3210 {
3211 	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3212 	struct sk_buff *skb;
3213 
3214 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3215 	if (!skb)
3216 		return -ENOMEM;
3217 
3218 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3219 	memcpy(skb_put(skb, 3), hw_err, 3);
3220 
3221 	/* Send Hardware Error to upper stack */
3222 	return hci_recv_frame(hdev, skb);
3223 }
3224 EXPORT_SYMBOL(hci_reset_dev);
3225 
3226 /* Receive frame from HCI drivers */
3227 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3228 {
3229 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3230 		      && !test_bit(HCI_INIT, &hdev->flags))) {
3231 		kfree_skb(skb);
3232 		return -ENXIO;
3233 	}
3234 
3235 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3236 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3237 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3238 		kfree_skb(skb);
3239 		return -EINVAL;
3240 	}
3241 
3242 	/* Incoming skb */
3243 	bt_cb(skb)->incoming = 1;
3244 
3245 	/* Time stamp */
3246 	__net_timestamp(skb);
3247 
3248 	skb_queue_tail(&hdev->rx_q, skb);
3249 	queue_work(hdev->workqueue, &hdev->rx_work);
3250 
3251 	return 0;
3252 }
3253 EXPORT_SYMBOL(hci_recv_frame);
3254 
3255 /* Receive diagnostic message from HCI drivers */
3256 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3257 {
3258 	/* Mark as diagnostic packet */
3259 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3260 
3261 	/* Time stamp */
3262 	__net_timestamp(skb);
3263 
3264 	skb_queue_tail(&hdev->rx_q, skb);
3265 	queue_work(hdev->workqueue, &hdev->rx_work);
3266 
3267 	return 0;
3268 }
3269 EXPORT_SYMBOL(hci_recv_diag);
3270 
3271 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3272 {
3273 	va_list vargs;
3274 
3275 	va_start(vargs, fmt);
3276 	kfree_const(hdev->hw_info);
3277 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3278 	va_end(vargs);
3279 }
3280 EXPORT_SYMBOL(hci_set_hw_info);
3281 
3282 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3283 {
3284 	va_list vargs;
3285 
3286 	va_start(vargs, fmt);
3287 	kfree_const(hdev->fw_info);
3288 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3289 	va_end(vargs);
3290 }
3291 EXPORT_SYMBOL(hci_set_fw_info);
3292 
3293 /* ---- Interface to upper protocols ---- */
3294 
3295 int hci_register_cb(struct hci_cb *cb)
3296 {
3297 	BT_DBG("%p name %s", cb, cb->name);
3298 
3299 	mutex_lock(&hci_cb_list_lock);
3300 	list_add_tail(&cb->list, &hci_cb_list);
3301 	mutex_unlock(&hci_cb_list_lock);
3302 
3303 	return 0;
3304 }
3305 EXPORT_SYMBOL(hci_register_cb);
3306 
3307 int hci_unregister_cb(struct hci_cb *cb)
3308 {
3309 	BT_DBG("%p name %s", cb, cb->name);
3310 
3311 	mutex_lock(&hci_cb_list_lock);
3312 	list_del(&cb->list);
3313 	mutex_unlock(&hci_cb_list_lock);
3314 
3315 	return 0;
3316 }
3317 EXPORT_SYMBOL(hci_unregister_cb);
3318 
3319 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3320 {
3321 	int err;
3322 
3323 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3324 	       skb->len);
3325 
3326 	/* Time stamp */
3327 	__net_timestamp(skb);
3328 
3329 	/* Send copy to monitor */
3330 	hci_send_to_monitor(hdev, skb);
3331 
3332 	if (atomic_read(&hdev->promisc)) {
3333 		/* Send copy to the sockets */
3334 		hci_send_to_sock(hdev, skb);
3335 	}
3336 
3337 	/* Get rid of skb owner, prior to sending to the driver. */
3338 	skb_orphan(skb);
3339 
3340 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3341 		kfree_skb(skb);
3342 		return;
3343 	}
3344 
3345 	err = hdev->send(hdev, skb);
3346 	if (err < 0) {
3347 		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3348 		kfree_skb(skb);
3349 	}
3350 }
3351 
3352 /* Send HCI command */
3353 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3354 		 const void *param)
3355 {
3356 	struct sk_buff *skb;
3357 
3358 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3359 
3360 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3361 	if (!skb) {
3362 		BT_ERR("%s no memory for command", hdev->name);
3363 		return -ENOMEM;
3364 	}
3365 
3366 	/* Stand-alone HCI commands must be flagged as
3367 	 * single-command requests.
3368 	 */
3369 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3370 
3371 	skb_queue_tail(&hdev->cmd_q, skb);
3372 	queue_work(hdev->workqueue, &hdev->cmd_work);
3373 
3374 	return 0;
3375 }
3376 
3377 /* Get data from the previously sent command */
3378 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3379 {
3380 	struct hci_command_hdr *hdr;
3381 
3382 	if (!hdev->sent_cmd)
3383 		return NULL;
3384 
3385 	hdr = (void *) hdev->sent_cmd->data;
3386 
3387 	if (hdr->opcode != cpu_to_le16(opcode))
3388 		return NULL;
3389 
3390 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3391 
3392 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3393 }
3394 
3395 /* Send HCI command and wait for command complete event */
3396 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3397 			     const void *param, u32 timeout)
3398 {
3399 	struct sk_buff *skb;
3400 
3401 	if (!test_bit(HCI_UP, &hdev->flags))
3402 		return ERR_PTR(-ENETDOWN);
3403 
3404 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3405 
3406 	hci_req_sync_lock(hdev);
3407 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3408 	hci_req_sync_unlock(hdev);
3409 
3410 	return skb;
3411 }
3412 EXPORT_SYMBOL(hci_cmd_sync);
3413 
3414 /* Send ACL data */
3415 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3416 {
3417 	struct hci_acl_hdr *hdr;
3418 	int len = skb->len;
3419 
3420 	skb_push(skb, HCI_ACL_HDR_SIZE);
3421 	skb_reset_transport_header(skb);
3422 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3423 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3424 	hdr->dlen   = cpu_to_le16(len);
3425 }
3426 
3427 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3428 			  struct sk_buff *skb, __u16 flags)
3429 {
3430 	struct hci_conn *conn = chan->conn;
3431 	struct hci_dev *hdev = conn->hdev;
3432 	struct sk_buff *list;
3433 
3434 	skb->len = skb_headlen(skb);
3435 	skb->data_len = 0;
3436 
3437 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3438 
3439 	switch (hdev->dev_type) {
3440 	case HCI_PRIMARY:
3441 		hci_add_acl_hdr(skb, conn->handle, flags);
3442 		break;
3443 	case HCI_AMP:
3444 		hci_add_acl_hdr(skb, chan->handle, flags);
3445 		break;
3446 	default:
3447 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3448 		return;
3449 	}
3450 
3451 	list = skb_shinfo(skb)->frag_list;
3452 	if (!list) {
3453 		/* Non fragmented */
3454 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3455 
3456 		skb_queue_tail(queue, skb);
3457 	} else {
3458 		/* Fragmented */
3459 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3460 
3461 		skb_shinfo(skb)->frag_list = NULL;
3462 
3463 		/* Queue all fragments atomically. We need to use spin_lock_bh
3464 		 * here because of 6LoWPAN links, as there this function is
3465 		 * called from softirq and using normal spin lock could cause
3466 		 * deadlocks.
3467 		 */
3468 		spin_lock_bh(&queue->lock);
3469 
3470 		__skb_queue_tail(queue, skb);
3471 
3472 		flags &= ~ACL_START;
3473 		flags |= ACL_CONT;
3474 		do {
3475 			skb = list; list = list->next;
3476 
3477 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3478 			hci_add_acl_hdr(skb, conn->handle, flags);
3479 
3480 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3481 
3482 			__skb_queue_tail(queue, skb);
3483 		} while (list);
3484 
3485 		spin_unlock_bh(&queue->lock);
3486 	}
3487 }
3488 
3489 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3490 {
3491 	struct hci_dev *hdev = chan->conn->hdev;
3492 
3493 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3494 
3495 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3496 
3497 	queue_work(hdev->workqueue, &hdev->tx_work);
3498 }
3499 
3500 /* Send SCO data */
3501 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3502 {
3503 	struct hci_dev *hdev = conn->hdev;
3504 	struct hci_sco_hdr hdr;
3505 
3506 	BT_DBG("%s len %d", hdev->name, skb->len);
3507 
3508 	hdr.handle = cpu_to_le16(conn->handle);
3509 	hdr.dlen   = skb->len;
3510 
3511 	skb_push(skb, HCI_SCO_HDR_SIZE);
3512 	skb_reset_transport_header(skb);
3513 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3514 
3515 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3516 
3517 	skb_queue_tail(&conn->data_q, skb);
3518 	queue_work(hdev->workqueue, &hdev->tx_work);
3519 }
3520 
3521 /* ---- HCI TX task (outgoing data) ---- */
3522 
3523 /* HCI Connection scheduler */
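/* Pick, among the connections of the given type that have queued
 * data, the one with the fewest packets in flight, and size its send
 * quote as the free controller buffers divided by the number of
 * ready connections.
 */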
3524 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3525 				     int *quote)
3526 {
3527 	struct hci_conn_hash *h = &hdev->conn_hash;
3528 	struct hci_conn *conn = NULL, *c;
3529 	unsigned int num = 0, min = ~0;
3530 
3531 	/* We don't have to lock device here. Connections are always
3532 	 * added and removed with TX task disabled. */
3533 
3534 	rcu_read_lock();
3535 
3536 	list_for_each_entry_rcu(c, &h->list, list) {
3537 		if (c->type != type || skb_queue_empty(&c->data_q))
3538 			continue;
3539 
3540 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3541 			continue;
3542 
3543 		num++;
3544 
3545 		if (c->sent < min) {
3546 			min  = c->sent;
3547 			conn = c;
3548 		}
3549 
3550 		if (hci_conn_num(hdev, type) == num)
3551 			break;
3552 	}
3553 
3554 	rcu_read_unlock();
3555 
3556 	if (conn) {
3557 		int cnt, q;
3558 
3559 		switch (conn->type) {
3560 		case ACL_LINK:
3561 			cnt = hdev->acl_cnt;
3562 			break;
3563 		case SCO_LINK:
3564 		case ESCO_LINK:
3565 			cnt = hdev->sco_cnt;
3566 			break;
3567 		case LE_LINK:
3568 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3569 			break;
3570 		default:
3571 			cnt = 0;
3572 			BT_ERR("Unknown link type");
3573 		}
3574 
3575 		q = cnt / num;
3576 		*quote = q ? q : 1;
3577 	} else
3578 		*quote = 0;
3579 
3580 	BT_DBG("conn %p quote %d", conn, *quote);
3581 	return conn;
3582 }
3583 
3584 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3585 {
3586 	struct hci_conn_hash *h = &hdev->conn_hash;
3587 	struct hci_conn *c;
3588 
3589 	BT_ERR("%s link tx timeout", hdev->name);
3590 
3591 	rcu_read_lock();
3592 
3593 	/* Kill stalled connections */
3594 	list_for_each_entry_rcu(c, &h->list, list) {
3595 		if (c->type == type && c->sent) {
3596 			BT_ERR("%s killing stalled connection %pMR",
3597 			       hdev->name, &c->dst);
3598 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3599 		}
3600 	}
3601 
3602 	rcu_read_unlock();
3603 }
3604 
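/* Select the next HCI channel to service: only channels whose head
 * packet carries the highest priority currently queued are
 * considered, and among those the connection with the fewest packets
 * in flight wins. The quote is again derived from the free
 * controller buffers.
 */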
3605 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3606 				      int *quote)
3607 {
3608 	struct hci_conn_hash *h = &hdev->conn_hash;
3609 	struct hci_chan *chan = NULL;
3610 	unsigned int num = 0, min = ~0, cur_prio = 0;
3611 	struct hci_conn *conn;
3612 	int cnt, q, conn_num = 0;
3613 
3614 	BT_DBG("%s", hdev->name);
3615 
3616 	rcu_read_lock();
3617 
3618 	list_for_each_entry_rcu(conn, &h->list, list) {
3619 		struct hci_chan *tmp;
3620 
3621 		if (conn->type != type)
3622 			continue;
3623 
3624 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3625 			continue;
3626 
3627 		conn_num++;
3628 
3629 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3630 			struct sk_buff *skb;
3631 
3632 			if (skb_queue_empty(&tmp->data_q))
3633 				continue;
3634 
3635 			skb = skb_peek(&tmp->data_q);
3636 			if (skb->priority < cur_prio)
3637 				continue;
3638 
3639 			if (skb->priority > cur_prio) {
3640 				num = 0;
3641 				min = ~0;
3642 				cur_prio = skb->priority;
3643 			}
3644 
3645 			num++;
3646 
3647 			if (conn->sent < min) {
3648 				min  = conn->sent;
3649 				chan = tmp;
3650 			}
3651 		}
3652 
3653 		if (hci_conn_num(hdev, type) == conn_num)
3654 			break;
3655 	}
3656 
3657 	rcu_read_unlock();
3658 
3659 	if (!chan)
3660 		return NULL;
3661 
3662 	switch (chan->conn->type) {
3663 	case ACL_LINK:
3664 		cnt = hdev->acl_cnt;
3665 		break;
3666 	case AMP_LINK:
3667 		cnt = hdev->block_cnt;
3668 		break;
3669 	case SCO_LINK:
3670 	case ESCO_LINK:
3671 		cnt = hdev->sco_cnt;
3672 		break;
3673 	case LE_LINK:
3674 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3675 		break;
3676 	default:
3677 		cnt = 0;
3678 		BT_ERR("Unknown link type");
3679 	}
3680 
3681 	q = cnt / num;
3682 	*quote = q ? q : 1;
3683 	BT_DBG("chan %p quote %d", chan, *quote);
3684 	return chan;
3685 }
3686 
3687 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
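/* After a scheduling round, promote the head packets of channels
 * that did not get to send so that lower-priority traffic is not
 * starved indefinitely.
 */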
3688 {
3689 	struct hci_conn_hash *h = &hdev->conn_hash;
3690 	struct hci_conn *conn;
3691 	int num = 0;
3692 
3693 	BT_DBG("%s", hdev->name);
3694 
3695 	rcu_read_lock();
3696 
3697 	list_for_each_entry_rcu(conn, &h->list, list) {
3698 		struct hci_chan *chan;
3699 
3700 		if (conn->type != type)
3701 			continue;
3702 
3703 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3704 			continue;
3705 
3706 		num++;
3707 
3708 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3709 			struct sk_buff *skb;
3710 
3711 			if (chan->sent) {
3712 				chan->sent = 0;
3713 				continue;
3714 			}
3715 
3716 			if (skb_queue_empty(&chan->data_q))
3717 				continue;
3718 
3719 			skb = skb_peek(&chan->data_q);
3720 			if (skb->priority >= HCI_PRIO_MAX - 1)
3721 				continue;
3722 
3723 			skb->priority = HCI_PRIO_MAX - 1;
3724 
3725 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3726 			       skb->priority);
3727 		}
3728 
3729 		if (hci_conn_num(hdev, type) == num)
3730 			break;
3731 	}
3732 
3733 	rcu_read_unlock();
3734 
3735 }
3736 
3737 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3738 {
3739 	/* Calculate count of blocks used by this packet */
3740 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3741 }
3742 
3743 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3744 {
3745 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3746 		/* ACL tx timeout must be longer than maximum
3747 		 * link supervision timeout (40.9 seconds) */
3748 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3749 				       HCI_ACL_TX_TIMEOUT))
3750 			hci_link_tx_to(hdev, ACL_LINK);
3751 	}
3752 }
3753 
3754 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3755 {
3756 	unsigned int cnt = hdev->acl_cnt;
3757 	struct hci_chan *chan;
3758 	struct sk_buff *skb;
3759 	int quote;
3760 
3761 	__check_timeout(hdev, cnt);
3762 
3763 	while (hdev->acl_cnt &&
3764 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3765 		u32 priority = (skb_peek(&chan->data_q))->priority;
3766 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3767 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3768 			       skb->len, skb->priority);
3769 
3770 			/* Stop if priority has changed */
3771 			if (skb->priority < priority)
3772 				break;
3773 
3774 			skb = skb_dequeue(&chan->data_q);
3775 
3776 			hci_conn_enter_active_mode(chan->conn,
3777 						   bt_cb(skb)->force_active);
3778 
3779 			hci_send_frame(hdev, skb);
3780 			hdev->acl_last_tx = jiffies;
3781 
3782 			hdev->acl_cnt--;
3783 			chan->sent++;
3784 			chan->conn->sent++;
3785 		}
3786 	}
3787 
3788 	if (cnt != hdev->acl_cnt)
3789 		hci_prio_recalculate(hdev, ACL_LINK);
3790 }
3791 
3792 static void hci_sched_acl_blk(struct hci_dev *hdev)
3793 {
3794 	unsigned int cnt = hdev->block_cnt;
3795 	struct hci_chan *chan;
3796 	struct sk_buff *skb;
3797 	int quote;
3798 	u8 type;
3799 
3800 	__check_timeout(hdev, cnt);
3801 
3802 	BT_DBG("%s", hdev->name);
3803 
3804 	if (hdev->dev_type == HCI_AMP)
3805 		type = AMP_LINK;
3806 	else
3807 		type = ACL_LINK;
3808 
3809 	while (hdev->block_cnt > 0 &&
3810 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3811 		u32 priority = (skb_peek(&chan->data_q))->priority;
3812 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3813 			int blocks;
3814 
3815 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3816 			       skb->len, skb->priority);
3817 
3818 			/* Stop if priority has changed */
3819 			if (skb->priority < priority)
3820 				break;
3821 
3822 			skb = skb_dequeue(&chan->data_q);
3823 
3824 			blocks = __get_blocks(hdev, skb);
3825 			if (blocks > hdev->block_cnt)
3826 				return;
3827 
3828 			hci_conn_enter_active_mode(chan->conn,
3829 						   bt_cb(skb)->force_active);
3830 
3831 			hci_send_frame(hdev, skb);
3832 			hdev->acl_last_tx = jiffies;
3833 
3834 			hdev->block_cnt -= blocks;
3835 			quote -= blocks;
3836 
3837 			chan->sent += blocks;
3838 			chan->conn->sent += blocks;
3839 		}
3840 	}
3841 
3842 	if (cnt != hdev->block_cnt)
3843 		hci_prio_recalculate(hdev, type);
3844 }
3845 
3846 static void hci_sched_acl(struct hci_dev *hdev)
3847 {
3848 	BT_DBG("%s", hdev->name);
3849 
3850 	/* Nothing to do without ACL links on a BR/EDR controller */
3851 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3852 		return;
3853 
3854 	/* Nothing to do without AMP links on an AMP controller */
3855 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3856 		return;
3857 
3858 	switch (hdev->flow_ctl_mode) {
3859 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
3860 		hci_sched_acl_pkt(hdev);
3861 		break;
3862 
3863 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3864 		hci_sched_acl_blk(hdev);
3865 		break;
3866 	}
3867 }
3868 
3869 /* Schedule SCO */
3870 static void hci_sched_sco(struct hci_dev *hdev)
3871 {
3872 	struct hci_conn *conn;
3873 	struct sk_buff *skb;
3874 	int quote;
3875 
3876 	BT_DBG("%s", hdev->name);
3877 
3878 	if (!hci_conn_num(hdev, SCO_LINK))
3879 		return;
3880 
3881 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3882 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3883 			BT_DBG("skb %p len %d", skb, skb->len);
3884 			hci_send_frame(hdev, skb);
3885 
3886 			conn->sent++;
3887 			if (conn->sent == ~0)
3888 				conn->sent = 0;
3889 		}
3890 	}
3891 }
3892 
3893 static void hci_sched_esco(struct hci_dev *hdev)
3894 {
3895 	struct hci_conn *conn;
3896 	struct sk_buff *skb;
3897 	int quote;
3898 
3899 	BT_DBG("%s", hdev->name);
3900 
3901 	if (!hci_conn_num(hdev, ESCO_LINK))
3902 		return;
3903 
3904 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3905 						     &quote))) {
3906 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3907 			BT_DBG("skb %p len %d", skb, skb->len);
3908 			hci_send_frame(hdev, skb);
3909 
3910 			conn->sent++;
3911 			if (conn->sent == ~0)
3912 				conn->sent = 0;
3913 		}
3914 	}
3915 }
3916 
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

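/* TX work: run each link-type scheduler, then flush queued raw packets of
 * unknown type. When a user channel owns the device the schedulers are
 * skipped; userspace is expected to do its own scheduling in that case.
 */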
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

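	/* The 16-bit handle field packs a 12-bit connection handle with
	 * the 2-bit Packet Boundary and 2-bit Broadcast flags;
	 * hci_handle() and hci_flags() split them apart. For example, a
	 * raw value of 0x2345 yields handle 0x345 and flags 0x2 (PB set
	 * to "start of automatically-flushable PDU").
	 */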
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

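/* Commands are queued in batches ("requests"); the first command of each
 * request is tagged with HCI_REQ_START. If the head of cmd_q starts a new
 * request, or the queue is empty, the current request has fully drained.
 */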
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

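/* Re-queue a clone of the last command we sent. This is only useful as a
 * recovery step (see the CSR quirk handling in hci_req_cmd_complete); a
 * pending HCI_OP_RESET is deliberately never resent.
 */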
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

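/* Resolve which completion callback, if any, should run for the command
 * identified by opcode/status. For the final command of a request the
 * callback sits on hdev->sent_cmd; on failure, the remaining queued
 * commands of the request are flushed and the callback is taken from them.
 */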
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

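/* RX work: every inbound frame is mirrored to the monitor socket first
 * and, in promiscuous mode, to HCI sockets as well. Frames are dropped
 * here instead of dispatched when a user channel owns the device, and
 * data packets are dropped while HCI_INIT is set.
 */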
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

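/* Command work: cmd_cnt tracks the controller's outstanding-command credit
 * (Num_HCI_Command_Packets). A clone of each command is kept in
 * hdev->sent_cmd so its completion event can be matched, and cmd_timer
 * acts as a watchdog unless a reset is in flight.
 */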
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}