xref: /linux/net/bluetooth/hci_core.c (revision 9d106c6dd81bb26ad7fc3ee89cb1d62557c8e2c9)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 
48 static void hci_rx_work(struct work_struct *work);
49 static void hci_cmd_work(struct work_struct *work);
50 static void hci_tx_work(struct work_struct *work);
51 
52 /* HCI device list */
53 LIST_HEAD(hci_dev_list);
54 DEFINE_RWLOCK(hci_dev_list_lock);
55 
56 /* HCI callback list */
57 LIST_HEAD(hci_cb_list);
58 DEFINE_MUTEX(hci_cb_list_lock);
59 
60 /* HCI ID Numbering */
61 static DEFINE_IDA(hci_index_ida);
62 
63 /* ---- HCI debugfs entries ---- */
64 
65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 			     size_t count, loff_t *ppos)
67 {
68 	struct hci_dev *hdev = file->private_data;
69 	char buf[3];
70 
71 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
72 	buf[1] = '\n';
73 	buf[2] = '\0';
74 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75 }
76 
77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 			      size_t count, loff_t *ppos)
79 {
80 	struct hci_dev *hdev = file->private_data;
81 	struct sk_buff *skb;
82 	bool enable;
83 	int err;
84 
85 	if (!test_bit(HCI_UP, &hdev->flags))
86 		return -ENETDOWN;
87 
88 	err = kstrtobool_from_user(user_buf, count, &enable);
89 	if (err)
90 		return err;
91 
92 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
93 		return -EALREADY;
94 
95 	hci_req_sync_lock(hdev);
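	/* There is no HCI command to leave Device Under Test mode, so a
	 * controller reset is used to exit it again.
	 */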
96 	if (enable)
97 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
98 				     HCI_CMD_TIMEOUT);
99 	else
100 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
101 				     HCI_CMD_TIMEOUT);
102 	hci_req_sync_unlock(hdev);
103 
104 	if (IS_ERR(skb))
105 		return PTR_ERR(skb);
106 
107 	kfree_skb(skb);
108 
109 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
110 
111 	return count;
112 }
113 
114 static const struct file_operations dut_mode_fops = {
115 	.open		= simple_open,
116 	.read		= dut_mode_read,
117 	.write		= dut_mode_write,
118 	.llseek		= default_llseek,
119 };
120 
121 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122 				size_t count, loff_t *ppos)
123 {
124 	struct hci_dev *hdev = file->private_data;
125 	char buf[3];
126 
127 	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
128 	buf[1] = '\n';
129 	buf[2] = '\0';
130 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
131 }
132 
133 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
134 				 size_t count, loff_t *ppos)
135 {
136 	struct hci_dev *hdev = file->private_data;
137 	bool enable;
138 	int err;
139 
140 	err = kstrtobool_from_user(user_buf, count, &enable);
141 	if (err)
142 		return err;
143 
144 	/* When the diagnostic flags are not persistent and the transport
145 	 * is not active or in user channel operation, then there is no need
146 	 * for the vendor callback. Instead just store the desired value and
147 	 * the setting will be programmed when the controller gets powered on.
148 	 */
149 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
150 	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
151 	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
152 		goto done;
153 
154 	hci_req_sync_lock(hdev);
155 	err = hdev->set_diag(hdev, enable);
156 	hci_req_sync_unlock(hdev);
157 
158 	if (err < 0)
159 		return err;
160 
161 done:
162 	if (enable)
163 		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
164 	else
165 		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
166 
167 	return count;
168 }
169 
170 static const struct file_operations vendor_diag_fops = {
171 	.open		= simple_open,
172 	.read		= vendor_diag_read,
173 	.write		= vendor_diag_write,
174 	.llseek		= default_llseek,
175 };
176 
177 static void hci_debugfs_create_basic(struct hci_dev *hdev)
178 {
179 	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
180 			    &dut_mode_fops);
181 
182 	if (hdev->set_diag)
183 		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
184 				    &vendor_diag_fops);
185 }
186 
187 static int hci_reset_req(struct hci_request *req, unsigned long opt)
188 {
189 	BT_DBG("%s %ld", req->hdev->name, opt);
190 
191 	/* Reset device */
192 	set_bit(HCI_RESET, &req->hdev->flags);
193 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
194 	return 0;
195 }
196 
197 static void bredr_init(struct hci_request *req)
198 {
199 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
200 
201 	/* Read Local Supported Features */
202 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
203 
204 	/* Read Local Version */
205 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
206 
207 	/* Read BD Address */
208 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
209 }
210 
211 static void amp_init1(struct hci_request *req)
212 {
213 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
214 
215 	/* Read Local Version */
216 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
217 
218 	/* Read Local Supported Commands */
219 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
220 
221 	/* Read Local AMP Info */
222 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
223 
224 	/* Read Data Blk size */
225 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
226 
227 	/* Read Flow Control Mode */
228 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
229 
230 	/* Read Location Data */
231 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
232 }
233 
234 static int amp_init2(struct hci_request *req)
235 {
236 	/* Read Local Supported Features. Not all AMP controllers
237 	 * support this so it's placed conditionally in the second
238 	 * stage init.
239 	 */
240 	if (req->hdev->commands[14] & 0x20)
241 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
242 
243 	return 0;
244 }
245 
246 static int hci_init1_req(struct hci_request *req, unsigned long opt)
247 {
248 	struct hci_dev *hdev = req->hdev;
249 
250 	BT_DBG("%s %ld", hdev->name, opt);
251 
252 	/* Reset */
253 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
254 		hci_reset_req(req, 0);
255 
256 	switch (hdev->dev_type) {
257 	case HCI_PRIMARY:
258 		bredr_init(req);
259 		break;
260 	case HCI_AMP:
261 		amp_init1(req);
262 		break;
263 	default:
264 		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
265 		break;
266 	}
267 
268 	return 0;
269 }
270 
271 static void bredr_setup(struct hci_request *req)
272 {
273 	__le16 param;
274 	__u8 flt_type;
275 
276 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
277 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
278 
279 	/* Read Class of Device */
280 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
281 
282 	/* Read Local Name */
283 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
284 
285 	/* Read Voice Setting */
286 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
287 
288 	/* Read Number of Supported IAC */
289 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
290 
291 	/* Read Current IAC LAP */
292 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
293 
294 	/* Clear Event Filters */
295 	flt_type = HCI_FLT_CLEAR_ALL;
296 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
297 
298 	/* Connection accept timeout: 0x7d00 slots * 0.625 ms = 20 seconds */
299 	param = cpu_to_le16(0x7d00);
300 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
301 }
302 
303 static void le_setup(struct hci_request *req)
304 {
305 	struct hci_dev *hdev = req->hdev;
306 
307 	/* Read LE Buffer Size */
308 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
309 
310 	/* Read LE Local Supported Features */
311 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
312 
313 	/* Read LE Supported States */
314 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
315 
316 	/* LE-only controllers have LE implicitly enabled */
317 	if (!lmp_bredr_capable(hdev))
318 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
319 }
320 
321 static void hci_setup_event_mask(struct hci_request *req)
322 {
323 	struct hci_dev *hdev = req->hdev;
324 
325 	/* The second byte is 0xff instead of 0x9f (two reserved bits
326 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
327 	 * command otherwise.
328 	 */
329 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
330 
331 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
332 	 * any event mask for pre-1.2 devices.
333 	 */
334 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
335 		return;
336 
337 	if (lmp_bredr_capable(hdev)) {
338 		events[4] |= 0x01; /* Flow Specification Complete */
339 	} else {
340 		/* Use a different default for LE-only devices */
341 		memset(events, 0, sizeof(events));
342 		events[1] |= 0x20; /* Command Complete */
343 		events[1] |= 0x40; /* Command Status */
344 		events[1] |= 0x80; /* Hardware Error */
345 
346 		/* If the controller supports the Disconnect command, enable
347 		 * the corresponding event. In addition enable packet flow
348 		 * control related events.
349 		 */
350 		if (hdev->commands[0] & 0x20) {
351 			events[0] |= 0x10; /* Disconnection Complete */
352 			events[2] |= 0x04; /* Number of Completed Packets */
353 			events[3] |= 0x02; /* Data Buffer Overflow */
354 		}
355 
356 		/* If the controller supports the Read Remote Version
357 		 * Information command, enable the corresponding event.
358 		 */
359 		if (hdev->commands[2] & 0x80)
360 			events[1] |= 0x08; /* Read Remote Version Information
361 					    * Complete
362 					    */
363 
364 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
365 			events[0] |= 0x80; /* Encryption Change */
366 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
367 		}
368 	}
369 
370 	if (lmp_inq_rssi_capable(hdev) ||
371 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
372 		events[4] |= 0x02; /* Inquiry Result with RSSI */
373 
374 	if (lmp_ext_feat_capable(hdev))
375 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
376 
377 	if (lmp_esco_capable(hdev)) {
378 		events[5] |= 0x08; /* Synchronous Connection Complete */
379 		events[5] |= 0x10; /* Synchronous Connection Changed */
380 	}
381 
382 	if (lmp_sniffsubr_capable(hdev))
383 		events[5] |= 0x20; /* Sniff Subrating */
384 
385 	if (lmp_pause_enc_capable(hdev))
386 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
387 
388 	if (lmp_ext_inq_capable(hdev))
389 		events[5] |= 0x40; /* Extended Inquiry Result */
390 
391 	if (lmp_no_flush_capable(hdev))
392 		events[7] |= 0x01; /* Enhanced Flush Complete */
393 
394 	if (lmp_lsto_capable(hdev))
395 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
396 
397 	if (lmp_ssp_capable(hdev)) {
398 		events[6] |= 0x01;	/* IO Capability Request */
399 		events[6] |= 0x02;	/* IO Capability Response */
400 		events[6] |= 0x04;	/* User Confirmation Request */
401 		events[6] |= 0x08;	/* User Passkey Request */
402 		events[6] |= 0x10;	/* Remote OOB Data Request */
403 		events[6] |= 0x20;	/* Simple Pairing Complete */
404 		events[7] |= 0x04;	/* User Passkey Notification */
405 		events[7] |= 0x08;	/* Keypress Notification */
406 		events[7] |= 0x10;	/* Remote Host Supported
407 					 * Features Notification
408 					 */
409 	}
410 
411 	if (lmp_le_capable(hdev))
412 		events[7] |= 0x20;	/* LE Meta-Event */
413 
414 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
415 }
416 
417 static int hci_init2_req(struct hci_request *req, unsigned long opt)
418 {
419 	struct hci_dev *hdev = req->hdev;
420 
421 	if (hdev->dev_type == HCI_AMP)
422 		return amp_init2(req);
423 
424 	if (lmp_bredr_capable(hdev))
425 		bredr_setup(req);
426 	else
427 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
428 
429 	if (lmp_le_capable(hdev))
430 		le_setup(req);
431 
432 	/* All Bluetooth 1.2 and later controllers should support the
433 	 * HCI command for reading the local supported commands.
434 	 *
435 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
436 	 * but do not have support for this command. If that is the case,
437 	 * the driver can quirk the behavior and skip reading the local
438 	 * supported commands.
439 	 */
440 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
441 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
442 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
443 
444 	if (lmp_ssp_capable(hdev)) {
445 		/* When SSP is available, the host features page should
446 		 * also be available. However, some controllers report
447 		 * max_page as 0 as long as SSP has not been enabled. To
448 		 * get proper debugging output, force max_page to at
449 		 * least 1.
450 		 */
451 		hdev->max_page = 0x01;
452 
453 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
454 			u8 mode = 0x01;
455 
456 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
457 				    sizeof(mode), &mode);
458 		} else {
459 			struct hci_cp_write_eir cp;
460 
461 			memset(hdev->eir, 0, sizeof(hdev->eir));
462 			memset(&cp, 0, sizeof(cp));
463 
464 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
465 		}
466 	}
467 
468 	if (lmp_inq_rssi_capable(hdev) ||
469 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
470 		u8 mode;
471 
472 		/* If Extended Inquiry Result events are supported, then
473 		 * they are clearly preferred over Inquiry Result with RSSI
474 		 * events.
475 		 */
476 		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
477 
478 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
479 	}
480 
481 	if (lmp_inq_tx_pwr_capable(hdev))
482 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
483 
484 	if (lmp_ext_feat_capable(hdev)) {
485 		struct hci_cp_read_local_ext_features cp;
486 
487 		cp.page = 0x01;
488 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
489 			    sizeof(cp), &cp);
490 	}
491 
492 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
493 		u8 enable = 1;
494 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
495 			    &enable);
496 	}
497 
498 	return 0;
499 }
500 
501 static void hci_setup_link_policy(struct hci_request *req)
502 {
503 	struct hci_dev *hdev = req->hdev;
504 	struct hci_cp_write_def_link_policy cp;
505 	u16 link_policy = 0;
506 
507 	if (lmp_rswitch_capable(hdev))
508 		link_policy |= HCI_LP_RSWITCH;
509 	if (lmp_hold_capable(hdev))
510 		link_policy |= HCI_LP_HOLD;
511 	if (lmp_sniff_capable(hdev))
512 		link_policy |= HCI_LP_SNIFF;
513 	if (lmp_park_capable(hdev))
514 		link_policy |= HCI_LP_PARK;
515 
516 	cp.policy = cpu_to_le16(link_policy);
517 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
518 }
519 
520 static void hci_set_le_support(struct hci_request *req)
521 {
522 	struct hci_dev *hdev = req->hdev;
523 	struct hci_cp_write_le_host_supported cp;
524 
525 	/* LE-only devices do not support explicit enablement */
526 	if (!lmp_bredr_capable(hdev))
527 		return;
528 
529 	memset(&cp, 0, sizeof(cp));
530 
531 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
532 		cp.le = 0x01;
533 		cp.simul = 0x00;
534 	}
535 
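	/* Only send the command when the host LE support setting would
	 * actually change.
	 */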
536 	if (cp.le != lmp_host_le_capable(hdev))
537 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
538 			    &cp);
539 }
540 
541 static void hci_set_event_mask_page_2(struct hci_request *req)
542 {
543 	struct hci_dev *hdev = req->hdev;
544 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
545 	bool changed = false;
546 
547 	/* If the Connectionless Slave Broadcast master role is supported,
548 	 * enable all necessary events for it.
549 	 */
550 	if (lmp_csb_master_capable(hdev)) {
551 		events[1] |= 0x40;	/* Triggered Clock Capture */
552 		events[1] |= 0x80;	/* Synchronization Train Complete */
553 		events[2] |= 0x10;	/* Slave Page Response Timeout */
554 		events[2] |= 0x20;	/* CSB Channel Map Change */
555 		changed = true;
556 	}
557 
558 	/* If the Connectionless Slave Broadcast slave role is supported,
559 	 * enable all necessary events for it.
560 	 */
561 	if (lmp_csb_slave_capable(hdev)) {
562 		events[2] |= 0x01;	/* Synchronization Train Received */
563 		events[2] |= 0x02;	/* CSB Receive */
564 		events[2] |= 0x04;	/* CSB Timeout */
565 		events[2] |= 0x08;	/* Truncated Page Complete */
566 		changed = true;
567 	}
568 
569 	/* Enable Authenticated Payload Timeout Expired event if supported */
570 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
571 		events[2] |= 0x80;
572 		changed = true;
573 	}
574 
575 	/* Some Broadcom based controllers indicate support for Set Event
576 	 * Mask Page 2 command, but then actually do not support it. Since
577 	 * the default value is all bits set to zero, the command is only
578 	 * required if the event mask has to be changed. In case no change
579 	 * to the event mask is needed, skip this command.
580 	 */
581 	if (changed)
582 		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
583 			    sizeof(events), events);
584 }
585 
586 static int hci_init3_req(struct hci_request *req, unsigned long opt)
587 {
588 	struct hci_dev *hdev = req->hdev;
589 	u8 p;
590 
591 	hci_setup_event_mask(req);
592 
593 	if (hdev->commands[6] & 0x20 &&
594 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
595 		struct hci_cp_read_stored_link_key cp;
596 
597 		bacpy(&cp.bdaddr, BDADDR_ANY);
598 		cp.read_all = 0x01;
599 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
600 	}
601 
602 	if (hdev->commands[5] & 0x10)
603 		hci_setup_link_policy(req);
604 
605 	if (hdev->commands[8] & 0x01)
606 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
607 
608 	if (hdev->commands[18] & 0x04)
609 		hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
610 
611 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
612 	 * support the Read Page Scan Type command. Check support for
613 	 * this command in the bit mask of supported commands.
614 	 */
615 	if (hdev->commands[13] & 0x01)
616 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
617 
618 	if (lmp_le_capable(hdev)) {
619 		u8 events[8];
620 
621 		memset(events, 0, sizeof(events));
622 
623 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
624 			events[0] |= 0x10;	/* LE Long Term Key Request */
625 
626 		/* If the controller supports the Connection Parameters Request
627 		 * Link Layer Procedure, enable the corresponding event.
628 		 */
629 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
630 			events[0] |= 0x20;	/* LE Remote Connection
631 						 * Parameter Request
632 						 */
633 
634 		/* If the controller supports the Data Length Extension
635 		 * feature, enable the corresponding event.
636 		 */
637 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
638 			events[0] |= 0x40;	/* LE Data Length Change */
639 
640 		/* If the controller supports Extended Scanner Filter
641 		 * Policies, enable the corresponding event.
642 		 */
643 		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
644 			events[1] |= 0x04;	/* LE Direct Advertising
645 						 * Report
646 						 */
647 
648 		/* If the controller supports Channel Selection Algorithm #2
649 		 * feature, enable the corresponding event.
650 		 */
651 		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
652 			events[2] |= 0x08;	/* LE Channel Selection
653 						 * Algorithm
654 						 */
655 
656 		/* If the controller supports the LE Set Scan Enable command,
657 		 * enable the corresponding advertising report event.
658 		 */
659 		if (hdev->commands[26] & 0x08)
660 			events[0] |= 0x02;	/* LE Advertising Report */
661 
662 		/* If the controller supports the LE Create Connection
663 		 * command, enable the corresponding event.
664 		 */
665 		if (hdev->commands[26] & 0x10)
666 			events[0] |= 0x01;	/* LE Connection Complete */
667 
668 		/* If the controller supports the LE Connection Update
669 		 * command, enable the corresponding event.
670 		 */
671 		if (hdev->commands[27] & 0x04)
672 			events[0] |= 0x04;	/* LE Connection Update
673 						 * Complete
674 						 */
675 
676 		/* If the controller supports the LE Read Remote Used Features
677 		 * command, enable the corresponding event.
678 		 */
679 		if (hdev->commands[27] & 0x20)
680 			events[0] |= 0x08;	/* LE Read Remote Used
681 						 * Features Complete
682 						 */
683 
684 		/* If the controller supports the LE Read Local P-256
685 		 * Public Key command, enable the corresponding event.
686 		 */
687 		if (hdev->commands[34] & 0x02)
688 			events[0] |= 0x80;	/* LE Read Local P-256
689 						 * Public Key Complete
690 						 */
691 
692 		/* If the controller supports the LE Generate DHKey
693 		 * command, enable the corresponding event.
694 		 */
695 		if (hdev->commands[34] & 0x04)
696 			events[1] |= 0x01;	/* LE Generate DHKey Complete */
697 
698 		/* If the controller supports the LE Set Default PHY or
699 		 * LE Set PHY commands, enable the corresponding event.
700 		 */
701 		if (hdev->commands[35] & (0x20 | 0x40))
702 			events[1] |= 0x08;        /* LE PHY Update Complete */
703 
704 		/* If the controller supports LE Set Extended Scan Parameters
705 		 * and LE Set Extended Scan Enable commands, enable the
706 		 * corresponding event.
707 		 */
708 		if (use_ext_scan(hdev))
709 			events[1] |= 0x10;	/* LE Extended Advertising
710 						 * Report
711 						 */
712 
713 		/* If the controller supports the LE Extended Create Connection
714 		 * command, enable the corresponding event.
715 		 */
716 		if (use_ext_conn(hdev))
717 			events[1] |= 0x02;      /* LE Enhanced Connection
718 						 * Complete
719 						 */
720 
721 		/* If the controller supports the LE Extended Advertising
722 		 * command, enable the corresponding event.
723 		 */
724 		if (ext_adv_capable(hdev))
725 			events[2] |= 0x02;	/* LE Advertising Set
726 						 * Terminated
727 						 */
728 
729 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
730 			    events);
731 
732 		/* Read LE Advertising Channel TX Power */
733 		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
734 			/* The HCI specification forbids mixing legacy and
735 			 * extended advertising commands, and READ_ADV_TX_POWER
736 			 * is a legacy command. Do not call it when extended
737 			 * advertising is supported, otherwise the controller
738 			 * returns COMMAND_DISALLOWED for the extended commands.
739 			 */
740 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
741 		}
742 
743 		if (hdev->commands[26] & 0x40) {
744 			/* Read LE White List Size */
745 			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
746 				    0, NULL);
747 		}
748 
749 		if (hdev->commands[26] & 0x80) {
750 			/* Clear LE White List */
751 			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
752 		}
753 
754 		if (hdev->commands[34] & 0x40) {
755 			/* Read LE Resolving List Size */
756 			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
757 				    0, NULL);
758 		}
759 
760 		if (hdev->commands[34] & 0x20) {
761 			/* Clear LE Resolving List */
762 			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
763 		}
764 
765 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
766 			/* Read LE Maximum Data Length */
767 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
768 
769 			/* Read LE Suggested Default Data Length */
770 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
771 		}
772 
773 		if (ext_adv_capable(hdev)) {
774 			/* Read LE Number of Supported Advertising Sets */
775 			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
776 				    0, NULL);
777 		}
778 
779 		hci_set_le_support(req);
780 	}
781 
782 	/* Read features beyond page 1 if available */
783 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
784 		struct hci_cp_read_local_ext_features cp;
785 
786 		cp.page = p;
787 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
788 			    sizeof(cp), &cp);
789 	}
790 
791 	return 0;
792 }
793 
794 static int hci_init4_req(struct hci_request *req, unsigned long opt)
795 {
796 	struct hci_dev *hdev = req->hdev;
797 
798 	/* Some Broadcom based Bluetooth controllers do not support the
799 	 * Delete Stored Link Key command. They are clearly indicating its
800 	 * absence in the bit mask of supported commands.
801 	 *
802 	 * Check the supported commands and send it only if the command is
803 	 * marked as supported. If it is not, assume that the controller
804 	 * does not actually support stored link keys, which makes this
805 	 * command redundant anyway.
806 	 *
807 	 * Some controllers indicate that they support deleting
808 	 * stored link keys, but they don't. The quirk lets a driver
809 	 * just disable this command.
810 	 */
811 	if (hdev->commands[6] & 0x80 &&
812 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
813 		struct hci_cp_delete_stored_link_key cp;
814 
815 		bacpy(&cp.bdaddr, BDADDR_ANY);
816 		cp.delete_all = 0x01;
817 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
818 			    sizeof(cp), &cp);
819 	}
820 
821 	/* Set event mask page 2 if the HCI command for it is supported */
822 	if (hdev->commands[22] & 0x04)
823 		hci_set_event_mask_page_2(req);
824 
825 	/* Read local codec list if the HCI command is supported */
826 	if (hdev->commands[29] & 0x20)
827 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
828 
829 	/* Get MWS transport configuration if the HCI command is supported */
830 	if (hdev->commands[30] & 0x08)
831 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
832 
833 	/* Check for Synchronization Train support */
834 	if (lmp_sync_train_capable(hdev))
835 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
836 
837 	/* Enable Secure Connections if supported and configured */
838 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
839 	    bredr_sc_enabled(hdev)) {
840 		u8 support = 0x01;
841 
842 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
843 			    sizeof(support), &support);
844 	}
845 
846 	/* If supported, set erroneous data reporting to match the wideband
847 	 * speech setting value
848 	 */
849 	if (hdev->commands[18] & 0x08) {
850 		bool enabled = hci_dev_test_flag(hdev,
851 						 HCI_WIDEBAND_SPEECH_ENABLED);
852 
853 		if (enabled !=
854 		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
855 			struct hci_cp_write_def_err_data_reporting cp;
856 
857 			cp.err_data_reporting = enabled ?
858 						ERR_DATA_REPORTING_ENABLED :
859 						ERR_DATA_REPORTING_DISABLED;
860 
861 			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
862 				    sizeof(cp), &cp);
863 		}
864 	}
865 
866 	/* Set Suggested Default Data Length to maximum if supported */
867 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
868 		struct hci_cp_le_write_def_data_len cp;
869 
870 		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
871 		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
872 		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
873 	}
874 
875 	/* Set Default PHY parameters if command is supported */
876 	if (hdev->commands[35] & 0x20) {
877 		struct hci_cp_le_set_default_phy cp;
878 
879 		cp.all_phys = 0x00;
880 		cp.tx_phys = hdev->le_tx_def_phys;
881 		cp.rx_phys = hdev->le_rx_def_phys;
882 
883 		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
884 	}
885 
886 	return 0;
887 }
888 
889 static int __hci_init(struct hci_dev *hdev)
890 {
891 	int err;
892 
893 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
894 	if (err < 0)
895 		return err;
896 
897 	if (hci_dev_test_flag(hdev, HCI_SETUP))
898 		hci_debugfs_create_basic(hdev);
899 
900 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
901 	if (err < 0)
902 		return err;
903 
904 	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
905 	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
906 	 * first two stages of init.
907 	 */
908 	if (hdev->dev_type != HCI_PRIMARY)
909 		return 0;
910 
911 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
912 	if (err < 0)
913 		return err;
914 
915 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
916 	if (err < 0)
917 		return err;
918 
919 	/* This function is only called when the controller is actually in
920 	 * configured state. When the controller is marked as unconfigured,
921 	 * this initialization procedure is not run.
922 	 *
923 	 * It means that it is possible that a controller runs through its
924 	 * setup phase and then discovers missing settings. If that is the
925 	 * case, then this function will not be called. It will then only
926 	 * be called during the config phase.
927 	 *
928 	 * So only when in setup phase or config phase, create the debugfs
929 	 * entries and register the SMP channels.
930 	 */
931 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
932 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
933 		return 0;
934 
935 	hci_debugfs_create_common(hdev);
936 
937 	if (lmp_bredr_capable(hdev))
938 		hci_debugfs_create_bredr(hdev);
939 
940 	if (lmp_le_capable(hdev))
941 		hci_debugfs_create_le(hdev);
942 
943 	return 0;
944 }
945 
946 static int hci_init0_req(struct hci_request *req, unsigned long opt)
947 {
948 	struct hci_dev *hdev = req->hdev;
949 
950 	BT_DBG("%s %ld", hdev->name, opt);
951 
952 	/* Reset */
953 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
954 		hci_reset_req(req, 0);
955 
956 	/* Read Local Version */
957 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
958 
959 	/* Read BD Address */
960 	if (hdev->set_bdaddr)
961 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
962 
963 	return 0;
964 }
965 
966 static int __hci_unconf_init(struct hci_dev *hdev)
967 {
968 	int err;
969 
970 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
971 		return 0;
972 
973 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
974 	if (err < 0)
975 		return err;
976 
977 	if (hci_dev_test_flag(hdev, HCI_SETUP))
978 		hci_debugfs_create_basic(hdev);
979 
980 	return 0;
981 }
982 
983 static int hci_scan_req(struct hci_request *req, unsigned long opt)
984 {
985 	__u8 scan = opt;
986 
987 	BT_DBG("%s %x", req->hdev->name, scan);
988 
989 	/* Inquiry and Page scans */
990 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
991 	return 0;
992 }
993 
994 static int hci_auth_req(struct hci_request *req, unsigned long opt)
995 {
996 	__u8 auth = opt;
997 
998 	BT_DBG("%s %x", req->hdev->name, auth);
999 
1000 	/* Authentication */
1001 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1002 	return 0;
1003 }
1004 
1005 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1006 {
1007 	__u8 encrypt = opt;
1008 
1009 	BT_DBG("%s %x", req->hdev->name, encrypt);
1010 
1011 	/* Encryption */
1012 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1013 	return 0;
1014 }
1015 
1016 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1017 {
1018 	__le16 policy = cpu_to_le16(opt);
1019 
1020 	BT_DBG("%s %x", req->hdev->name, policy);
1021 
1022 	/* Default link policy */
1023 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1024 	return 0;
1025 }
1026 
1027 /* Get HCI device by index.
1028  * Device is held on return. */
1029 struct hci_dev *hci_dev_get(int index)
1030 {
1031 	struct hci_dev *hdev = NULL, *d;
1032 
1033 	BT_DBG("%d", index);
1034 
1035 	if (index < 0)
1036 		return NULL;
1037 
1038 	read_lock(&hci_dev_list_lock);
1039 	list_for_each_entry(d, &hci_dev_list, list) {
1040 		if (d->id == index) {
1041 			hdev = hci_dev_hold(d);
1042 			break;
1043 		}
1044 	}
1045 	read_unlock(&hci_dev_list_lock);
1046 	return hdev;
1047 }
1048 
1049 /* ---- Inquiry support ---- */
1050 
1051 bool hci_discovery_active(struct hci_dev *hdev)
1052 {
1053 	struct discovery_state *discov = &hdev->discovery;
1054 
1055 	switch (discov->state) {
1056 	case DISCOVERY_FINDING:
1057 	case DISCOVERY_RESOLVING:
1058 		return true;
1059 
1060 	default:
1061 		return false;
1062 	}
1063 }
1064 
1065 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1066 {
1067 	int old_state = hdev->discovery.state;
1068 
1069 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1070 
1071 	if (old_state == state)
1072 		return;
1073 
1074 	hdev->discovery.state = state;
1075 
1076 	switch (state) {
1077 	case DISCOVERY_STOPPED:
1078 		hci_update_background_scan(hdev);
1079 
1080 		if (old_state != DISCOVERY_STARTING)
1081 			mgmt_discovering(hdev, 0);
1082 		break;
1083 	case DISCOVERY_STARTING:
1084 		break;
1085 	case DISCOVERY_FINDING:
1086 		mgmt_discovering(hdev, 1);
1087 		break;
1088 	case DISCOVERY_RESOLVING:
1089 		break;
1090 	case DISCOVERY_STOPPING:
1091 		break;
1092 	}
1093 }
1094 
1095 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1096 {
1097 	struct discovery_state *cache = &hdev->discovery;
1098 	struct inquiry_entry *p, *n;
1099 
1100 	list_for_each_entry_safe(p, n, &cache->all, all) {
1101 		list_del(&p->all);
1102 		kfree(p);
1103 	}
1104 
1105 	INIT_LIST_HEAD(&cache->unknown);
1106 	INIT_LIST_HEAD(&cache->resolve);
1107 }
1108 
1109 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1110 					       bdaddr_t *bdaddr)
1111 {
1112 	struct discovery_state *cache = &hdev->discovery;
1113 	struct inquiry_entry *e;
1114 
1115 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1116 
1117 	list_for_each_entry(e, &cache->all, all) {
1118 		if (!bacmp(&e->data.bdaddr, bdaddr))
1119 			return e;
1120 	}
1121 
1122 	return NULL;
1123 }
1124 
1125 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1126 						       bdaddr_t *bdaddr)
1127 {
1128 	struct discovery_state *cache = &hdev->discovery;
1129 	struct inquiry_entry *e;
1130 
1131 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1132 
1133 	list_for_each_entry(e, &cache->unknown, list) {
1134 		if (!bacmp(&e->data.bdaddr, bdaddr))
1135 			return e;
1136 	}
1137 
1138 	return NULL;
1139 }
1140 
1141 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1142 						       bdaddr_t *bdaddr,
1143 						       int state)
1144 {
1145 	struct discovery_state *cache = &hdev->discovery;
1146 	struct inquiry_entry *e;
1147 
1148 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1149 
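	/* BDADDR_ANY acts as a wildcard: match the first entry that is in
	 * the requested name state.
	 */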
1150 	list_for_each_entry(e, &cache->resolve, list) {
1151 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1152 			return e;
1153 		if (!bacmp(&e->data.bdaddr, bdaddr))
1154 			return e;
1155 	}
1156 
1157 	return NULL;
1158 }
1159 
1160 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1161 				      struct inquiry_entry *ie)
1162 {
1163 	struct discovery_state *cache = &hdev->discovery;
1164 	struct list_head *pos = &cache->resolve;
1165 	struct inquiry_entry *p;
1166 
1167 	list_del(&ie->list);
1168 
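	/* Re-insert the entry so that the resolve list stays ordered:
	 * entries with a name request already pending remain in front, the
	 * rest are sorted by signal strength with the strongest first.
	 */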
1169 	list_for_each_entry(p, &cache->resolve, list) {
1170 		if (p->name_state != NAME_PENDING &&
1171 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1172 			break;
1173 		pos = &p->list;
1174 	}
1175 
1176 	list_add(&ie->list, pos);
1177 }
1178 
1179 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1180 			     bool name_known)
1181 {
1182 	struct discovery_state *cache = &hdev->discovery;
1183 	struct inquiry_entry *ie;
1184 	u32 flags = 0;
1185 
1186 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1187 
1188 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1189 
1190 	if (!data->ssp_mode)
1191 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1192 
1193 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1194 	if (ie) {
1195 		if (!ie->data.ssp_mode)
1196 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1197 
1198 		if (ie->name_state == NAME_NEEDED &&
1199 		    data->rssi != ie->data.rssi) {
1200 			ie->data.rssi = data->rssi;
1201 			hci_inquiry_cache_update_resolve(hdev, ie);
1202 		}
1203 
1204 		goto update;
1205 	}
1206 
1207 	/* Entry not in the cache. Add new one. */
1208 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1209 	if (!ie) {
1210 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1211 		goto done;
1212 	}
1213 
1214 	list_add(&ie->all, &cache->all);
1215 
1216 	if (name_known) {
1217 		ie->name_state = NAME_KNOWN;
1218 	} else {
1219 		ie->name_state = NAME_NOT_KNOWN;
1220 		list_add(&ie->list, &cache->unknown);
1221 	}
1222 
1223 update:
1224 	if (name_known && ie->name_state != NAME_KNOWN &&
1225 	    ie->name_state != NAME_PENDING) {
1226 		ie->name_state = NAME_KNOWN;
1227 		list_del(&ie->list);
1228 	}
1229 
1230 	memcpy(&ie->data, data, sizeof(*data));
1231 	ie->timestamp = jiffies;
1232 	cache->timestamp = jiffies;
1233 
1234 	if (ie->name_state == NAME_NOT_KNOWN)
1235 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1236 
1237 done:
1238 	return flags;
1239 }
1240 
1241 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1242 {
1243 	struct discovery_state *cache = &hdev->discovery;
1244 	struct inquiry_info *info = (struct inquiry_info *) buf;
1245 	struct inquiry_entry *e;
1246 	int copied = 0;
1247 
1248 	list_for_each_entry(e, &cache->all, all) {
1249 		struct inquiry_data *data = &e->data;
1250 
1251 		if (copied >= num)
1252 			break;
1253 
1254 		bacpy(&info->bdaddr, &data->bdaddr);
1255 		info->pscan_rep_mode	= data->pscan_rep_mode;
1256 		info->pscan_period_mode	= data->pscan_period_mode;
1257 		info->pscan_mode	= data->pscan_mode;
1258 		memcpy(info->dev_class, data->dev_class, 3);
1259 		info->clock_offset	= data->clock_offset;
1260 
1261 		info++;
1262 		copied++;
1263 	}
1264 
1265 	BT_DBG("cache %p, copied %d", cache, copied);
1266 	return copied;
1267 }
1268 
1269 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1270 {
1271 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1272 	struct hci_dev *hdev = req->hdev;
1273 	struct hci_cp_inquiry cp;
1274 
1275 	BT_DBG("%s", hdev->name);
1276 
1277 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1278 		return 0;
1279 
1280 	/* Start Inquiry */
1281 	memcpy(&cp.lap, &ir->lap, 3);
1282 	cp.length  = ir->length;
1283 	cp.num_rsp = ir->num_rsp;
1284 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1285 
1286 	return 0;
1287 }
1288 
1289 int hci_inquiry(void __user *arg)
1290 {
1291 	__u8 __user *ptr = arg;
1292 	struct hci_inquiry_req ir;
1293 	struct hci_dev *hdev;
1294 	int err = 0, do_inquiry = 0, max_rsp;
1295 	long timeo;
1296 	__u8 *buf;
1297 
1298 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1299 		return -EFAULT;
1300 
1301 	hdev = hci_dev_get(ir.dev_id);
1302 	if (!hdev)
1303 		return -ENODEV;
1304 
1305 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1306 		err = -EBUSY;
1307 		goto done;
1308 	}
1309 
1310 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1311 		err = -EOPNOTSUPP;
1312 		goto done;
1313 	}
1314 
1315 	if (hdev->dev_type != HCI_PRIMARY) {
1316 		err = -EOPNOTSUPP;
1317 		goto done;
1318 	}
1319 
1320 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1321 		err = -EOPNOTSUPP;
1322 		goto done;
1323 	}
1324 
1325 	hci_dev_lock(hdev);
1326 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1327 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1328 		hci_inquiry_cache_flush(hdev);
1329 		do_inquiry = 1;
1330 	}
1331 	hci_dev_unlock(hdev);
1332 
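	/* The inquiry length is specified in units of 1.28 seconds; allow
	 * 2 seconds per unit so the request cannot time out before the
	 * controller finishes.
	 */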
1333 	timeo = ir.length * msecs_to_jiffies(2000);
1334 
1335 	if (do_inquiry) {
1336 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1337 				   timeo, NULL);
1338 		if (err < 0)
1339 			goto done;
1340 
1341 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1342 		 * cleared). If it is interrupted by a signal, return -EINTR.
1343 		 */
1344 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1345 				TASK_INTERRUPTIBLE))
1346 			return -EINTR;
1347 	}
1348 
1349 	/* For an unlimited number of responses we will use a buffer with
1350 	 * 255 entries
1351 	 */
1352 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1353 
1354 	/* inquiry_cache_dump() can't sleep. Therefore we allocate a temporary
1355 	 * buffer and then copy it to user space.
1356 	 */
1357 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1358 	if (!buf) {
1359 		err = -ENOMEM;
1360 		goto done;
1361 	}
1362 
1363 	hci_dev_lock(hdev);
1364 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1365 	hci_dev_unlock(hdev);
1366 
1367 	BT_DBG("num_rsp %d", ir.num_rsp);
1368 
1369 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1370 		ptr += sizeof(ir);
1371 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1372 				 ir.num_rsp))
1373 			err = -EFAULT;
1374 	} else
1375 		err = -EFAULT;
1376 
1377 	kfree(buf);
1378 
1379 done:
1380 	hci_dev_put(hdev);
1381 	return err;
1382 }
1383 
1384 /**
1385  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1386  *				       (BD_ADDR) for a HCI device from
1387  *				       a firmware node property.
1388  * @hdev:	The HCI device
1389  *
1390  * Search the firmware node for 'local-bd-address'.
1391  *
1392  * All-zero BD addresses are rejected, because those could be properties
1393  * that exist in the firmware tables, but were not updated by the firmware. For
1394  * example, the DTS could define 'local-bd-address' with an all-zero BD address.
1395  */
1396 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1397 {
1398 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1399 	bdaddr_t ba;
1400 	int ret;
1401 
1402 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1403 					    (u8 *)&ba, sizeof(ba));
1404 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1405 		return;
1406 
1407 	bacpy(&hdev->public_addr, &ba);
1408 }
1409 
1410 static int hci_dev_do_open(struct hci_dev *hdev)
1411 {
1412 	int ret = 0;
1413 
1414 	BT_DBG("%s %p", hdev->name, hdev);
1415 
1416 	hci_req_sync_lock(hdev);
1417 
1418 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1419 		ret = -ENODEV;
1420 		goto done;
1421 	}
1422 
1423 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1424 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1425 		/* Check for rfkill but allow the HCI setup stage to
1426 		 * proceed (which in itself doesn't cause any RF activity).
1427 		 */
1428 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1429 			ret = -ERFKILL;
1430 			goto done;
1431 		}
1432 
1433 		/* Check for a valid public address or a configured static
1434 		 * random address, but let the HCI setup proceed to
1435 		 * be able to determine if there is a public address
1436 		 * or not.
1437 		 *
1438 		 * In case of user channel usage, it is not important
1439 		 * if a public address or static random address is
1440 		 * available.
1441 		 *
1442 		 * This check is only valid for BR/EDR controllers
1443 		 * since AMP controllers do not have an address.
1444 		 */
1445 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1446 		    hdev->dev_type == HCI_PRIMARY &&
1447 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1448 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1449 			ret = -EADDRNOTAVAIL;
1450 			goto done;
1451 		}
1452 	}
1453 
1454 	if (test_bit(HCI_UP, &hdev->flags)) {
1455 		ret = -EALREADY;
1456 		goto done;
1457 	}
1458 
1459 	if (hdev->open(hdev)) {
1460 		ret = -EIO;
1461 		goto done;
1462 	}
1463 
1464 	set_bit(HCI_RUNNING, &hdev->flags);
1465 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1466 
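	/* cmd_cnt tracks how many HCI commands may be sent to the
	 * controller; allow a single outstanding command for the init
	 * sequence.
	 */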
1467 	atomic_set(&hdev->cmd_cnt, 1);
1468 	set_bit(HCI_INIT, &hdev->flags);
1469 
1470 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1471 	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1472 		bool invalid_bdaddr;
1473 
1474 		hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1475 
1476 		if (hdev->setup)
1477 			ret = hdev->setup(hdev);
1478 
1479 		/* The transport driver can set the quirk to mark the
1480 		 * BD_ADDR invalid before creating the HCI device or in
1481 		 * its setup callback.
1482 		 */
1483 		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1484 					  &hdev->quirks);
1485 
1486 		if (ret)
1487 			goto setup_failed;
1488 
1489 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1490 			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1491 				hci_dev_get_bd_addr_from_property(hdev);
1492 
1493 			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1494 			    hdev->set_bdaddr) {
1495 				ret = hdev->set_bdaddr(hdev,
1496 						       &hdev->public_addr);
1497 
1498 				/* If setting of the BD_ADDR from the device
1499 				 * property succeeds, then treat the address
1500 				 * as valid even if the invalid BD_ADDR
1501 				 * quirk indicates otherwise.
1502 				 */
1503 				if (!ret)
1504 					invalid_bdaddr = false;
1505 			}
1506 		}
1507 
1508 setup_failed:
1509 		/* The transport driver can set these quirks before
1510 		 * creating the HCI device or in its setup callback.
1511 		 *
1512 		 * For the invalid BD_ADDR quirk it is possible that
1513 		 * it becomes a valid address if the bootloader does
1514 		 * provide it (see above).
1515 		 *
1516 		 * In case any of them is set, the controller has to
1517 		 * start up as unconfigured.
1518 		 */
1519 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1520 		    invalid_bdaddr)
1521 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1522 
1523 		/* For an unconfigured controller it is required to
1524 		 * read at least the version information provided by
1525 		 * the Read Local Version Information command.
1526 		 *
1527 		 * If the set_bdaddr driver callback is provided, then
1528 		 * also the original Bluetooth public device address
1529 		 * will be read using the Read BD Address command.
1530 		 */
1531 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1532 			ret = __hci_unconf_init(hdev);
1533 	}
1534 
1535 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1536 		/* If public address change is configured, ensure that
1537 		 * the address gets programmed. If the driver does not
1538 		 * support changing the public address, fail the power
1539 		 * on procedure.
1540 		 */
1541 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1542 		    hdev->set_bdaddr)
1543 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1544 		else
1545 			ret = -EADDRNOTAVAIL;
1546 	}
1547 
1548 	if (!ret) {
1549 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1550 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1551 			ret = __hci_init(hdev);
1552 			if (!ret && hdev->post_init)
1553 				ret = hdev->post_init(hdev);
1554 		}
1555 	}
1556 
1557 	/* If the HCI Reset command is clearing all diagnostic settings,
1558 	 * then they need to be reprogrammed after the init procedure
1559 	 * has completed.
1560 	 */
1561 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1562 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1563 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1564 		ret = hdev->set_diag(hdev, true);
1565 
1566 	clear_bit(HCI_INIT, &hdev->flags);
1567 
1568 	if (!ret) {
1569 		hci_dev_hold(hdev);
1570 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1571 		hci_adv_instances_set_rpa_expired(hdev, true);
1572 		set_bit(HCI_UP, &hdev->flags);
1573 		hci_sock_dev_event(hdev, HCI_DEV_UP);
1574 		hci_leds_update_powered(hdev, true);
1575 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1576 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1577 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1578 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1579 		    hci_dev_test_flag(hdev, HCI_MGMT) &&
1580 		    hdev->dev_type == HCI_PRIMARY) {
1581 			ret = __hci_req_hci_power_on(hdev);
1582 			mgmt_power_on(hdev, ret);
1583 		}
1584 	} else {
1585 		/* Init failed, cleanup */
1586 		flush_work(&hdev->tx_work);
1587 		flush_work(&hdev->cmd_work);
1588 		flush_work(&hdev->rx_work);
1589 
1590 		skb_queue_purge(&hdev->cmd_q);
1591 		skb_queue_purge(&hdev->rx_q);
1592 
1593 		if (hdev->flush)
1594 			hdev->flush(hdev);
1595 
1596 		if (hdev->sent_cmd) {
1597 			kfree_skb(hdev->sent_cmd);
1598 			hdev->sent_cmd = NULL;
1599 		}
1600 
1601 		clear_bit(HCI_RUNNING, &hdev->flags);
1602 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1603 
1604 		hdev->close(hdev);
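		/* Keep only the HCI_RAW bit; every other device flag is
		 * dropped after the failed init.
		 */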
1605 		hdev->flags &= BIT(HCI_RAW);
1606 	}
1607 
1608 done:
1609 	hci_req_sync_unlock(hdev);
1610 	return ret;
1611 }
1612 
1613 /* ---- HCI ioctl helpers ---- */
1614 
1615 int hci_dev_open(__u16 dev)
1616 {
1617 	struct hci_dev *hdev;
1618 	int err;
1619 
1620 	hdev = hci_dev_get(dev);
1621 	if (!hdev)
1622 		return -ENODEV;
1623 
1624 	/* Devices that are marked as unconfigured can only be powered
1625 	 * up as user channel. Trying to bring them up as normal devices
1626 	 * will result in a failure. Only user channel operation is
1627 	 * possible.
1628 	 *
1629 	 * When this function is called for a user channel, the flag
1630 	 * HCI_USER_CHANNEL will be set first before attempting to
1631 	 * open the device.
1632 	 */
1633 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1634 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1635 		err = -EOPNOTSUPP;
1636 		goto done;
1637 	}
1638 
1639 	/* We need to ensure that no other power on/off work is pending
1640 	 * before proceeding to call hci_dev_do_open. This is
1641 	 * particularly important if the setup procedure has not yet
1642 	 * completed.
1643 	 */
1644 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1645 		cancel_delayed_work(&hdev->power_off);
1646 
1647 	/* After this call it is guaranteed that the setup procedure
1648 	 * has finished. This means that error conditions like RFKILL
1649 	 * or no valid public or static random address apply.
1650 	 */
1651 	flush_workqueue(hdev->req_workqueue);
1652 
1653 	/* For controllers not using the management interface and that
1654 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1655 	 * so that pairing works for them. Once the management interface
1656 	 * is in use this bit will be cleared again and userspace has
1657 	 * to explicitly enable it.
1658 	 */
1659 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1660 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1661 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1662 
1663 	err = hci_dev_do_open(hdev);
1664 
1665 done:
1666 	hci_dev_put(hdev);
1667 	return err;
1668 }
1669 
1670 /* This function requires the caller holds hdev->lock */
1671 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1672 {
1673 	struct hci_conn_params *p;
1674 
1675 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1676 		if (p->conn) {
1677 			hci_conn_drop(p->conn);
1678 			hci_conn_put(p->conn);
1679 			p->conn = NULL;
1680 		}
1681 		list_del_init(&p->action);
1682 	}
1683 
1684 	BT_DBG("All LE pending actions cleared");
1685 }
1686 
1687 int hci_dev_do_close(struct hci_dev *hdev)
1688 {
1689 	bool auto_off;
1690 
1691 	BT_DBG("%s %p", hdev->name, hdev);
1692 
1693 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1694 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1695 	    test_bit(HCI_UP, &hdev->flags)) {
1696 		/* Execute vendor specific shutdown routine */
1697 		if (hdev->shutdown)
1698 			hdev->shutdown(hdev);
1699 	}
1700 
1701 	cancel_delayed_work(&hdev->power_off);
1702 
1703 	hci_request_cancel_all(hdev);
1704 	hci_req_sync_lock(hdev);
1705 
1706 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1707 		cancel_delayed_work_sync(&hdev->cmd_timer);
1708 		hci_req_sync_unlock(hdev);
1709 		return 0;
1710 	}
1711 
1712 	hci_leds_update_powered(hdev, false);
1713 
1714 	/* Flush RX and TX works */
1715 	flush_work(&hdev->tx_work);
1716 	flush_work(&hdev->rx_work);
1717 
1718 	if (hdev->discov_timeout > 0) {
1719 		hdev->discov_timeout = 0;
1720 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1721 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1722 	}
1723 
1724 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1725 		cancel_delayed_work(&hdev->service_cache);
1726 
1727 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1728 		struct adv_info *adv_instance;
1729 
1730 		cancel_delayed_work_sync(&hdev->rpa_expired);
1731 
1732 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1733 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1734 	}
1735 
1736 	/* Avoid potential lockdep warnings from the *_flush() calls by
1737 	 * ensuring the workqueue is empty up front.
1738 	 */
1739 	drain_workqueue(hdev->workqueue);
1740 
1741 	hci_dev_lock(hdev);
1742 
1743 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1744 
1745 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1746 
1747 	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1748 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1749 	    hci_dev_test_flag(hdev, HCI_MGMT))
1750 		__mgmt_power_off(hdev);
1751 
1752 	hci_inquiry_cache_flush(hdev);
1753 	hci_pend_le_actions_clear(hdev);
1754 	hci_conn_hash_flush(hdev);
1755 	hci_dev_unlock(hdev);
1756 
1757 	smp_unregister(hdev);
1758 
1759 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1760 
1761 	if (hdev->flush)
1762 		hdev->flush(hdev);
1763 
1764 	/* Reset device */
1765 	skb_queue_purge(&hdev->cmd_q);
1766 	atomic_set(&hdev->cmd_cnt, 1);
1767 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1768 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1769 		set_bit(HCI_INIT, &hdev->flags);
1770 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1771 		clear_bit(HCI_INIT, &hdev->flags);
1772 	}
1773 
1774 	/* Flush cmd work */
1775 	flush_work(&hdev->cmd_work);
1776 
1777 	/* Drop queues */
1778 	skb_queue_purge(&hdev->rx_q);
1779 	skb_queue_purge(&hdev->cmd_q);
1780 	skb_queue_purge(&hdev->raw_q);
1781 
1782 	/* Drop last sent command */
1783 	if (hdev->sent_cmd) {
1784 		cancel_delayed_work_sync(&hdev->cmd_timer);
1785 		kfree_skb(hdev->sent_cmd);
1786 		hdev->sent_cmd = NULL;
1787 	}
1788 
1789 	clear_bit(HCI_RUNNING, &hdev->flags);
1790 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1791 
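	/* Let a pending suspend request know that the power-down step has
	 * finished.
	 */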
1792 	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1793 		wake_up(&hdev->suspend_wait_q);
1794 
1795 	/* After this point our queues are empty
1796 	 * and no tasks are scheduled. */
1797 	hdev->close(hdev);
1798 
1799 	/* Clear flags */
1800 	hdev->flags &= BIT(HCI_RAW);
1801 	hci_dev_clear_volatile_flags(hdev);
1802 
1803 	/* Controller radio is available but is currently powered down */
1804 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1805 
1806 	memset(hdev->eir, 0, sizeof(hdev->eir));
1807 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1808 	bacpy(&hdev->random_addr, BDADDR_ANY);
1809 
1810 	hci_req_sync_unlock(hdev);
1811 
1812 	hci_dev_put(hdev);
1813 	return 0;
1814 }
1815 
1816 int hci_dev_close(__u16 dev)
1817 {
1818 	struct hci_dev *hdev;
1819 	int err;
1820 
1821 	hdev = hci_dev_get(dev);
1822 	if (!hdev)
1823 		return -ENODEV;
1824 
1825 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1826 		err = -EBUSY;
1827 		goto done;
1828 	}
1829 
1830 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1831 		cancel_delayed_work(&hdev->power_off);
1832 
1833 	err = hci_dev_do_close(hdev);
1834 
1835 done:
1836 	hci_dev_put(hdev);
1837 	return err;
1838 }
1839 
1840 static int hci_dev_do_reset(struct hci_dev *hdev)
1841 {
1842 	int ret;
1843 
1844 	BT_DBG("%s %p", hdev->name, hdev);
1845 
1846 	hci_req_sync_lock(hdev);
1847 
1848 	/* Drop queues */
1849 	skb_queue_purge(&hdev->rx_q);
1850 	skb_queue_purge(&hdev->cmd_q);
1851 
1852 	/* Avoid potential lockdep warnings from the *_flush() calls by
1853 	 * ensuring the workqueue is empty up front.
1854 	 */
1855 	drain_workqueue(hdev->workqueue);
1856 
1857 	hci_dev_lock(hdev);
1858 	hci_inquiry_cache_flush(hdev);
1859 	hci_conn_hash_flush(hdev);
1860 	hci_dev_unlock(hdev);
1861 
1862 	if (hdev->flush)
1863 		hdev->flush(hdev);
1864 
1865 	atomic_set(&hdev->cmd_cnt, 1);
1866 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1867 
1868 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1869 
1870 	hci_req_sync_unlock(hdev);
1871 	return ret;
1872 }
1873 
1874 int hci_dev_reset(__u16 dev)
1875 {
1876 	struct hci_dev *hdev;
1877 	int err;
1878 
1879 	hdev = hci_dev_get(dev);
1880 	if (!hdev)
1881 		return -ENODEV;
1882 
1883 	if (!test_bit(HCI_UP, &hdev->flags)) {
1884 		err = -ENETDOWN;
1885 		goto done;
1886 	}
1887 
1888 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1889 		err = -EBUSY;
1890 		goto done;
1891 	}
1892 
1893 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1894 		err = -EOPNOTSUPP;
1895 		goto done;
1896 	}
1897 
1898 	err = hci_dev_do_reset(hdev);
1899 
1900 done:
1901 	hci_dev_put(hdev);
1902 	return err;
1903 }
1904 
1905 int hci_dev_reset_stat(__u16 dev)
1906 {
1907 	struct hci_dev *hdev;
1908 	int ret = 0;
1909 
1910 	hdev = hci_dev_get(dev);
1911 	if (!hdev)
1912 		return -ENODEV;
1913 
1914 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1915 		ret = -EBUSY;
1916 		goto done;
1917 	}
1918 
1919 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1920 		ret = -EOPNOTSUPP;
1921 		goto done;
1922 	}
1923 
1924 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1925 
1926 done:
1927 	hci_dev_put(hdev);
1928 	return ret;
1929 }
1930 
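/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * Write Scan Enable value set outside of mgmt: SCAN_PAGE maps to
 * connectable and SCAN_INQUIRY to discoverable.
 */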
1931 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1932 {
1933 	bool conn_changed, discov_changed;
1934 
1935 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
1936 
1937 	if ((scan & SCAN_PAGE))
1938 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1939 							  HCI_CONNECTABLE);
1940 	else
1941 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1942 							   HCI_CONNECTABLE);
1943 
1944 	if ((scan & SCAN_INQUIRY)) {
1945 		discov_changed = !hci_dev_test_and_set_flag(hdev,
1946 							    HCI_DISCOVERABLE);
1947 	} else {
1948 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1949 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1950 							     HCI_DISCOVERABLE);
1951 	}
1952 
1953 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1954 		return;
1955 
1956 	if (conn_changed || discov_changed) {
1957 		/* In case this was disabled through mgmt */
1958 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1959 
1960 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1961 			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1962 
1963 		mgmt_new_settings(hdev);
1964 	}
1965 }
1966 
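/* Handle the legacy HCISET* ioctls. These only apply to configured,
 * BR/EDR-capable primary controllers that are not claimed by a user
 * channel.
 */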
1967 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1968 {
1969 	struct hci_dev *hdev;
1970 	struct hci_dev_req dr;
1971 	int err = 0;
1972 
1973 	if (copy_from_user(&dr, arg, sizeof(dr)))
1974 		return -EFAULT;
1975 
1976 	hdev = hci_dev_get(dr.dev_id);
1977 	if (!hdev)
1978 		return -ENODEV;
1979 
1980 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1981 		err = -EBUSY;
1982 		goto done;
1983 	}
1984 
1985 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1986 		err = -EOPNOTSUPP;
1987 		goto done;
1988 	}
1989 
1990 	if (hdev->dev_type != HCI_PRIMARY) {
1991 		err = -EOPNOTSUPP;
1992 		goto done;
1993 	}
1994 
1995 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1996 		err = -EOPNOTSUPP;
1997 		goto done;
1998 	}
1999 
2000 	switch (cmd) {
2001 	case HCISETAUTH:
2002 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2003 				   HCI_INIT_TIMEOUT, NULL);
2004 		break;
2005 
2006 	case HCISETENCRYPT:
2007 		if (!lmp_encrypt_capable(hdev)) {
2008 			err = -EOPNOTSUPP;
2009 			break;
2010 		}
2011 
2012 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
2013 			/* Auth must be enabled first */
2014 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2015 					   HCI_INIT_TIMEOUT, NULL);
2016 			if (err)
2017 				break;
2018 		}
2019 
2020 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2021 				   HCI_INIT_TIMEOUT, NULL);
2022 		break;
2023 
2024 	case HCISETSCAN:
2025 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2026 				   HCI_INIT_TIMEOUT, NULL);
2027 
2028 		/* Ensure that the connectable and discoverable states
2029 		 * get correctly modified as this was a non-mgmt change.
2030 		 */
2031 		if (!err)
2032 			hci_update_scan_state(hdev, dr.dev_opt);
2033 		break;
2034 
2035 	case HCISETLINKPOL:
2036 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2037 				   HCI_INIT_TIMEOUT, NULL);
2038 		break;
2039 
2040 	case HCISETLINKMODE:
2041 		hdev->link_mode = ((__u16) dr.dev_opt) &
2042 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
2043 		break;
2044 
2045 	case HCISETPTYPE:
2046 		if (hdev->pkt_type == (__u16) dr.dev_opt)
2047 			break;
2048 
2049 		hdev->pkt_type = (__u16) dr.dev_opt;
2050 		mgmt_phy_configuration_changed(hdev, NULL);
2051 		break;
2052 
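	/* dev_opt carries two 16-bit values back to back: the packet
	 * count in the first half and the MTU in the second.
	 */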
2053 	case HCISETACLMTU:
2054 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2055 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2056 		break;
2057 
2058 	case HCISETSCOMTU:
2059 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2060 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2061 		break;
2062 
2063 	default:
2064 		err = -EINVAL;
2065 		break;
2066 	}
2067 
2068 done:
2069 	hci_dev_put(hdev);
2070 	return err;
2071 }
2072 
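/* HCIGETDEVLIST helper: copy up to dev_num (dev_id, flags) pairs
 * describing the registered controllers back to userspace.
 */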
2073 int hci_get_dev_list(void __user *arg)
2074 {
2075 	struct hci_dev *hdev;
2076 	struct hci_dev_list_req *dl;
2077 	struct hci_dev_req *dr;
2078 	int n = 0, size, err;
2079 	__u16 dev_num;
2080 
2081 	if (get_user(dev_num, (__u16 __user *) arg))
2082 		return -EFAULT;
2083 
2084 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2085 		return -EINVAL;
2086 
2087 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2088 
2089 	dl = kzalloc(size, GFP_KERNEL);
2090 	if (!dl)
2091 		return -ENOMEM;
2092 
2093 	dr = dl->dev_req;
2094 
2095 	read_lock(&hci_dev_list_lock);
2096 	list_for_each_entry(hdev, &hci_dev_list, list) {
2097 		unsigned long flags = hdev->flags;
2098 
2099 		/* When auto-off is configured the transport is still
2100 		 * running, but in that case still report the device
2101 		 * as down.
2102 		 */
2103 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2104 			flags &= ~BIT(HCI_UP);
2105 
2106 		(dr + n)->dev_id  = hdev->id;
2107 		(dr + n)->dev_opt = flags;
2108 
2109 		if (++n >= dev_num)
2110 			break;
2111 	}
2112 	read_unlock(&hci_dev_list_lock);
2113 
2114 	dl->dev_num = n;
2115 	size = sizeof(*dl) + n * sizeof(*dr);
2116 
2117 	err = copy_to_user(arg, dl, size);
2118 	kfree(dl);
2119 
2120 	return err ? -EFAULT : 0;
2121 }
2122 
2123 int hci_get_dev_info(void __user *arg)
2124 {
2125 	struct hci_dev *hdev;
2126 	struct hci_dev_info di;
2127 	unsigned long flags;
2128 	int err = 0;
2129 
2130 	if (copy_from_user(&di, arg, sizeof(di)))
2131 		return -EFAULT;
2132 
2133 	hdev = hci_dev_get(di.dev_id);
2134 	if (!hdev)
2135 		return -ENODEV;
2136 
2137 	/* When auto-off is configured the transport is still
2138 	 * running, but in that case still report the device
2139 	 * as down.
2140 	 */
2141 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2142 		flags = hdev->flags & ~BIT(HCI_UP);
2143 	else
2144 		flags = hdev->flags;
2145 
2146 	strcpy(di.name, hdev->name);
2147 	di.bdaddr   = hdev->bdaddr;
2148 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2149 	di.flags    = flags;
2150 	di.pkt_type = hdev->pkt_type;
2151 	if (lmp_bredr_capable(hdev)) {
2152 		di.acl_mtu  = hdev->acl_mtu;
2153 		di.acl_pkts = hdev->acl_pkts;
2154 		di.sco_mtu  = hdev->sco_mtu;
2155 		di.sco_pkts = hdev->sco_pkts;
2156 	} else {
2157 		di.acl_mtu  = hdev->le_mtu;
2158 		di.acl_pkts = hdev->le_pkts;
2159 		di.sco_mtu  = 0;
2160 		di.sco_pkts = 0;
2161 	}
2162 	di.link_policy = hdev->link_policy;
2163 	di.link_mode   = hdev->link_mode;
2164 
2165 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2166 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2167 
2168 	if (copy_to_user(arg, &di, sizeof(di)))
2169 		err = -EFAULT;
2170 
2171 	hci_dev_put(hdev);
2172 
2173 	return err;
2174 }
2175 
2176 /* ---- Interface to HCI drivers ---- */
2177 
2178 static int hci_rfkill_set_block(void *data, bool blocked)
2179 {
2180 	struct hci_dev *hdev = data;
2181 
2182 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2183 
2184 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2185 		return -EBUSY;
2186 
2187 	if (blocked) {
2188 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2189 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2190 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2191 			hci_dev_do_close(hdev);
2192 	} else {
2193 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2194 	}
2195 
2196 	return 0;
2197 }
2198 
2199 static const struct rfkill_ops hci_rfkill_ops = {
2200 	.set_block = hci_rfkill_set_block,
2201 };
2202 
2203 static void hci_power_on(struct work_struct *work)
2204 {
2205 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2206 	int err;
2207 
2208 	BT_DBG("%s", hdev->name);
2209 
2210 	if (test_bit(HCI_UP, &hdev->flags) &&
2211 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2212 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2213 		cancel_delayed_work(&hdev->power_off);
2214 		hci_req_sync_lock(hdev);
2215 		err = __hci_req_hci_power_on(hdev);
2216 		hci_req_sync_unlock(hdev);
2217 		mgmt_power_on(hdev, err);
2218 		return;
2219 	}
2220 
2221 	err = hci_dev_do_open(hdev);
2222 	if (err < 0) {
2223 		hci_dev_lock(hdev);
2224 		mgmt_set_powered_failed(hdev, err);
2225 		hci_dev_unlock(hdev);
2226 		return;
2227 	}
2228 
2229 	/* During the HCI setup phase, a few error conditions are
2230 	 * ignored and they need to be checked now. If they are still
2231 	 * valid, it is important to turn the device back off.
2232 	 */
2233 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2234 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2235 	    (hdev->dev_type == HCI_PRIMARY &&
2236 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2237 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2238 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2239 		hci_dev_do_close(hdev);
2240 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2241 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2242 				   HCI_AUTO_OFF_TIMEOUT);
2243 	}
2244 
2245 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2246 		/* For unconfigured devices, set the HCI_RAW flag
2247 		 * so that userspace can easily identify them.
2248 		 */
2249 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2250 			set_bit(HCI_RAW, &hdev->flags);
2251 
2252 		/* For fully configured devices, this will send
2253 		 * the Index Added event. For unconfigured devices,
2254 		 * it will send the Unconfigured Index Added event.
2255 		 *
2256 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2257 		 * and no event will be sent.
2258 		 */
2259 		mgmt_index_added(hdev);
2260 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2261 		/* Now that the controller is configured, it is
2262 		 * important to clear the HCI_RAW flag.
2263 		 */
2264 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2265 			clear_bit(HCI_RAW, &hdev->flags);
2266 
2267 		/* Powering on the controller with HCI_CONFIG set only
2268 		 * happens with the transition from unconfigured to
2269 		 * configured. This will send the Index Added event.
2270 		 */
2271 		mgmt_index_added(hdev);
2272 	}
2273 }
2274 
2275 static void hci_power_off(struct work_struct *work)
2276 {
2277 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2278 					    power_off.work);
2279 
2280 	BT_DBG("%s", hdev->name);
2281 
2282 	hci_dev_do_close(hdev);
2283 }
2284 
2285 static void hci_error_reset(struct work_struct *work)
2286 {
2287 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2288 
2289 	BT_DBG("%s", hdev->name);
2290 
2291 	if (hdev->hw_error)
2292 		hdev->hw_error(hdev, hdev->hw_error_code);
2293 	else
2294 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2295 
2296 	if (hci_dev_do_close(hdev))
2297 		return;
2298 
2299 	hci_dev_do_open(hdev);
2300 }
2301 
2302 void hci_uuids_clear(struct hci_dev *hdev)
2303 {
2304 	struct bt_uuid *uuid, *tmp;
2305 
2306 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2307 		list_del(&uuid->list);
2308 		kfree(uuid);
2309 	}
2310 }
2311 
2312 void hci_link_keys_clear(struct hci_dev *hdev)
2313 {
2314 	struct link_key *key;
2315 
2316 	list_for_each_entry(key, &hdev->link_keys, list) {
2317 		list_del_rcu(&key->list);
2318 		kfree_rcu(key, rcu);
2319 	}
2320 }
2321 
2322 void hci_smp_ltks_clear(struct hci_dev *hdev)
2323 {
2324 	struct smp_ltk *k;
2325 
2326 	list_for_each_entry(k, &hdev->long_term_keys, list) {
2327 		list_del_rcu(&k->list);
2328 		kfree_rcu(k, rcu);
2329 	}
2330 }
2331 
2332 void hci_smp_irks_clear(struct hci_dev *hdev)
2333 {
2334 	struct smp_irk *k;
2335 
2336 	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2337 		list_del_rcu(&k->list);
2338 		kfree_rcu(k, rcu);
2339 	}
2340 }
2341 
2342 void hci_blocked_keys_clear(struct hci_dev *hdev)
2343 {
2344 	struct blocked_key *b;
2345 
2346 	list_for_each_entry(b, &hdev->blocked_keys, list) {
2347 		list_del_rcu(&b->list);
2348 		kfree_rcu(b, rcu);
2349 	}
2350 }
2351 
2352 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2353 {
2354 	bool blocked = false;
2355 	struct blocked_key *b;
2356 
2357 	rcu_read_lock();
2358 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2359 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2360 			blocked = true;
2361 			break;
2362 		}
2363 	}
2364 
2365 	rcu_read_unlock();
2366 	return blocked;
2367 }
2368 
2369 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2370 {
2371 	struct link_key *k;
2372 
2373 	rcu_read_lock();
2374 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2375 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2376 			rcu_read_unlock();
2377 
2378 			if (hci_is_blocked_key(hdev,
2379 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
2380 					       k->val)) {
2381 				bt_dev_warn_ratelimited(hdev,
2382 							"Link key blocked for %pMR",
2383 							&k->bdaddr);
2384 				return NULL;
2385 			}
2386 
2387 			return k;
2388 		}
2389 	}
2390 	rcu_read_unlock();
2391 
2392 	return NULL;
2393 }
2394 
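/* Decide whether a new BR/EDR link key should be stored persistently,
 * based on the key type and the bonding requirements of both sides of
 * the connection.
 */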
2395 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2396 			       u8 key_type, u8 old_key_type)
2397 {
2398 	/* Legacy key */
2399 	if (key_type < 0x03)
2400 		return true;
2401 
2402 	/* Debug keys are insecure so don't store them persistently */
2403 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2404 		return false;
2405 
2406 	/* Changed combination key and there's no previous one */
2407 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2408 		return false;
2409 
2410 	/* Security mode 3 case */
2411 	if (!conn)
2412 		return true;
2413 
2414 	/* BR/EDR key derived using SC from an LE link */
2415 	if (conn->type == LE_LINK)
2416 		return true;
2417 
2418 	/* Neither local nor remote side had no-bonding as requirement */
2419 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2420 		return true;
2421 
2422 	/* Local side had dedicated bonding as requirement */
2423 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2424 		return true;
2425 
2426 	/* Remote side had dedicated bonding as requirement */
2427 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2428 		return true;
2429 
2430 	/* If none of the above criteria match, then don't store the key
2431 	 * persistently */
2432 	return false;
2433 }
2434 
2435 static u8 ltk_role(u8 type)
2436 {
2437 	if (type == SMP_LTK)
2438 		return HCI_ROLE_MASTER;
2439 
2440 	return HCI_ROLE_SLAVE;
2441 }
2442 
2443 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2444 			     u8 addr_type, u8 role)
2445 {
2446 	struct smp_ltk *k;
2447 
2448 	rcu_read_lock();
2449 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2450 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2451 			continue;
2452 
2453 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2454 			rcu_read_unlock();
2455 
2456 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2457 					       k->val)) {
2458 				bt_dev_warn_ratelimited(hdev,
2459 							"LTK blocked for %pMR",
2460 							&k->bdaddr);
2461 				return NULL;
2462 			}
2463 
2464 			return k;
2465 		}
2466 	}
2467 	rcu_read_unlock();
2468 
2469 	return NULL;
2470 }
2471 
2472 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2473 {
2474 	struct smp_irk *irk_to_return = NULL;
2475 	struct smp_irk *irk;
2476 
2477 	rcu_read_lock();
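	/* First pass: look for an IRK whose cached RPA already matches. */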
2478 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2479 		if (!bacmp(&irk->rpa, rpa)) {
2480 			irk_to_return = irk;
2481 			goto done;
2482 		}
2483 	}
2484 
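	/* Second pass: resolve the RPA against each IRK and cache it on
	 * the entry that matches.
	 */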
2485 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2486 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2487 			bacpy(&irk->rpa, rpa);
2488 			irk_to_return = irk;
2489 			goto done;
2490 		}
2491 	}
2492 
2493 done:
2494 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2495 						irk_to_return->val)) {
2496 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2497 					&irk_to_return->bdaddr);
2498 		irk_to_return = NULL;
2499 	}
2500 
2501 	rcu_read_unlock();
2502 
2503 	return irk_to_return;
2504 }
2505 
2506 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2507 				     u8 addr_type)
2508 {
2509 	struct smp_irk *irk_to_return = NULL;
2510 	struct smp_irk *irk;
2511 
2512 	/* Identity Address must be public or static random */
2513 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2514 		return NULL;
2515 
2516 	rcu_read_lock();
2517 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2518 		if (addr_type == irk->addr_type &&
2519 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2520 			irk_to_return = irk;
2521 			goto done;
2522 		}
2523 	}
2524 
2525 done:
2526 
2527 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2528 						irk_to_return->val)) {
2529 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2530 					&irk_to_return->bdaddr);
2531 		irk_to_return = NULL;
2532 	}
2533 
2534 	rcu_read_unlock();
2535 
2536 	return irk_to_return;
2537 }
2538 
2539 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2540 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2541 				  u8 pin_len, bool *persistent)
2542 {
2543 	struct link_key *key, *old_key;
2544 	u8 old_key_type;
2545 
2546 	old_key = hci_find_link_key(hdev, bdaddr);
2547 	if (old_key) {
2548 		old_key_type = old_key->type;
2549 		key = old_key;
2550 	} else {
2551 		old_key_type = conn ? conn->key_type : 0xff;
2552 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2553 		if (!key)
2554 			return NULL;
2555 		list_add_rcu(&key->list, &hdev->link_keys);
2556 	}
2557 
2558 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2559 
2560 	/* Some buggy controller combinations generate a changed
2561 	 * combination key for legacy pairing even when there's no
2562 	 * previous key */
2563 	if (type == HCI_LK_CHANGED_COMBINATION &&
2564 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2565 		type = HCI_LK_COMBINATION;
2566 		if (conn)
2567 			conn->key_type = type;
2568 	}
2569 
2570 	bacpy(&key->bdaddr, bdaddr);
2571 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2572 	key->pin_len = pin_len;
2573 
2574 	if (type == HCI_LK_CHANGED_COMBINATION)
2575 		key->type = old_key_type;
2576 	else
2577 		key->type = type;
2578 
2579 	if (persistent)
2580 		*persistent = hci_persistent_key(hdev, conn, type,
2581 						 old_key_type);
2582 
2583 	return key;
2584 }
2585 
2586 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2587 			    u8 addr_type, u8 type, u8 authenticated,
2588 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2589 {
2590 	struct smp_ltk *key, *old_key;
2591 	u8 role = ltk_role(type);
2592 
2593 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2594 	if (old_key)
2595 		key = old_key;
2596 	else {
2597 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2598 		if (!key)
2599 			return NULL;
2600 		list_add_rcu(&key->list, &hdev->long_term_keys);
2601 	}
2602 
2603 	bacpy(&key->bdaddr, bdaddr);
2604 	key->bdaddr_type = addr_type;
2605 	memcpy(key->val, tk, sizeof(key->val));
2606 	key->authenticated = authenticated;
2607 	key->ediv = ediv;
2608 	key->rand = rand;
2609 	key->enc_size = enc_size;
2610 	key->type = type;
2611 
2612 	return key;
2613 }
2614 
2615 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2616 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2617 {
2618 	struct smp_irk *irk;
2619 
2620 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2621 	if (!irk) {
2622 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2623 		if (!irk)
2624 			return NULL;
2625 
2626 		bacpy(&irk->bdaddr, bdaddr);
2627 		irk->addr_type = addr_type;
2628 
2629 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2630 	}
2631 
2632 	memcpy(irk->val, val, 16);
2633 	bacpy(&irk->rpa, rpa);
2634 
2635 	return irk;
2636 }
2637 
2638 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2639 {
2640 	struct link_key *key;
2641 
2642 	key = hci_find_link_key(hdev, bdaddr);
2643 	if (!key)
2644 		return -ENOENT;
2645 
2646 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2647 
2648 	list_del_rcu(&key->list);
2649 	kfree_rcu(key, rcu);
2650 
2651 	return 0;
2652 }
2653 
2654 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2655 {
2656 	struct smp_ltk *k;
2657 	int removed = 0;
2658 
2659 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2660 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2661 			continue;
2662 
2663 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2664 
2665 		list_del_rcu(&k->list);
2666 		kfree_rcu(k, rcu);
2667 		removed++;
2668 	}
2669 
2670 	return removed ? 0 : -ENOENT;
2671 }
2672 
2673 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2674 {
2675 	struct smp_irk *k;
2676 
2677 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2678 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2679 			continue;
2680 
2681 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2682 
2683 		list_del_rcu(&k->list);
2684 		kfree_rcu(k, rcu);
2685 	}
2686 }
2687 
2688 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2689 {
2690 	struct smp_ltk *k;
2691 	struct smp_irk *irk;
2692 	u8 addr_type;
2693 
2694 	if (type == BDADDR_BREDR) {
2695 		if (hci_find_link_key(hdev, bdaddr))
2696 			return true;
2697 		return false;
2698 	}
2699 
2700 	/* Convert to HCI addr type which struct smp_ltk uses */
2701 	if (type == BDADDR_LE_PUBLIC)
2702 		addr_type = ADDR_LE_DEV_PUBLIC;
2703 	else
2704 		addr_type = ADDR_LE_DEV_RANDOM;
2705 
2706 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2707 	if (irk) {
2708 		bdaddr = &irk->bdaddr;
2709 		addr_type = irk->addr_type;
2710 	}
2711 
2712 	rcu_read_lock();
2713 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2714 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2715 			rcu_read_unlock();
2716 			return true;
2717 		}
2718 	}
2719 	rcu_read_unlock();
2720 
2721 	return false;
2722 }
2723 
2724 /* HCI command timer function */
2725 static void hci_cmd_timeout(struct work_struct *work)
2726 {
2727 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2728 					    cmd_timer.work);
2729 
2730 	if (hdev->sent_cmd) {
2731 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2732 		u16 opcode = __le16_to_cpu(sent->opcode);
2733 
2734 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2735 	} else {
2736 		bt_dev_err(hdev, "command tx timeout");
2737 	}
2738 
2739 	if (hdev->cmd_timeout)
2740 		hdev->cmd_timeout(hdev);
2741 
2742 	atomic_set(&hdev->cmd_cnt, 1);
2743 	queue_work(hdev->workqueue, &hdev->cmd_work);
2744 }
2745 
2746 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2747 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2748 {
2749 	struct oob_data *data;
2750 
2751 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2752 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2753 			continue;
2754 		if (data->bdaddr_type != bdaddr_type)
2755 			continue;
2756 		return data;
2757 	}
2758 
2759 	return NULL;
2760 }
2761 
2762 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2763 			       u8 bdaddr_type)
2764 {
2765 	struct oob_data *data;
2766 
2767 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2768 	if (!data)
2769 		return -ENOENT;
2770 
2771 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2772 
2773 	list_del(&data->list);
2774 	kfree(data);
2775 
2776 	return 0;
2777 }
2778 
2779 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2780 {
2781 	struct oob_data *data, *n;
2782 
2783 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2784 		list_del(&data->list);
2785 		kfree(data);
2786 	}
2787 }
2788 
2789 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2790 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2791 			    u8 *hash256, u8 *rand256)
2792 {
2793 	struct oob_data *data;
2794 
2795 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2796 	if (!data) {
2797 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2798 		if (!data)
2799 			return -ENOMEM;
2800 
2801 		bacpy(&data->bdaddr, bdaddr);
2802 		data->bdaddr_type = bdaddr_type;
2803 		list_add(&data->list, &hdev->remote_oob_data);
2804 	}
2805 
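	/* data->present is a bitmask: bit 0 is set when the P-192
	 * hash/randomizer values are valid, bit 1 when the P-256 values
	 * are valid.
	 */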
2806 	if (hash192 && rand192) {
2807 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2808 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2809 		if (hash256 && rand256)
2810 			data->present = 0x03;
2811 	} else {
2812 		memset(data->hash192, 0, sizeof(data->hash192));
2813 		memset(data->rand192, 0, sizeof(data->rand192));
2814 		if (hash256 && rand256)
2815 			data->present = 0x02;
2816 		else
2817 			data->present = 0x00;
2818 	}
2819 
2820 	if (hash256 && rand256) {
2821 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2822 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2823 	} else {
2824 		memset(data->hash256, 0, sizeof(data->hash256));
2825 		memset(data->rand256, 0, sizeof(data->rand256));
2826 		if (hash192 && rand192)
2827 			data->present = 0x01;
2828 	}
2829 
2830 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2831 
2832 	return 0;
2833 }
2834 
2835 /* This function requires the caller holds hdev->lock */
2836 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2837 {
2838 	struct adv_info *adv_instance;
2839 
2840 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2841 		if (adv_instance->instance == instance)
2842 			return adv_instance;
2843 	}
2844 
2845 	return NULL;
2846 }
2847 
2848 /* This function requires the caller holds hdev->lock */
2849 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2850 {
2851 	struct adv_info *cur_instance;
2852 
2853 	cur_instance = hci_find_adv_instance(hdev, instance);
2854 	if (!cur_instance)
2855 		return NULL;
2856 
2857 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2858 					    struct adv_info, list))
2859 		return list_first_entry(&hdev->adv_instances,
2860 						 struct adv_info, list);
2861 	else
2862 		return list_next_entry(cur_instance, list);
2863 }
2864 
2865 /* This function requires the caller holds hdev->lock */
2866 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2867 {
2868 	struct adv_info *adv_instance;
2869 
2870 	adv_instance = hci_find_adv_instance(hdev, instance);
2871 	if (!adv_instance)
2872 		return -ENOENT;
2873 
2874 	BT_DBG("%s removing %d", hdev->name, instance);
2875 
2876 	if (hdev->cur_adv_instance == instance) {
2877 		if (hdev->adv_instance_timeout) {
2878 			cancel_delayed_work(&hdev->adv_instance_expire);
2879 			hdev->adv_instance_timeout = 0;
2880 		}
2881 		hdev->cur_adv_instance = 0x00;
2882 	}
2883 
2884 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2885 
2886 	list_del(&adv_instance->list);
2887 	kfree(adv_instance);
2888 
2889 	hdev->adv_instance_cnt--;
2890 
2891 	return 0;
2892 }
2893 
2894 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2895 {
2896 	struct adv_info *adv_instance, *n;
2897 
2898 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2899 		adv_instance->rpa_expired = rpa_expired;
2900 }
2901 
2902 /* This function requires the caller holds hdev->lock */
2903 void hci_adv_instances_clear(struct hci_dev *hdev)
2904 {
2905 	struct adv_info *adv_instance, *n;
2906 
2907 	if (hdev->adv_instance_timeout) {
2908 		cancel_delayed_work(&hdev->adv_instance_expire);
2909 		hdev->adv_instance_timeout = 0;
2910 	}
2911 
2912 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2913 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2914 		list_del(&adv_instance->list);
2915 		kfree(adv_instance);
2916 	}
2917 
2918 	hdev->adv_instance_cnt = 0;
2919 	hdev->cur_adv_instance = 0x00;
2920 }
2921 
2922 static void adv_instance_rpa_expired(struct work_struct *work)
2923 {
2924 	struct adv_info *adv_instance = container_of(work, struct adv_info,
2925 						     rpa_expired_cb.work);
2926 
2927 	BT_DBG("");
2928 
2929 	adv_instance->rpa_expired = true;
2930 }
2931 
2932 /* This function requires the caller holds hdev->lock */
2933 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2934 			 u16 adv_data_len, u8 *adv_data,
2935 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2936 			 u16 timeout, u16 duration)
2937 {
2938 	struct adv_info *adv_instance;
2939 
2940 	adv_instance = hci_find_adv_instance(hdev, instance);
2941 	if (adv_instance) {
2942 		memset(adv_instance->adv_data, 0,
2943 		       sizeof(adv_instance->adv_data));
2944 		memset(adv_instance->scan_rsp_data, 0,
2945 		       sizeof(adv_instance->scan_rsp_data));
2946 	} else {
2947 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2948 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2949 			return -EOVERFLOW;
2950 
2951 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2952 		if (!adv_instance)
2953 			return -ENOMEM;
2954 
2955 		adv_instance->pending = true;
2956 		adv_instance->instance = instance;
2957 		list_add(&adv_instance->list, &hdev->adv_instances);
2958 		hdev->adv_instance_cnt++;
2959 	}
2960 
2961 	adv_instance->flags = flags;
2962 	adv_instance->adv_data_len = adv_data_len;
2963 	adv_instance->scan_rsp_len = scan_rsp_len;
2964 
2965 	if (adv_data_len)
2966 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2967 
2968 	if (scan_rsp_len)
2969 		memcpy(adv_instance->scan_rsp_data,
2970 		       scan_rsp_data, scan_rsp_len);
2971 
2972 	adv_instance->timeout = timeout;
2973 	adv_instance->remaining_time = timeout;
2974 
2975 	if (duration == 0)
2976 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2977 	else
2978 		adv_instance->duration = duration;
2979 
2980 	adv_instance->tx_power = HCI_TX_POWER_INVALID;
2981 
2982 	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2983 			  adv_instance_rpa_expired);
2984 
2985 	BT_DBG("%s for %d", hdev->name, instance);
2986 
2987 	return 0;
2988 }
2989 
2990 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2991 					 bdaddr_t *bdaddr, u8 type)
2992 {
2993 	struct bdaddr_list *b;
2994 
2995 	list_for_each_entry(b, bdaddr_list, list) {
2996 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2997 			return b;
2998 	}
2999 
3000 	return NULL;
3001 }
3002 
3003 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3004 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3005 				u8 type)
3006 {
3007 	struct bdaddr_list_with_irk *b;
3008 
3009 	list_for_each_entry(b, bdaddr_list, list) {
3010 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3011 			return b;
3012 	}
3013 
3014 	return NULL;
3015 }
3016 
3017 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3018 {
3019 	struct bdaddr_list *b, *n;
3020 
3021 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
3022 		list_del(&b->list);
3023 		kfree(b);
3024 	}
3025 }
3026 
3027 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3028 {
3029 	struct bdaddr_list *entry;
3030 
3031 	if (!bacmp(bdaddr, BDADDR_ANY))
3032 		return -EBADF;
3033 
3034 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
3035 		return -EEXIST;
3036 
3037 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3038 	if (!entry)
3039 		return -ENOMEM;
3040 
3041 	bacpy(&entry->bdaddr, bdaddr);
3042 	entry->bdaddr_type = type;
3043 
3044 	list_add(&entry->list, list);
3045 
3046 	return 0;
3047 }
3048 
3049 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3050 					u8 type, u8 *peer_irk, u8 *local_irk)
3051 {
3052 	struct bdaddr_list_with_irk *entry;
3053 
3054 	if (!bacmp(bdaddr, BDADDR_ANY))
3055 		return -EBADF;
3056 
3057 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
3058 		return -EEXIST;
3059 
3060 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3061 	if (!entry)
3062 		return -ENOMEM;
3063 
3064 	bacpy(&entry->bdaddr, bdaddr);
3065 	entry->bdaddr_type = type;
3066 
3067 	if (peer_irk)
3068 		memcpy(entry->peer_irk, peer_irk, 16);
3069 
3070 	if (local_irk)
3071 		memcpy(entry->local_irk, local_irk, 16);
3072 
3073 	list_add(&entry->list, list);
3074 
3075 	return 0;
3076 }
3077 
3078 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3079 {
3080 	struct bdaddr_list *entry;
3081 
3082 	if (!bacmp(bdaddr, BDADDR_ANY)) {
3083 		hci_bdaddr_list_clear(list);
3084 		return 0;
3085 	}
3086 
3087 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3088 	if (!entry)
3089 		return -ENOENT;
3090 
3091 	list_del(&entry->list);
3092 	kfree(entry);
3093 
3094 	return 0;
3095 }
3096 
3097 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3098 							u8 type)
3099 {
3100 	struct bdaddr_list_with_irk *entry;
3101 
3102 	if (!bacmp(bdaddr, BDADDR_ANY)) {
3103 		hci_bdaddr_list_clear(list);
3104 		return 0;
3105 	}
3106 
3107 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3108 	if (!entry)
3109 		return -ENOENT;
3110 
3111 	list_del(&entry->list);
3112 	kfree(entry);
3113 
3114 	return 0;
3115 }
3116 
3117 /* This function requires the caller holds hdev->lock */
3118 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3119 					       bdaddr_t *addr, u8 addr_type)
3120 {
3121 	struct hci_conn_params *params;
3122 
3123 	list_for_each_entry(params, &hdev->le_conn_params, list) {
3124 		if (bacmp(&params->addr, addr) == 0 &&
3125 		    params->addr_type == addr_type) {
3126 			return params;
3127 		}
3128 	}
3129 
3130 	return NULL;
3131 }
3132 
3133 /* This function requires the caller holds hdev->lock */
3134 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3135 						  bdaddr_t *addr, u8 addr_type)
3136 {
3137 	struct hci_conn_params *param;
3138 
3139 	list_for_each_entry(param, list, action) {
3140 		if (bacmp(&param->addr, addr) == 0 &&
3141 		    param->addr_type == addr_type)
3142 			return param;
3143 	}
3144 
3145 	return NULL;
3146 }
3147 
3148 /* This function requires the caller holds hdev->lock */
3149 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3150 					    bdaddr_t *addr, u8 addr_type)
3151 {
3152 	struct hci_conn_params *params;
3153 
3154 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3155 	if (params)
3156 		return params;
3157 
3158 	params = kzalloc(sizeof(*params), GFP_KERNEL);
3159 	if (!params) {
3160 		bt_dev_err(hdev, "out of memory");
3161 		return NULL;
3162 	}
3163 
3164 	bacpy(&params->addr, addr);
3165 	params->addr_type = addr_type;
3166 
3167 	list_add(&params->list, &hdev->le_conn_params);
3168 	INIT_LIST_HEAD(&params->action);
3169 
3170 	params->conn_min_interval = hdev->le_conn_min_interval;
3171 	params->conn_max_interval = hdev->le_conn_max_interval;
3172 	params->conn_latency = hdev->le_conn_latency;
3173 	params->supervision_timeout = hdev->le_supv_timeout;
3174 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
3175 
3176 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3177 
3178 	return params;
3179 }
3180 
3181 static void hci_conn_params_free(struct hci_conn_params *params)
3182 {
3183 	if (params->conn) {
3184 		hci_conn_drop(params->conn);
3185 		hci_conn_put(params->conn);
3186 	}
3187 
3188 	list_del(&params->action);
3189 	list_del(&params->list);
3190 	kfree(params);
3191 }
3192 
3193 /* This function requires the caller holds hdev->lock */
3194 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3195 {
3196 	struct hci_conn_params *params;
3197 
3198 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3199 	if (!params)
3200 		return;
3201 
3202 	hci_conn_params_free(params);
3203 
3204 	hci_update_background_scan(hdev);
3205 
3206 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3207 }
3208 
3209 /* This function requires the caller holds hdev->lock */
3210 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3211 {
3212 	struct hci_conn_params *params, *tmp;
3213 
3214 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3215 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3216 			continue;
3217 
3218 		/* If trying to establish a one-time connection to a disabled
3219 		 * device, leave the params, but mark them for one-time use.
3220 		 */
3221 		if (params->explicit_connect) {
3222 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3223 			continue;
3224 		}
3225 
3226 		list_del(&params->list);
3227 		kfree(params);
3228 	}
3229 
3230 	BT_DBG("All LE disabled connection parameters were removed");
3231 }
3232 
3233 /* This function requires the caller holds hdev->lock */
3234 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3235 {
3236 	struct hci_conn_params *params, *tmp;
3237 
3238 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3239 		hci_conn_params_free(params);
3240 
3241 	BT_DBG("All LE connection parameters were removed");
3242 }
3243 
3244 /* Copy the Identity Address of the controller.
3245  *
3246  * If the controller has a public BD_ADDR, then by default use that one.
3247  * If this is an LE-only controller without a public address, default to
3248  * the static random address.
3249  *
3250  * For debugging purposes it is possible to force controllers with a
3251  * public address to use the static random address instead.
3252  *
3253  * In case BR/EDR has been disabled on a dual-mode controller and
3254  * userspace has configured a static address, then that address
3255  * becomes the identity address instead of the public BR/EDR address.
3256  */
3257 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3258 			       u8 *bdaddr_type)
3259 {
3260 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3261 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3262 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3263 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
3264 		bacpy(bdaddr, &hdev->static_addr);
3265 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3266 	} else {
3267 		bacpy(bdaddr, &hdev->bdaddr);
3268 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3269 	}
3270 }
3271 
3272 static int hci_suspend_wait_event(struct hci_dev *hdev)
3273 {
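/* WAKE_COND holds once every bit in suspend_tasks has been cleared:
 * find_first_bit() returns the bitmap size when no bit is set.
 */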
3274 #define WAKE_COND                                                              \
3275 	(find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3276 	 __SUSPEND_NUM_TASKS)
3277 
3278 	int i;
3279 	int ret = wait_event_timeout(hdev->suspend_wait_q,
3280 				     WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3281 
3282 	if (ret == 0) {
3283 		bt_dev_dbg(hdev, "Timed out waiting for suspend");
3284 		for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3285 			if (test_bit(i, hdev->suspend_tasks))
3286 				bt_dev_dbg(hdev, "Bit %d is set", i);
3287 			clear_bit(i, hdev->suspend_tasks);
3288 		}
3289 
3290 		ret = -ETIMEDOUT;
3291 	} else {
3292 		ret = 0;
3293 	}
3294 
3295 	return ret;
3296 }
3297 
3298 static void hci_prepare_suspend(struct work_struct *work)
3299 {
3300 	struct hci_dev *hdev =
3301 		container_of(work, struct hci_dev, suspend_prepare);
3302 
3303 	hci_dev_lock(hdev);
3304 	hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3305 	hci_dev_unlock(hdev);
3306 }
3307 
3308 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3309 				void *data)
3310 {
3311 	struct hci_dev *hdev =
3312 		container_of(nb, struct hci_dev, suspend_notifier);
3313 	int ret = 0;
3314 
3315 	/* If powering down, wait for completion. */
3316 	if (mgmt_powering_down(hdev)) {
3317 		set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3318 		ret = hci_suspend_wait_event(hdev);
3319 		if (ret)
3320 			goto done;
3321 	}
3322 
3323 	/* Suspend notifier should only act on events when powered. */
3324 	if (!hdev_is_powered(hdev))
3325 		goto done;
3326 
3327 	if (action == PM_SUSPEND_PREPARE) {
3328 		/* Suspend consists of two actions:
3329 		 *  - First, disconnect everything and make the controller not
3330 		 *    connectable (disabling scanning)
3331 		 *  - Second, program event filter/whitelist and enable scan
3332 		 */
3333 		hdev->suspend_state_next = BT_SUSPEND_DISCONNECT;
3334 		set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3335 		queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3336 		ret = hci_suspend_wait_event(hdev);
3337 
3338 		/* If the disconnect portion failed, don't attempt to complete
3339 		 * by configuring the whitelist. The suspend notifier will
3340 		 * follow a cancelled suspend with a PM_POST_SUSPEND
3341 		 * notification.
3342 		 */
3343 		if (!ret) {
3344 			hdev->suspend_state_next = BT_SUSPEND_COMPLETE;
3345 			set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3346 			queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3347 			ret = hci_suspend_wait_event(hdev);
3348 		}
3349 	} else if (action == PM_POST_SUSPEND) {
3350 		hdev->suspend_state_next = BT_RUNNING;
3351 		set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3352 		queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3353 		ret = hci_suspend_wait_event(hdev);
3354 	}
3355 
3356 done:
3357 	return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP;
3358 }
3359 /* Alloc HCI device */
3360 struct hci_dev *hci_alloc_dev(void)
3361 {
3362 	struct hci_dev *hdev;
3363 
3364 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3365 	if (!hdev)
3366 		return NULL;
3367 
3368 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3369 	hdev->esco_type = (ESCO_HV1);
3370 	hdev->link_mode = (HCI_LM_ACCEPT);
3371 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
3372 	hdev->io_capability = 0x03;	/* No Input No Output */
3373 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3374 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3375 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3376 	hdev->adv_instance_cnt = 0;
3377 	hdev->cur_adv_instance = 0x00;
3378 	hdev->adv_instance_timeout = 0;
3379 
3380 	hdev->sniff_max_interval = 800;
3381 	hdev->sniff_min_interval = 80;
3382 
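	/* The LE defaults below use controller units: advertising and scan
	 * interval/window values are in 0.625 ms steps, connection intervals
	 * in 1.25 ms steps and the supervision timeout in 10 ms steps.
	 */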
3383 	hdev->le_adv_channel_map = 0x07;
3384 	hdev->le_adv_min_interval = 0x0800;
3385 	hdev->le_adv_max_interval = 0x0800;
3386 	hdev->le_scan_interval = 0x0060;
3387 	hdev->le_scan_window = 0x0030;
3388 	hdev->le_conn_min_interval = 0x0018;
3389 	hdev->le_conn_max_interval = 0x0028;
3390 	hdev->le_conn_latency = 0x0000;
3391 	hdev->le_supv_timeout = 0x002a;
3392 	hdev->le_def_tx_len = 0x001b;
3393 	hdev->le_def_tx_time = 0x0148;
3394 	hdev->le_max_tx_len = 0x001b;
3395 	hdev->le_max_tx_time = 0x0148;
3396 	hdev->le_max_rx_len = 0x001b;
3397 	hdev->le_max_rx_time = 0x0148;
3398 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3399 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3400 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3401 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3402 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3403 
3404 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3405 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3406 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3407 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3408 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3409 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3410 
3411 	mutex_init(&hdev->lock);
3412 	mutex_init(&hdev->req_lock);
3413 
3414 	INIT_LIST_HEAD(&hdev->mgmt_pending);
3415 	INIT_LIST_HEAD(&hdev->blacklist);
3416 	INIT_LIST_HEAD(&hdev->whitelist);
3417 	INIT_LIST_HEAD(&hdev->wakeable);
3418 	INIT_LIST_HEAD(&hdev->uuids);
3419 	INIT_LIST_HEAD(&hdev->link_keys);
3420 	INIT_LIST_HEAD(&hdev->long_term_keys);
3421 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3422 	INIT_LIST_HEAD(&hdev->remote_oob_data);
3423 	INIT_LIST_HEAD(&hdev->le_white_list);
3424 	INIT_LIST_HEAD(&hdev->le_resolv_list);
3425 	INIT_LIST_HEAD(&hdev->le_conn_params);
3426 	INIT_LIST_HEAD(&hdev->pend_le_conns);
3427 	INIT_LIST_HEAD(&hdev->pend_le_reports);
3428 	INIT_LIST_HEAD(&hdev->conn_hash.list);
3429 	INIT_LIST_HEAD(&hdev->adv_instances);
3430 	INIT_LIST_HEAD(&hdev->blocked_keys);
3431 
3432 	INIT_WORK(&hdev->rx_work, hci_rx_work);
3433 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3434 	INIT_WORK(&hdev->tx_work, hci_tx_work);
3435 	INIT_WORK(&hdev->power_on, hci_power_on);
3436 	INIT_WORK(&hdev->error_reset, hci_error_reset);
3437 	INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3438 
3439 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3440 
3441 	skb_queue_head_init(&hdev->rx_q);
3442 	skb_queue_head_init(&hdev->cmd_q);
3443 	skb_queue_head_init(&hdev->raw_q);
3444 
3445 	init_waitqueue_head(&hdev->req_wait_q);
3446 	init_waitqueue_head(&hdev->suspend_wait_q);
3447 
3448 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3449 
3450 	hci_request_setup(hdev);
3451 
3452 	hci_init_sysfs(hdev);
3453 	discovery_init(hdev);
3454 
3455 	return hdev;
3456 }
3457 EXPORT_SYMBOL(hci_alloc_dev);
3458 
3459 /* Free HCI device */
3460 void hci_free_dev(struct hci_dev *hdev)
3461 {
3462 	/* will free via device release */
3463 	put_device(&hdev->dev);
3464 }
3465 EXPORT_SYMBOL(hci_free_dev);
3466 
3467 /* Register HCI device */
3468 int hci_register_dev(struct hci_dev *hdev)
3469 {
3470 	int id, error;
3471 
3472 	if (!hdev->open || !hdev->close || !hdev->send)
3473 		return -EINVAL;
3474 
3475 	/* Do not allow HCI_AMP devices to register at index 0,
3476 	 * so the index can be used as the AMP controller ID.
3477 	 */
3478 	switch (hdev->dev_type) {
3479 	case HCI_PRIMARY:
3480 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3481 		break;
3482 	case HCI_AMP:
3483 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3484 		break;
3485 	default:
3486 		return -EINVAL;
3487 	}
3488 
3489 	if (id < 0)
3490 		return id;
3491 
3492 	sprintf(hdev->name, "hci%d", id);
3493 	hdev->id = id;
3494 
3495 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3496 
3497 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3498 	if (!hdev->workqueue) {
3499 		error = -ENOMEM;
3500 		goto err;
3501 	}
3502 
3503 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3504 						      hdev->name);
3505 	if (!hdev->req_workqueue) {
3506 		destroy_workqueue(hdev->workqueue);
3507 		error = -ENOMEM;
3508 		goto err;
3509 	}
3510 
3511 	if (!IS_ERR_OR_NULL(bt_debugfs))
3512 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3513 
3514 	dev_set_name(&hdev->dev, "%s", hdev->name);
3515 
3516 	error = device_add(&hdev->dev);
3517 	if (error < 0)
3518 		goto err_wqueue;
3519 
3520 	hci_leds_init(hdev);
3521 
3522 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3523 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3524 				    hdev);
3525 	if (hdev->rfkill) {
3526 		if (rfkill_register(hdev->rfkill) < 0) {
3527 			rfkill_destroy(hdev->rfkill);
3528 			hdev->rfkill = NULL;
3529 		}
3530 	}
3531 
3532 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3533 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3534 
3535 	hci_dev_set_flag(hdev, HCI_SETUP);
3536 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3537 
3538 	if (hdev->dev_type == HCI_PRIMARY) {
3539 		/* Assume BR/EDR support until proven otherwise (such as
3540 		 * through reading supported features during init.
3541 		 */
3542 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3543 	}
3544 
3545 	write_lock(&hci_dev_list_lock);
3546 	list_add(&hdev->list, &hci_dev_list);
3547 	write_unlock(&hci_dev_list_lock);
3548 
3549 	/* Devices that are marked for raw-only usage are unconfigured
3550 	 * and should not be included in normal operation.
3551 	 */
3552 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3553 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3554 
3555 	hci_sock_dev_event(hdev, HCI_DEV_REG);
3556 	hci_dev_hold(hdev);
3557 
3558 	hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3559 	error = register_pm_notifier(&hdev->suspend_notifier);
3560 	if (error)
3561 		goto err_wqueue;
3562 
3563 	queue_work(hdev->req_workqueue, &hdev->power_on);
3564 
3565 	return id;
3566 
3567 err_wqueue:
3568 	destroy_workqueue(hdev->workqueue);
3569 	destroy_workqueue(hdev->req_workqueue);
3570 err:
3571 	ida_simple_remove(&hci_index_ida, hdev->id);
3572 
3573 	return error;
3574 }
3575 EXPORT_SYMBOL(hci_register_dev);
3576 
3577 /* Unregister HCI device */
3578 void hci_unregister_dev(struct hci_dev *hdev)
3579 {
3580 	int id;
3581 
3582 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3583 
3584 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3585 
3586 	id = hdev->id;
3587 
3588 	write_lock(&hci_dev_list_lock);
3589 	list_del(&hdev->list);
3590 	write_unlock(&hci_dev_list_lock);
3591 
3592 	cancel_work_sync(&hdev->power_on);
3593 
3594 	hci_dev_do_close(hdev);
3595 
3596 	unregister_pm_notifier(&hdev->suspend_notifier);
3597 
3598 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3599 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3600 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3601 		hci_dev_lock(hdev);
3602 		mgmt_index_removed(hdev);
3603 		hci_dev_unlock(hdev);
3604 	}
3605 
3606 	/* mgmt_index_removed should take care of emptying the
3607 	 * pending list */
3608 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3609 
3610 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3611 
3612 	if (hdev->rfkill) {
3613 		rfkill_unregister(hdev->rfkill);
3614 		rfkill_destroy(hdev->rfkill);
3615 	}
3616 
3617 	device_del(&hdev->dev);
3618 
3619 	debugfs_remove_recursive(hdev->debugfs);
3620 	kfree_const(hdev->hw_info);
3621 	kfree_const(hdev->fw_info);
3622 
3623 	destroy_workqueue(hdev->workqueue);
3624 	destroy_workqueue(hdev->req_workqueue);
3625 
3626 	hci_dev_lock(hdev);
3627 	hci_bdaddr_list_clear(&hdev->blacklist);
3628 	hci_bdaddr_list_clear(&hdev->whitelist);
3629 	hci_uuids_clear(hdev);
3630 	hci_link_keys_clear(hdev);
3631 	hci_smp_ltks_clear(hdev);
3632 	hci_smp_irks_clear(hdev);
3633 	hci_remote_oob_data_clear(hdev);
3634 	hci_adv_instances_clear(hdev);
3635 	hci_bdaddr_list_clear(&hdev->le_white_list);
3636 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
3637 	hci_conn_params_clear_all(hdev);
3638 	hci_discovery_filter_clear(hdev);
3639 	hci_blocked_keys_clear(hdev);
3640 	hci_dev_unlock(hdev);
3641 
3642 	hci_dev_put(hdev);
3643 
3644 	ida_simple_remove(&hci_index_ida, id);
3645 }
3646 EXPORT_SYMBOL(hci_unregister_dev);
3647 
3648 /* Suspend HCI device */
3649 int hci_suspend_dev(struct hci_dev *hdev)
3650 {
3651 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3652 	return 0;
3653 }
3654 EXPORT_SYMBOL(hci_suspend_dev);
3655 
3656 /* Resume HCI device */
3657 int hci_resume_dev(struct hci_dev *hdev)
3658 {
3659 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3660 	return 0;
3661 }
3662 EXPORT_SYMBOL(hci_resume_dev);
3663 
3664 /* Reset HCI device */
3665 int hci_reset_dev(struct hci_dev *hdev)
3666 {
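	/* hw_err is a minimal synthetic Hardware Error event: event code,
	 * parameter length (0x01) and error code 0x00. Feeding it through
	 * the RX path lets the core handle it like a real controller error.
	 */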
3667 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3668 	struct sk_buff *skb;
3669 
3670 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3671 	if (!skb)
3672 		return -ENOMEM;
3673 
3674 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3675 	skb_put_data(skb, hw_err, 3);
3676 
3677 	/* Send Hardware Error to upper stack */
3678 	return hci_recv_frame(hdev, skb);
3679 }
3680 EXPORT_SYMBOL(hci_reset_dev);
3681 
3682 /* Receive frame from HCI drivers */
3683 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3684 {
3685 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3686 		      && !test_bit(HCI_INIT, &hdev->flags))) {
3687 		kfree_skb(skb);
3688 		return -ENXIO;
3689 	}
3690 
3691 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3692 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3693 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3694 	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3695 		kfree_skb(skb);
3696 		return -EINVAL;
3697 	}
3698 
3699 	/* Incoming skb */
3700 	bt_cb(skb)->incoming = 1;
3701 
3702 	/* Time stamp */
3703 	__net_timestamp(skb);
3704 
3705 	skb_queue_tail(&hdev->rx_q, skb);
3706 	queue_work(hdev->workqueue, &hdev->rx_work);
3707 
3708 	return 0;
3709 }
3710 EXPORT_SYMBOL(hci_recv_frame);
3711 
3712 /* Receive diagnostic message from HCI drivers */
3713 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3714 {
3715 	/* Mark as diagnostic packet */
3716 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3717 
3718 	/* Time stamp */
3719 	__net_timestamp(skb);
3720 
3721 	skb_queue_tail(&hdev->rx_q, skb);
3722 	queue_work(hdev->workqueue, &hdev->rx_work);
3723 
3724 	return 0;
3725 }
3726 EXPORT_SYMBOL(hci_recv_diag);
3727 
3728 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3729 {
3730 	va_list vargs;
3731 
3732 	va_start(vargs, fmt);
3733 	kfree_const(hdev->hw_info);
3734 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3735 	va_end(vargs);
3736 }
3737 EXPORT_SYMBOL(hci_set_hw_info);
3738 
3739 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3740 {
3741 	va_list vargs;
3742 
3743 	va_start(vargs, fmt);
3744 	kfree_const(hdev->fw_info);
3745 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3746 	va_end(vargs);
3747 }
3748 EXPORT_SYMBOL(hci_set_fw_info);
3749 
3750 /* ---- Interface to upper protocols ---- */
3751 
3752 int hci_register_cb(struct hci_cb *cb)
3753 {
3754 	BT_DBG("%p name %s", cb, cb->name);
3755 
3756 	mutex_lock(&hci_cb_list_lock);
3757 	list_add_tail(&cb->list, &hci_cb_list);
3758 	mutex_unlock(&hci_cb_list_lock);
3759 
3760 	return 0;
3761 }
3762 EXPORT_SYMBOL(hci_register_cb);
3763 
3764 int hci_unregister_cb(struct hci_cb *cb)
3765 {
3766 	BT_DBG("%p name %s", cb, cb->name);
3767 
3768 	mutex_lock(&hci_cb_list_lock);
3769 	list_del(&cb->list);
3770 	mutex_unlock(&hci_cb_list_lock);
3771 
3772 	return 0;
3773 }
3774 EXPORT_SYMBOL(hci_unregister_cb);
3775 
3776 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3777 {
3778 	int err;
3779 
3780 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3781 	       skb->len);
3782 
3783 	/* Time stamp */
3784 	__net_timestamp(skb);
3785 
3786 	/* Send copy to monitor */
3787 	hci_send_to_monitor(hdev, skb);
3788 
3789 	if (atomic_read(&hdev->promisc)) {
3790 		/* Send copy to the sockets */
3791 		hci_send_to_sock(hdev, skb);
3792 	}
3793 
3794 	/* Get rid of skb owner, prior to sending to the driver. */
3795 	skb_orphan(skb);
3796 
3797 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3798 		kfree_skb(skb);
3799 		return;
3800 	}
3801 
3802 	err = hdev->send(hdev, skb);
3803 	if (err < 0) {
3804 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3805 		kfree_skb(skb);
3806 	}
3807 }
3808 
3809 /* Send HCI command */
3810 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3811 		 const void *param)
3812 {
3813 	struct sk_buff *skb;
3814 
3815 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3816 
3817 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3818 	if (!skb) {
3819 		bt_dev_err(hdev, "no memory for command");
3820 		return -ENOMEM;
3821 	}
3822 
3823 	/* Stand-alone HCI commands must be flagged as
3824 	 * single-command requests.
3825 	 */
3826 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3827 
3828 	skb_queue_tail(&hdev->cmd_q, skb);
3829 	queue_work(hdev->workqueue, &hdev->cmd_work);
3830 
3831 	return 0;
3832 }
3833 
3834 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3835 		   const void *param)
3836 {
3837 	struct sk_buff *skb;
3838 
3839 	if (hci_opcode_ogf(opcode) != 0x3f) {
3840 		/* A controller receiving a command shall respond with either
3841 		 * a Command Status Event or a Command Complete Event.
3842 		 * Therefore, all standard HCI commands must be sent via the
3843 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3844 		 * Some vendors do not comply with this rule for vendor-specific
3845 		 * commands and do not return any event. Commands that get
3846 		 * no response are therefore supported for this case only.
3847 		 */
3848 		bt_dev_err(hdev, "unresponded command not supported");
3849 		return -EINVAL;
3850 	}
3851 
3852 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3853 	if (!skb) {
3854 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3855 			   opcode);
3856 		return -ENOMEM;
3857 	}
3858 
3859 	hci_send_frame(hdev, skb);
3860 
3861 	return 0;
3862 }
3863 EXPORT_SYMBOL(__hci_cmd_send);
3864 
3865 /* Get data from the previously sent command */
3866 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3867 {
3868 	struct hci_command_hdr *hdr;
3869 
3870 	if (!hdev->sent_cmd)
3871 		return NULL;
3872 
3873 	hdr = (void *) hdev->sent_cmd->data;
3874 
3875 	if (hdr->opcode != cpu_to_le16(opcode))
3876 		return NULL;
3877 
3878 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3879 
3880 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3881 }
3882 
3883 /* Send HCI command and wait for command complete event */
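/* Illustrative use only, with a made-up vendor opcode (0xfc01) and a
 * made-up parameter buffer:
 *
 *	skb = hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... inspect skb->data ...
 *	kfree_skb(skb);
 */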
3884 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3885 			     const void *param, u32 timeout)
3886 {
3887 	struct sk_buff *skb;
3888 
3889 	if (!test_bit(HCI_UP, &hdev->flags))
3890 		return ERR_PTR(-ENETDOWN);
3891 
3892 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3893 
3894 	hci_req_sync_lock(hdev);
3895 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3896 	hci_req_sync_unlock(hdev);
3897 
3898 	return skb;
3899 }
3900 EXPORT_SYMBOL(hci_cmd_sync);
3901 
3902 /* Send ACL data */
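/* Prepend the 4-byte HCI ACL data header: a little-endian 16-bit field
 * packing the 12-bit connection handle with the packet boundary and
 * broadcast flags in its top four bits, followed by the little-endian
 * 16-bit payload length.
 */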
3903 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3904 {
3905 	struct hci_acl_hdr *hdr;
3906 	int len = skb->len;
3907 
3908 	skb_push(skb, HCI_ACL_HDR_SIZE);
3909 	skb_reset_transport_header(skb);
3910 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3911 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3912 	hdr->dlen   = cpu_to_le16(len);
3913 }
3914 
3915 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3916 			  struct sk_buff *skb, __u16 flags)
3917 {
3918 	struct hci_conn *conn = chan->conn;
3919 	struct hci_dev *hdev = conn->hdev;
3920 	struct sk_buff *list;
3921 
3922 	skb->len = skb_headlen(skb);
3923 	skb->data_len = 0;
3924 
3925 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3926 
3927 	switch (hdev->dev_type) {
3928 	case HCI_PRIMARY:
3929 		hci_add_acl_hdr(skb, conn->handle, flags);
3930 		break;
3931 	case HCI_AMP:
3932 		hci_add_acl_hdr(skb, chan->handle, flags);
3933 		break;
3934 	default:
3935 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3936 		return;
3937 	}
3938 
3939 	list = skb_shinfo(skb)->frag_list;
3940 	if (!list) {
3941 		/* Non-fragmented */
3942 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3943 
3944 		skb_queue_tail(queue, skb);
3945 	} else {
3946 		/* Fragmented */
3947 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3948 
3949 		skb_shinfo(skb)->frag_list = NULL;
3950 
3951 		/* Queue all fragments atomically. We need to use spin_lock_bh
3952 		 * here because of 6LoWPAN links, as there this function is
3953 		 * called from softirq and using normal spin lock could cause
3954 		 * deadlocks.
3955 		 */
3956 		spin_lock_bh(&queue->lock);
3957 
3958 		__skb_queue_tail(queue, skb);
3959 
3960 		flags &= ~ACL_START;
3961 		flags |= ACL_CONT;
3962 		do {
3963 			skb = list; list = list->next;
3964 
3965 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3966 			hci_add_acl_hdr(skb, conn->handle, flags);
3967 
3968 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3969 
3970 			__skb_queue_tail(queue, skb);
3971 		} while (list);
3972 
3973 		spin_unlock_bh(&queue->lock);
3974 	}
3975 }
3976 
3977 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3978 {
3979 	struct hci_dev *hdev = chan->conn->hdev;
3980 
3981 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3982 
3983 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3984 
3985 	queue_work(hdev->workqueue, &hdev->tx_work);
3986 }
3987 
3988 /* Send SCO data */
3989 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3990 {
3991 	struct hci_dev *hdev = conn->hdev;
3992 	struct hci_sco_hdr hdr;
3993 
3994 	BT_DBG("%s len %d", hdev->name, skb->len);
3995 
3996 	hdr.handle = cpu_to_le16(conn->handle);
3997 	hdr.dlen   = skb->len;
3998 
3999 	skb_push(skb, HCI_SCO_HDR_SIZE);
4000 	skb_reset_transport_header(skb);
4001 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4002 
4003 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4004 
4005 	skb_queue_tail(&conn->data_q, skb);
4006 	queue_work(hdev->workqueue, &hdev->tx_work);
4007 }
4008 
4009 /* ---- HCI TX task (outgoing data) ---- */
4010 
4011 /* HCI Connection scheduler */
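/* Pick the connection of the given link type that has data queued and
 * the fewest packets already in flight, and compute how many packets it
 * may send this round: the controller's free buffer count divided evenly
 * among the candidate connections, with a minimum of one.
 */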
4012 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4013 				     int *quote)
4014 {
4015 	struct hci_conn_hash *h = &hdev->conn_hash;
4016 	struct hci_conn *conn = NULL, *c;
4017 	unsigned int num = 0, min = ~0;
4018 
4019 	/* We don't have to lock the device here. Connections are always
4020 	 * added and removed with the TX task disabled. */
4021 
4022 	rcu_read_lock();
4023 
4024 	list_for_each_entry_rcu(c, &h->list, list) {
4025 		if (c->type != type || skb_queue_empty(&c->data_q))
4026 			continue;
4027 
4028 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4029 			continue;
4030 
4031 		num++;
4032 
4033 		if (c->sent < min) {
4034 			min  = c->sent;
4035 			conn = c;
4036 		}
4037 
4038 		if (hci_conn_num(hdev, type) == num)
4039 			break;
4040 	}
4041 
4042 	rcu_read_unlock();
4043 
4044 	if (conn) {
4045 		int cnt, q;
4046 
4047 		switch (conn->type) {
4048 		case ACL_LINK:
4049 			cnt = hdev->acl_cnt;
4050 			break;
4051 		case SCO_LINK:
4052 		case ESCO_LINK:
4053 			cnt = hdev->sco_cnt;
4054 			break;
4055 		case LE_LINK:
4056 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4057 			break;
4058 		default:
4059 			cnt = 0;
4060 			bt_dev_err(hdev, "unknown link type %d", conn->type);
4061 		}
4062 
4063 		q = cnt / num;
4064 		*quote = q ? q : 1;
4065 	} else
4066 		*quote = 0;
4067 
4068 	BT_DBG("conn %p quote %d", conn, *quote);
4069 	return conn;
4070 }
4071 
4072 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4073 {
4074 	struct hci_conn_hash *h = &hdev->conn_hash;
4075 	struct hci_conn *c;
4076 
4077 	bt_dev_err(hdev, "link tx timeout");
4078 
4079 	rcu_read_lock();
4080 
4081 	/* Kill stalled connections */
4082 	list_for_each_entry_rcu(c, &h->list, list) {
4083 		if (c->type == type && c->sent) {
4084 			bt_dev_err(hdev, "killing stalled connection %pMR",
4085 				   &c->dst);
4086 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4087 		}
4088 	}
4089 
4090 	rcu_read_unlock();
4091 }
4092 
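/* Channel-aware scheduler: consider only channels whose head packet has
 * the highest priority currently queued for this link type, and of those
 * pick one on the connection with the fewest packets in flight. The
 * quota is again the free buffer count divided by the number of
 * candidate channels.
 */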
4093 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4094 				      int *quote)
4095 {
4096 	struct hci_conn_hash *h = &hdev->conn_hash;
4097 	struct hci_chan *chan = NULL;
4098 	unsigned int num = 0, min = ~0, cur_prio = 0;
4099 	struct hci_conn *conn;
4100 	int cnt, q, conn_num = 0;
4101 
4102 	BT_DBG("%s", hdev->name);
4103 
4104 	rcu_read_lock();
4105 
4106 	list_for_each_entry_rcu(conn, &h->list, list) {
4107 		struct hci_chan *tmp;
4108 
4109 		if (conn->type != type)
4110 			continue;
4111 
4112 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4113 			continue;
4114 
4115 		conn_num++;
4116 
4117 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4118 			struct sk_buff *skb;
4119 
4120 			if (skb_queue_empty(&tmp->data_q))
4121 				continue;
4122 
4123 			skb = skb_peek(&tmp->data_q);
4124 			if (skb->priority < cur_prio)
4125 				continue;
4126 
4127 			if (skb->priority > cur_prio) {
4128 				num = 0;
4129 				min = ~0;
4130 				cur_prio = skb->priority;
4131 			}
4132 
4133 			num++;
4134 
4135 			if (conn->sent < min) {
4136 				min  = conn->sent;
4137 				chan = tmp;
4138 			}
4139 		}
4140 
4141 		if (hci_conn_num(hdev, type) == conn_num)
4142 			break;
4143 	}
4144 
4145 	rcu_read_unlock();
4146 
4147 	if (!chan)
4148 		return NULL;
4149 
4150 	switch (chan->conn->type) {
4151 	case ACL_LINK:
4152 		cnt = hdev->acl_cnt;
4153 		break;
4154 	case AMP_LINK:
4155 		cnt = hdev->block_cnt;
4156 		break;
4157 	case SCO_LINK:
4158 	case ESCO_LINK:
4159 		cnt = hdev->sco_cnt;
4160 		break;
4161 	case LE_LINK:
4162 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4163 		break;
4164 	default:
4165 		cnt = 0;
4166 		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4167 	}
4168 
4169 	q = cnt / num;
4170 	*quote = q ? q : 1;
4171 	BT_DBG("chan %p quote %d", chan, *quote);
4172 	return chan;
4173 }
4174 
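/* Avoid starvation after a TX round: channels that managed to send get
 * their counters reset, while the head packet of channels that sent
 * nothing is promoted to priority HCI_PRIO_MAX - 1 so they are preferred
 * in the next round.
 */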
4175 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4176 {
4177 	struct hci_conn_hash *h = &hdev->conn_hash;
4178 	struct hci_conn *conn;
4179 	int num = 0;
4180 
4181 	BT_DBG("%s", hdev->name);
4182 
4183 	rcu_read_lock();
4184 
4185 	list_for_each_entry_rcu(conn, &h->list, list) {
4186 		struct hci_chan *chan;
4187 
4188 		if (conn->type != type)
4189 			continue;
4190 
4191 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4192 			continue;
4193 
4194 		num++;
4195 
4196 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4197 			struct sk_buff *skb;
4198 
4199 			if (chan->sent) {
4200 				chan->sent = 0;
4201 				continue;
4202 			}
4203 
4204 			if (skb_queue_empty(&chan->data_q))
4205 				continue;
4206 
4207 			skb = skb_peek(&chan->data_q);
4208 			if (skb->priority >= HCI_PRIO_MAX - 1)
4209 				continue;
4210 
4211 			skb->priority = HCI_PRIO_MAX - 1;
4212 
4213 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4214 			       skb->priority);
4215 		}
4216 
4217 		if (hci_conn_num(hdev, type) == num)
4218 			break;
4219 	}
4220 
4221 	rcu_read_unlock();
4222 
4223 }
4224 
4225 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4226 {
4227 	/* Calculate count of blocks used by this packet */
4228 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4229 }
4230 
4231 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4232 {
4233 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4234 		/* ACL tx timeout must be longer than maximum
4235 		 * link supervision timeout (40.9 seconds) */
4236 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4237 				       HCI_ACL_TX_TIMEOUT))
4238 			hci_link_tx_to(hdev, ACL_LINK);
4239 	}
4240 }
4241 
4242 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4243 {
4244 	unsigned int cnt = hdev->acl_cnt;
4245 	struct hci_chan *chan;
4246 	struct sk_buff *skb;
4247 	int quote;
4248 
4249 	__check_timeout(hdev, cnt);
4250 
4251 	while (hdev->acl_cnt &&
4252 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4253 		u32 priority = (skb_peek(&chan->data_q))->priority;
4254 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4255 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4256 			       skb->len, skb->priority);
4257 
4258 			/* Stop if priority has changed */
4259 			if (skb->priority < priority)
4260 				break;
4261 
4262 			skb = skb_dequeue(&chan->data_q);
4263 
4264 			hci_conn_enter_active_mode(chan->conn,
4265 						   bt_cb(skb)->force_active);
4266 
4267 			hci_send_frame(hdev, skb);
4268 			hdev->acl_last_tx = jiffies;
4269 
4270 			hdev->acl_cnt--;
4271 			chan->sent++;
4272 			chan->conn->sent++;
4273 		}
4274 	}
4275 
4276 	if (cnt != hdev->acl_cnt)
4277 		hci_prio_recalculate(hdev, ACL_LINK);
4278 }
4279 
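/* Block-based flow control: the controller accounts for its buffers in
 * data blocks rather than packets, so every frame consumes
 * __get_blocks() blocks from hdev->block_cnt. AMP controllers schedule
 * their AMP_LINK traffic through this path.
 */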
4280 static void hci_sched_acl_blk(struct hci_dev *hdev)
4281 {
4282 	unsigned int cnt = hdev->block_cnt;
4283 	struct hci_chan *chan;
4284 	struct sk_buff *skb;
4285 	int quote;
4286 	u8 type;
4287 
4288 	__check_timeout(hdev, cnt);
4289 
4290 	BT_DBG("%s", hdev->name);
4291 
4292 	if (hdev->dev_type == HCI_AMP)
4293 		type = AMP_LINK;
4294 	else
4295 		type = ACL_LINK;
4296 
4297 	while (hdev->block_cnt > 0 &&
4298 	       (chan = hci_chan_sent(hdev, type, &quote))) {
4299 		u32 priority = (skb_peek(&chan->data_q))->priority;
4300 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4301 			int blocks;
4302 
4303 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4304 			       skb->len, skb->priority);
4305 
4306 			/* Stop if priority has changed */
4307 			if (skb->priority < priority)
4308 				break;
4309 
4310 			skb = skb_dequeue(&chan->data_q);
4311 
4312 			blocks = __get_blocks(hdev, skb);
4313 			if (blocks > hdev->block_cnt)
4314 				return;
4315 
4316 			hci_conn_enter_active_mode(chan->conn,
4317 						   bt_cb(skb)->force_active);
4318 
4319 			hci_send_frame(hdev, skb);
4320 			hdev->acl_last_tx = jiffies;
4321 
4322 			hdev->block_cnt -= blocks;
4323 			quote -= blocks;
4324 
4325 			chan->sent += blocks;
4326 			chan->conn->sent += blocks;
4327 		}
4328 	}
4329 
4330 	if (cnt != hdev->block_cnt)
4331 		hci_prio_recalculate(hdev, type);
4332 }
4333 
4334 static void hci_sched_acl(struct hci_dev *hdev)
4335 {
4336 	BT_DBG("%s", hdev->name);
4337 
4338 	/* Nothing to schedule if a BR/EDR controller has no ACL links */
4339 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4340 		return;
4341 
4342 	/* Nothing to schedule if an AMP controller has no AMP links */
4343 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4344 		return;
4345 
4346 	switch (hdev->flow_ctl_mode) {
4347 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
4348 		hci_sched_acl_pkt(hdev);
4349 		break;
4350 
4351 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4352 		hci_sched_acl_blk(hdev);
4353 		break;
4354 	}
4355 }
4356 
4357 /* Schedule SCO */
4358 static void hci_sched_sco(struct hci_dev *hdev)
4359 {
4360 	struct hci_conn *conn;
4361 	struct sk_buff *skb;
4362 	int quote;
4363 
4364 	BT_DBG("%s", hdev->name);
4365 
4366 	if (!hci_conn_num(hdev, SCO_LINK))
4367 		return;
4368 
4369 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4370 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4371 			BT_DBG("skb %p len %d", skb, skb->len);
4372 			hci_send_frame(hdev, skb);
4373 
4374 			conn->sent++;
4375 			if (conn->sent == ~0)
4376 				conn->sent = 0;
4377 		}
4378 	}
4379 }
4380 
4381 static void hci_sched_esco(struct hci_dev *hdev)
4382 {
4383 	struct hci_conn *conn;
4384 	struct sk_buff *skb;
4385 	int quote;
4386 
4387 	BT_DBG("%s", hdev->name);
4388 
4389 	if (!hci_conn_num(hdev, ESCO_LINK))
4390 		return;
4391 
4392 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4393 						     &quote))) {
4394 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4395 			BT_DBG("skb %p len %d", skb, skb->len);
4396 			hci_send_frame(hdev, skb);
4397 
4398 			conn->sent++;
4399 			if (conn->sent == ~0)
4400 				conn->sent = 0;
4401 		}
4402 	}
4403 }
4404 
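/* LE traffic uses the controller's dedicated LE buffer pool when one is
 * reported (le_pkts/le_cnt) and otherwise shares the ACL buffer pool.
 */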
4405 static void hci_sched_le(struct hci_dev *hdev)
4406 {
4407 	struct hci_chan *chan;
4408 	struct sk_buff *skb;
4409 	int quote, cnt, tmp;
4410 
4411 	BT_DBG("%s", hdev->name);
4412 
4413 	if (!hci_conn_num(hdev, LE_LINK))
4414 		return;
4415 
4416 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4417 
4418 	__check_timeout(hdev, cnt);
4419 
4420 	tmp = cnt;
4421 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4422 		u32 priority = (skb_peek(&chan->data_q))->priority;
4423 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4424 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4425 			       skb->len, skb->priority);
4426 
4427 			/* Stop if priority has changed */
4428 			if (skb->priority < priority)
4429 				break;
4430 
4431 			skb = skb_dequeue(&chan->data_q);
4432 
4433 			hci_send_frame(hdev, skb);
4434 			hdev->le_last_tx = jiffies;
4435 
4436 			cnt--;
4437 			chan->sent++;
4438 			chan->conn->sent++;
4439 		}
4440 	}
4441 
4442 	if (hdev->le_pkts)
4443 		hdev->le_cnt = cnt;
4444 	else
4445 		hdev->acl_cnt = cnt;
4446 
4447 	if (cnt != tmp)
4448 		hci_prio_recalculate(hdev, LE_LINK);
4449 }
4450 
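/* TX work item: runs the per-link-type schedulers above (unless the
 * device is owned by a user channel) and then flushes any queued raw
 * packets. It is queued whenever new data is submitted or the controller
 * frees up buffers.
 */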
4451 static void hci_tx_work(struct work_struct *work)
4452 {
4453 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4454 	struct sk_buff *skb;
4455 
4456 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4457 	       hdev->sco_cnt, hdev->le_cnt);
4458 
4459 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4460 		/* Schedule queues and send stuff to HCI driver */
4461 		hci_sched_acl(hdev);
4462 		hci_sched_sco(hdev);
4463 		hci_sched_esco(hdev);
4464 		hci_sched_le(hdev);
4465 	}
4466 
4467 	/* Send next queued raw (unknown type) packet */
4468 	while ((skb = skb_dequeue(&hdev->raw_q)))
4469 		hci_send_frame(hdev, skb);
4470 }
4471 
4472 /* ----- HCI RX task (incoming data processing) ----- */
4473 
4474 /* ACL data packet */
4475 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4476 {
4477 	struct hci_acl_hdr *hdr = (void *) skb->data;
4478 	struct hci_conn *conn;
4479 	__u16 handle, flags;
4480 
4481 	skb_pull(skb, HCI_ACL_HDR_SIZE);
4482 
4483 	handle = __le16_to_cpu(hdr->handle);
4484 	flags  = hci_flags(handle);
4485 	handle = hci_handle(handle);
4486 
4487 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4488 	       handle, flags);
4489 
4490 	hdev->stat.acl_rx++;
4491 
4492 	hci_dev_lock(hdev);
4493 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4494 	hci_dev_unlock(hdev);
4495 
4496 	if (conn) {
4497 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4498 
4499 		/* Send to upper protocol */
4500 		l2cap_recv_acldata(conn, skb, flags);
4501 		return;
4502 	} else {
4503 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4504 			   handle);
4505 	}
4506 
4507 	kfree_skb(skb);
4508 }
4509 
4510 /* SCO data packet */
4511 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4512 {
4513 	struct hci_sco_hdr *hdr = (void *) skb->data;
4514 	struct hci_conn *conn;
4515 	__u16 handle, flags;
4516 
4517 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4518 
4519 	handle = __le16_to_cpu(hdr->handle);
4520 	flags  = hci_flags(handle);
4521 	handle = hci_handle(handle);
4522 
4523 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4524 	       handle, flags);
4525 
4526 	hdev->stat.sco_rx++;
4527 
4528 	hci_dev_lock(hdev);
4529 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4530 	hci_dev_unlock(hdev);
4531 
4532 	if (conn) {
4533 		/* Send to upper protocol */
4534 		sco_recv_scodata(conn, skb);
4535 		return;
4536 	} else {
4537 		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4538 			   handle);
4539 	}
4540 
4541 	kfree_skb(skb);
4542 }
4543 
4544 static bool hci_req_is_complete(struct hci_dev *hdev)
4545 {
4546 	struct sk_buff *skb;
4547 
4548 	skb = skb_peek(&hdev->cmd_q);
4549 	if (!skb)
4550 		return true;
4551 
4552 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4553 }
4554 
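/* Re-queue a clone of the most recently sent command so that it gets
 * transmitted again. HCI_Reset is never resent.
 */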
4555 static void hci_resend_last(struct hci_dev *hdev)
4556 {
4557 	struct hci_command_hdr *sent;
4558 	struct sk_buff *skb;
4559 	u16 opcode;
4560 
4561 	if (!hdev->sent_cmd)
4562 		return;
4563 
4564 	sent = (void *) hdev->sent_cmd->data;
4565 	opcode = __le16_to_cpu(sent->opcode);
4566 	if (opcode == HCI_OP_RESET)
4567 		return;
4568 
4569 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4570 	if (!skb)
4571 		return;
4572 
4573 	skb_queue_head(&hdev->cmd_q, skb);
4574 	queue_work(hdev->workqueue, &hdev->cmd_work);
4575 }
4576 
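/* Called from the event path when a Command Complete or Command Status
 * event arrives. Decide whether the event finishes the current request
 * and, if so, return the completion callbacks the caller should invoke;
 * on failure any commands still queued for the request are dropped.
 */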
4577 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4578 			  hci_req_complete_t *req_complete,
4579 			  hci_req_complete_skb_t *req_complete_skb)
4580 {
4581 	struct sk_buff *skb;
4582 	unsigned long flags;
4583 
4584 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4585 
4586 	/* If the completed command doesn't match the last one that was
4587 	 * sent we need to do special handling of it.
4588 	 */
4589 	if (!hci_sent_cmd_data(hdev, opcode)) {
4590 		/* Some CSR based controllers generate a spontaneous
4591 		 * reset complete event during init and any pending
4592 		 * command will never be completed. In such a case we
4593 		 * need to resend whatever was the last sent
4594 		 * command.
4595 		 */
4596 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4597 			hci_resend_last(hdev);
4598 
4599 		return;
4600 	}
4601 
4602 	/* If we reach this point this event matches the last command sent */
4603 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4604 
4605 	/* If the command succeeded and there are still more commands in
4606 	 * this request, the request is not yet complete.
4607 	 */
4608 	if (!status && !hci_req_is_complete(hdev))
4609 		return;
4610 
4611 	/* If this was the last command in a request, the complete
4612 	 * callback will be found in hdev->sent_cmd instead of the
4613 	 * command queue (hdev->cmd_q).
4614 	 */
4615 	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4616 		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4617 		return;
4618 	}
4619 
4620 	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4621 		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4622 		return;
4623 	}
4624 
4625 	/* Remove all pending commands belonging to this request */
4626 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4627 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4628 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4629 			__skb_queue_head(&hdev->cmd_q, skb);
4630 			break;
4631 		}
4632 
4633 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4634 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4635 		else
4636 			*req_complete = bt_cb(skb)->hci.req_complete;
4637 		kfree_skb(skb);
4638 	}
4639 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4640 }
4641 
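/* RX work item: drain the receive queue, mirror every packet to the
 * monitor socket (and to raw sockets in promiscuous mode) and dispatch
 * it by packet type. Packets are not processed while a user channel
 * owns the device (except during its setup), and data packets are
 * dropped while the device is still initializing.
 */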
4642 static void hci_rx_work(struct work_struct *work)
4643 {
4644 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4645 	struct sk_buff *skb;
4646 
4647 	BT_DBG("%s", hdev->name);
4648 
4649 	while ((skb = skb_dequeue(&hdev->rx_q))) {
4650 		/* Send copy to monitor */
4651 		hci_send_to_monitor(hdev, skb);
4652 
4653 		if (atomic_read(&hdev->promisc)) {
4654 			/* Send copy to the sockets */
4655 			hci_send_to_sock(hdev, skb);
4656 		}
4657 
4658 		/* If the device has been opened in HCI_USER_CHANNEL,
4659 		 * userspace has exclusive access to the device.
4660 		 * While the device is in HCI_INIT, however, incoming
4661 		 * packets still need to be processed so that the
4662 		 * driver can complete its setup().
4663 		 */
4664 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4665 		    !test_bit(HCI_INIT, &hdev->flags)) {
4666 			kfree_skb(skb);
4667 			continue;
4668 		}
4669 
4670 		if (test_bit(HCI_INIT, &hdev->flags)) {
4671 			/* Don't process data packets in this state. */
4672 			switch (hci_skb_pkt_type(skb)) {
4673 			case HCI_ACLDATA_PKT:
4674 			case HCI_SCODATA_PKT:
4675 			case HCI_ISODATA_PKT:
4676 				kfree_skb(skb);
4677 				continue;
4678 			}
4679 		}
4680 
4681 		/* Process frame */
4682 		switch (hci_skb_pkt_type(skb)) {
4683 		case HCI_EVENT_PKT:
4684 			BT_DBG("%s Event packet", hdev->name);
4685 			hci_event_packet(hdev, skb);
4686 			break;
4687 
4688 		case HCI_ACLDATA_PKT:
4689 			BT_DBG("%s ACL data packet", hdev->name);
4690 			hci_acldata_packet(hdev, skb);
4691 			break;
4692 
4693 		case HCI_SCODATA_PKT:
4694 			BT_DBG("%s SCO data packet", hdev->name);
4695 			hci_scodata_packet(hdev, skb);
4696 			break;
4697 
4698 		default:
4699 			kfree_skb(skb);
4700 			break;
4701 		}
4702 	}
4703 }
4704 
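/* Command work item: if the controller has a free command slot
 * (cmd_cnt), send the next queued command. A clone is kept in sent_cmd
 * so the completion event can be matched against it and the command
 * resent if necessary; the command timeout is rearmed unless a reset is
 * in progress.
 */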
4705 static void hci_cmd_work(struct work_struct *work)
4706 {
4707 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4708 	struct sk_buff *skb;
4709 
4710 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4711 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4712 
4713 	/* Send queued commands */
4714 	if (atomic_read(&hdev->cmd_cnt)) {
4715 		skb = skb_dequeue(&hdev->cmd_q);
4716 		if (!skb)
4717 			return;
4718 
4719 		kfree_skb(hdev->sent_cmd);
4720 
4721 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4722 		if (hdev->sent_cmd) {
4723 			if (hci_req_status_pend(hdev))
4724 				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4725 			atomic_dec(&hdev->cmd_cnt);
4726 			hci_send_frame(hdev, skb);
4727 			if (test_bit(HCI_RESET, &hdev->flags))
4728 				cancel_delayed_work(&hdev->cmd_timer);
4729 			else
4730 				schedule_delayed_work(&hdev->cmd_timer,
4731 						      HCI_CMD_TIMEOUT);
4732 		} else {
4733 			skb_queue_head(&hdev->cmd_q, skb);
4734 			queue_work(hdev->workqueue, &hdev->cmd_work);
4735 		}
4736 	}
4737 }
4738