xref: /linux/net/bluetooth/hci_core.c (revision b60a5b8dcf49af9f2c60ae82e0383ee8e62a9a52)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <linux/property.h>
34 #include <asm/unaligned.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/mgmt.h>
40 
41 #include "hci_request.h"
42 #include "hci_debugfs.h"
43 #include "smp.h"
44 #include "leds.h"
45 
46 static void hci_rx_work(struct work_struct *work);
47 static void hci_cmd_work(struct work_struct *work);
48 static void hci_tx_work(struct work_struct *work);
49 
50 /* HCI device list */
51 LIST_HEAD(hci_dev_list);
52 DEFINE_RWLOCK(hci_dev_list_lock);
53 
54 /* HCI callback list */
55 LIST_HEAD(hci_cb_list);
56 DEFINE_MUTEX(hci_cb_list_lock);
57 
58 /* HCI ID Numbering */
59 static DEFINE_IDA(hci_index_ida);
60 
61 /* ---- HCI debugfs entries ---- */
62 
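/* Device Under Test mode toggle, exposed as a boolean debugfs file
 * (typically /sys/kernel/debug/bluetooth/hciN/dut_mode). Reading returns
 * 'Y' or 'N'; writing '1' sends HCI_OP_ENABLE_DUT_MODE, while writing '0'
 * resets the controller to leave DUT mode again.
 */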
63 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
64 			     size_t count, loff_t *ppos)
65 {
66 	struct hci_dev *hdev = file->private_data;
67 	char buf[3];
68 
69 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
70 	buf[1] = '\n';
71 	buf[2] = '\0';
72 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
73 }
74 
75 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
76 			      size_t count, loff_t *ppos)
77 {
78 	struct hci_dev *hdev = file->private_data;
79 	struct sk_buff *skb;
80 	bool enable;
81 	int err;
82 
83 	if (!test_bit(HCI_UP, &hdev->flags))
84 		return -ENETDOWN;
85 
86 	err = kstrtobool_from_user(user_buf, count, &enable);
87 	if (err)
88 		return err;
89 
90 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
91 		return -EALREADY;
92 
93 	hci_req_sync_lock(hdev);
94 	if (enable)
95 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
96 				     HCI_CMD_TIMEOUT);
97 	else
98 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
99 				     HCI_CMD_TIMEOUT);
100 	hci_req_sync_unlock(hdev);
101 
102 	if (IS_ERR(skb))
103 		return PTR_ERR(skb);
104 
105 	kfree_skb(skb);
106 
107 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
108 
109 	return count;
110 }
111 
112 static const struct file_operations dut_mode_fops = {
113 	.open		= simple_open,
114 	.read		= dut_mode_read,
115 	.write		= dut_mode_write,
116 	.llseek		= default_llseek,
117 };
118 
119 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
120 				size_t count, loff_t *ppos)
121 {
122 	struct hci_dev *hdev = file->private_data;
123 	char buf[3];
124 
125 	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
126 	buf[1] = '\n';
127 	buf[2] = '\0';
128 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
129 }
130 
131 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
132 				 size_t count, loff_t *ppos)
133 {
134 	struct hci_dev *hdev = file->private_data;
135 	bool enable;
136 	int err;
137 
138 	err = kstrtobool_from_user(user_buf, count, &enable);
139 	if (err)
140 		return err;
141 
142 	/* When the diagnostic flags are not persistent and the transport
143 	 * is not active or in user channel operation, there is no need
144 	 * for the vendor callback. Instead just store the desired value and
145 	 * the setting will be programmed when the controller gets powered on.
146 	 */
147 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
148 	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
149 	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
150 		goto done;
151 
152 	hci_req_sync_lock(hdev);
153 	err = hdev->set_diag(hdev, enable);
154 	hci_req_sync_unlock(hdev);
155 
156 	if (err < 0)
157 		return err;
158 
159 done:
160 	if (enable)
161 		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
162 	else
163 		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
164 
165 	return count;
166 }
167 
168 static const struct file_operations vendor_diag_fops = {
169 	.open		= simple_open,
170 	.read		= vendor_diag_read,
171 	.write		= vendor_diag_write,
172 	.llseek		= default_llseek,
173 };
174 
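/* Create the debugfs entries that are valid for every controller:
 * dut_mode is always available, vendor_diag only when the driver
 * provides a set_diag callback.
 */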
175 static void hci_debugfs_create_basic(struct hci_dev *hdev)
176 {
177 	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
178 			    &dut_mode_fops);
179 
180 	if (hdev->set_diag)
181 		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
182 				    &vendor_diag_fops);
183 }
184 
185 static int hci_reset_req(struct hci_request *req, unsigned long opt)
186 {
187 	BT_DBG("%s %ld", req->hdev->name, opt);
188 
189 	/* Reset device */
190 	set_bit(HCI_RESET, &req->hdev->flags);
191 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
192 	return 0;
193 }
194 
195 static void bredr_init(struct hci_request *req)
196 {
197 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
198 
199 	/* Read Local Supported Features */
200 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
201 
202 	/* Read Local Version */
203 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
204 
205 	/* Read BD Address */
206 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
207 }
208 
209 static void amp_init1(struct hci_request *req)
210 {
211 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
212 
213 	/* Read Local Version */
214 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 
216 	/* Read Local Supported Commands */
217 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
218 
219 	/* Read Local AMP Info */
220 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
221 
222 	/* Read Data Blk size */
223 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
224 
225 	/* Read Flow Control Mode */
226 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
227 
228 	/* Read Location Data */
229 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
230 }
231 
232 static int amp_init2(struct hci_request *req)
233 {
234 	/* Read Local Supported Features. Not all AMP controllers
235 	 * support this so it's placed conditionally in the second
236 	 * stage init.
237 	 */
238 	if (req->hdev->commands[14] & 0x20)
239 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
240 
241 	return 0;
242 }
243 
244 static int hci_init1_req(struct hci_request *req, unsigned long opt)
245 {
246 	struct hci_dev *hdev = req->hdev;
247 
248 	BT_DBG("%s %ld", hdev->name, opt);
249 
250 	/* Reset */
251 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
252 		hci_reset_req(req, 0);
253 
254 	switch (hdev->dev_type) {
255 	case HCI_PRIMARY:
256 		bredr_init(req);
257 		break;
258 	case HCI_AMP:
259 		amp_init1(req);
260 		break;
261 	default:
262 		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
263 		break;
264 	}
265 
266 	return 0;
267 }
268 
269 static void bredr_setup(struct hci_request *req)
270 {
271 	__le16 param;
272 	__u8 flt_type;
273 
274 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
275 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
276 
277 	/* Read Class of Device */
278 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
279 
280 	/* Read Local Name */
281 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
282 
283 	/* Read Voice Setting */
284 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
285 
286 	/* Read Number of Supported IAC */
287 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
288 
289 	/* Read Current IAC LAP */
290 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
291 
292 	/* Clear Event Filters */
293 	flt_type = HCI_FLT_CLEAR_ALL;
294 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
295 
296 	/* Connection accept timeout ~20 secs */
297 	param = cpu_to_le16(0x7d00);
298 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
299 }
300 
301 static void le_setup(struct hci_request *req)
302 {
303 	struct hci_dev *hdev = req->hdev;
304 
305 	/* Read LE Buffer Size */
306 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
307 
308 	/* Read LE Local Supported Features */
309 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
310 
311 	/* Read LE Supported States */
312 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
313 
314 	/* LE-only controllers have LE implicitly enabled */
315 	if (!lmp_bredr_capable(hdev))
316 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
317 }
318 
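/* Program the page 1 event mask, enabling only the events that the
 * controller can actually generate based on its LMP features.
 */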
319 static void hci_setup_event_mask(struct hci_request *req)
320 {
321 	struct hci_dev *hdev = req->hdev;
322 
323 	/* The second byte is 0xff instead of 0x9f (two reserved bits
324 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
325 	 * command otherwise.
326 	 */
327 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
328 
329 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
330 	 * any event mask for pre-1.2 devices.
331 	 */
332 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
333 		return;
334 
335 	if (lmp_bredr_capable(hdev)) {
336 		events[4] |= 0x01; /* Flow Specification Complete */
337 	} else {
338 		/* Use a different default for LE-only devices */
339 		memset(events, 0, sizeof(events));
340 		events[1] |= 0x20; /* Command Complete */
341 		events[1] |= 0x40; /* Command Status */
342 		events[1] |= 0x80; /* Hardware Error */
343 
344 		/* If the controller supports the Disconnect command, enable
345 		 * the corresponding event. In addition enable packet flow
346 		 * control related events.
347 		 */
348 		if (hdev->commands[0] & 0x20) {
349 			events[0] |= 0x10; /* Disconnection Complete */
350 			events[2] |= 0x04; /* Number of Completed Packets */
351 			events[3] |= 0x02; /* Data Buffer Overflow */
352 		}
353 
354 		/* If the controller supports the Read Remote Version
355 		 * Information command, enable the corresponding event.
356 		 */
357 		if (hdev->commands[2] & 0x80)
358 			events[1] |= 0x08; /* Read Remote Version Information
359 					    * Complete
360 					    */
361 
362 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
363 			events[0] |= 0x80; /* Encryption Change */
364 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
365 		}
366 	}
367 
368 	if (lmp_inq_rssi_capable(hdev) ||
369 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
370 		events[4] |= 0x02; /* Inquiry Result with RSSI */
371 
372 	if (lmp_ext_feat_capable(hdev))
373 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
374 
375 	if (lmp_esco_capable(hdev)) {
376 		events[5] |= 0x08; /* Synchronous Connection Complete */
377 		events[5] |= 0x10; /* Synchronous Connection Changed */
378 	}
379 
380 	if (lmp_sniffsubr_capable(hdev))
381 		events[5] |= 0x20; /* Sniff Subrating */
382 
383 	if (lmp_pause_enc_capable(hdev))
384 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
385 
386 	if (lmp_ext_inq_capable(hdev))
387 		events[5] |= 0x40; /* Extended Inquiry Result */
388 
389 	if (lmp_no_flush_capable(hdev))
390 		events[7] |= 0x01; /* Enhanced Flush Complete */
391 
392 	if (lmp_lsto_capable(hdev))
393 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
394 
395 	if (lmp_ssp_capable(hdev)) {
396 		events[6] |= 0x01;	/* IO Capability Request */
397 		events[6] |= 0x02;	/* IO Capability Response */
398 		events[6] |= 0x04;	/* User Confirmation Request */
399 		events[6] |= 0x08;	/* User Passkey Request */
400 		events[6] |= 0x10;	/* Remote OOB Data Request */
401 		events[6] |= 0x20;	/* Simple Pairing Complete */
402 		events[7] |= 0x04;	/* User Passkey Notification */
403 		events[7] |= 0x08;	/* Keypress Notification */
404 		events[7] |= 0x10;	/* Remote Host Supported
405 					 * Features Notification
406 					 */
407 	}
408 
409 	if (lmp_le_capable(hdev))
410 		events[7] |= 0x20;	/* LE Meta-Event */
411 
412 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
413 }
414 
415 static int hci_init2_req(struct hci_request *req, unsigned long opt)
416 {
417 	struct hci_dev *hdev = req->hdev;
418 
419 	if (hdev->dev_type == HCI_AMP)
420 		return amp_init2(req);
421 
422 	if (lmp_bredr_capable(hdev))
423 		bredr_setup(req);
424 	else
425 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
426 
427 	if (lmp_le_capable(hdev))
428 		le_setup(req);
429 
430 	/* All Bluetooth 1.2 and later controllers should support the
431 	 * HCI command for reading the local supported commands.
432 	 *
433 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
434 	 * but do not have support for this command. If that is the case,
435 	 * the driver can quirk the behavior and skip reading the local
436 	 * supported commands.
437 	 */
438 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
439 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
440 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
441 
442 	if (lmp_ssp_capable(hdev)) {
443 		/* When SSP is available, the host features page
444 		 * should also be available. However some controllers
445 		 * list max_page as 0 as long as SSP has not been
446 		 * enabled. To achieve proper debugging output, force
447 		 * max_page to a minimum of 1.
448 		 */
449 		hdev->max_page = 0x01;
450 
451 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
452 			u8 mode = 0x01;
453 
454 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
455 				    sizeof(mode), &mode);
456 		} else {
457 			struct hci_cp_write_eir cp;
458 
459 			memset(hdev->eir, 0, sizeof(hdev->eir));
460 			memset(&cp, 0, sizeof(cp));
461 
462 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
463 		}
464 	}
465 
466 	if (lmp_inq_rssi_capable(hdev) ||
467 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
468 		u8 mode;
469 
470 		/* If Extended Inquiry Result events are supported, then
471 		 * they are clearly preferred over Inquiry Result with RSSI
472 		 * events.
473 		 */
474 		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
475 
476 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
477 	}
478 
479 	if (lmp_inq_tx_pwr_capable(hdev))
480 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
481 
482 	if (lmp_ext_feat_capable(hdev)) {
483 		struct hci_cp_read_local_ext_features cp;
484 
485 		cp.page = 0x01;
486 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
487 			    sizeof(cp), &cp);
488 	}
489 
490 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
491 		u8 enable = 1;
492 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
493 			    &enable);
494 	}
495 
496 	return 0;
497 }
498 
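/* Build the default link policy from the local LMP features (role switch,
 * hold, sniff and park) and program it with the Write Default Link Policy
 * Settings command.
 */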
499 static void hci_setup_link_policy(struct hci_request *req)
500 {
501 	struct hci_dev *hdev = req->hdev;
502 	struct hci_cp_write_def_link_policy cp;
503 	u16 link_policy = 0;
504 
505 	if (lmp_rswitch_capable(hdev))
506 		link_policy |= HCI_LP_RSWITCH;
507 	if (lmp_hold_capable(hdev))
508 		link_policy |= HCI_LP_HOLD;
509 	if (lmp_sniff_capable(hdev))
510 		link_policy |= HCI_LP_SNIFF;
511 	if (lmp_park_capable(hdev))
512 		link_policy |= HCI_LP_PARK;
513 
514 	cp.policy = cpu_to_le16(link_policy);
515 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
516 }
517 
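/* Program the LE Host Supported setting on dual-mode controllers whenever
 * the host-side HCI_LE_ENABLED flag differs from what the controller
 * currently reports.
 */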
518 static void hci_set_le_support(struct hci_request *req)
519 {
520 	struct hci_dev *hdev = req->hdev;
521 	struct hci_cp_write_le_host_supported cp;
522 
523 	/* LE-only devices do not support explicit enablement */
524 	if (!lmp_bredr_capable(hdev))
525 		return;
526 
527 	memset(&cp, 0, sizeof(cp));
528 
529 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
530 		cp.le = 0x01;
531 		cp.simul = 0x00;
532 	}
533 
534 	if (cp.le != lmp_host_le_capable(hdev))
535 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
536 			    &cp);
537 }
538 
539 static void hci_set_event_mask_page_2(struct hci_request *req)
540 {
541 	struct hci_dev *hdev = req->hdev;
542 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
543 	bool changed = false;
544 
545 	/* If Connectionless Slave Broadcast master role is supported
546 	 * enable all necessary events for it.
547 	 */
548 	if (lmp_csb_master_capable(hdev)) {
549 		events[1] |= 0x40;	/* Triggered Clock Capture */
550 		events[1] |= 0x80;	/* Synchronization Train Complete */
551 		events[2] |= 0x10;	/* Slave Page Response Timeout */
552 		events[2] |= 0x20;	/* CSB Channel Map Change */
553 		changed = true;
554 	}
555 
556 	/* If Connectionless Slave Broadcast slave role is supported
557 	 * enable all necessary events for it.
558 	 */
559 	if (lmp_csb_slave_capable(hdev)) {
560 		events[2] |= 0x01;	/* Synchronization Train Received */
561 		events[2] |= 0x02;	/* CSB Receive */
562 		events[2] |= 0x04;	/* CSB Timeout */
563 		events[2] |= 0x08;	/* Truncated Page Complete */
564 		changed = true;
565 	}
566 
567 	/* Enable Authenticated Payload Timeout Expired event if supported */
568 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
569 		events[2] |= 0x80;
570 		changed = true;
571 	}
572 
573 	/* Some Broadcom based controllers indicate support for the Set Event
574 	 * Mask Page 2 command, but do not actually support it. Since
575 	 * the default value is all bits set to zero, the command is only
576 	 * required if the event mask has to be changed. In case no change
577 	 * to the event mask is needed, skip this command.
578 	 */
579 	if (changed)
580 		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
581 			    sizeof(events), events);
582 }
583 
584 static int hci_init3_req(struct hci_request *req, unsigned long opt)
585 {
586 	struct hci_dev *hdev = req->hdev;
587 	u8 p;
588 
589 	hci_setup_event_mask(req);
590 
591 	if (hdev->commands[6] & 0x20 &&
592 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
593 		struct hci_cp_read_stored_link_key cp;
594 
595 		bacpy(&cp.bdaddr, BDADDR_ANY);
596 		cp.read_all = 0x01;
597 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
598 	}
599 
600 	if (hdev->commands[5] & 0x10)
601 		hci_setup_link_policy(req);
602 
603 	if (hdev->commands[8] & 0x01)
604 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
605 
606 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
607 	 * support the Read Page Scan Type command. Check support for
608 	 * this command in the bit mask of supported commands.
609 	 */
610 	if (hdev->commands[13] & 0x01)
611 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
612 
613 	if (lmp_le_capable(hdev)) {
614 		u8 events[8];
615 
616 		memset(events, 0, sizeof(events));
617 
618 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
619 			events[0] |= 0x10;	/* LE Long Term Key Request */
620 
621 		/* If controller supports the Connection Parameters Request
622 		 * Link Layer Procedure, enable the corresponding event.
623 		 */
624 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
625 			events[0] |= 0x20;	/* LE Remote Connection
626 						 * Parameter Request
627 						 */
628 
629 		/* If the controller supports the Data Length Extension
630 		 * feature, enable the corresponding event.
631 		 */
632 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
633 			events[0] |= 0x40;	/* LE Data Length Change */
634 
635 		/* If the controller supports Extended Scanner Filter
636 		 * Policies, enable the corresponding event.
637 		 */
638 		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
639 			events[1] |= 0x04;	/* LE Direct Advertising
640 						 * Report
641 						 */
642 
643 		/* If the controller supports Channel Selection Algorithm #2
644 		 * feature, enable the corresponding event.
645 		 */
646 		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
647 			events[2] |= 0x08;	/* LE Channel Selection
648 						 * Algorithm
649 						 */
650 
651 		/* If the controller supports the LE Set Scan Enable command,
652 		 * enable the corresponding advertising report event.
653 		 */
654 		if (hdev->commands[26] & 0x08)
655 			events[0] |= 0x02;	/* LE Advertising Report */
656 
657 		/* If the controller supports the LE Create Connection
658 		 * command, enable the corresponding event.
659 		 */
660 		if (hdev->commands[26] & 0x10)
661 			events[0] |= 0x01;	/* LE Connection Complete */
662 
663 		/* If the controller supports the LE Connection Update
664 		 * command, enable the corresponding event.
665 		 */
666 		if (hdev->commands[27] & 0x04)
667 			events[0] |= 0x04;	/* LE Connection Update
668 						 * Complete
669 						 */
670 
671 		/* If the controller supports the LE Read Remote Used Features
672 		 * command, enable the corresponding event.
673 		 */
674 		if (hdev->commands[27] & 0x20)
675 			events[0] |= 0x08;	/* LE Read Remote Used
676 						 * Features Complete
677 						 */
678 
679 		/* If the controller supports the LE Read Local P-256
680 		 * Public Key command, enable the corresponding event.
681 		 */
682 		if (hdev->commands[34] & 0x02)
683 			events[0] |= 0x80;	/* LE Read Local P-256
684 						 * Public Key Complete
685 						 */
686 
687 		/* If the controller supports the LE Generate DHKey
688 		 * command, enable the corresponding event.
689 		 */
690 		if (hdev->commands[34] & 0x04)
691 			events[1] |= 0x01;	/* LE Generate DHKey Complete */
692 
693 		/* If the controller supports the LE Set Default PHY or
694 		 * LE Set PHY commands, enable the corresponding event.
695 		 */
696 		if (hdev->commands[35] & (0x20 | 0x40))
697 			events[1] |= 0x08;        /* LE PHY Update Complete */
698 
699 		/* If the controller supports LE Set Extended Scan Parameters
700 		 * and LE Set Extended Scan Enable commands, enable the
701 		 * corresponding event.
702 		 */
703 		if (use_ext_scan(hdev))
704 			events[1] |= 0x10;	/* LE Extended Advertising
705 						 * Report
706 						 */
707 
708 		/* If the controller supports the LE Extended Create Connection
709 		 * command, enable the corresponding event.
710 		 */
711 		if (use_ext_conn(hdev))
712 			events[1] |= 0x02;      /* LE Enhanced Connection
713 						 * Complete
714 						 */
715 
716 		/* If the controller supports the LE Extended Advertising
717 		 * command, enable the corresponding event.
718 		 */
719 		if (ext_adv_capable(hdev))
720 			events[2] |= 0x02;	/* LE Advertising Set
721 						 * Terminated
722 						 */
723 
724 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
725 			    events);
726 
727 		/* Read LE Advertising Channel TX Power */
728 		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
729 			/* The HCI spec forbids mixing legacy and extended
730 			 * advertising commands, and READ_ADV_TX_POWER is one
731 			 * of the legacy commands. So do not call it if
732 			 * extended advertising is supported, otherwise the
733 			 * controller will return COMMAND_DISALLOWED.
734 			 */
735 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
736 		}
737 
738 		if (hdev->commands[26] & 0x40) {
739 			/* Read LE White List Size */
740 			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
741 				    0, NULL);
742 		}
743 
744 		if (hdev->commands[26] & 0x80) {
745 			/* Clear LE White List */
746 			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
747 		}
748 
749 		if (hdev->commands[34] & 0x40) {
750 			/* Read LE Resolving List Size */
751 			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
752 				    0, NULL);
753 		}
754 
755 		if (hdev->commands[34] & 0x20) {
756 			/* Clear LE Resolving List */
757 			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
758 		}
759 
760 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
761 			/* Read LE Maximum Data Length */
762 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
763 
764 			/* Read LE Suggested Default Data Length */
765 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
766 		}
767 
768 		if (ext_adv_capable(hdev)) {
769 			/* Read LE Number of Supported Advertising Sets */
770 			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
771 				    0, NULL);
772 		}
773 
774 		hci_set_le_support(req);
775 	}
776 
777 	/* Read features beyond page 1 if available */
778 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
779 		struct hci_cp_read_local_ext_features cp;
780 
781 		cp.page = p;
782 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
783 			    sizeof(cp), &cp);
784 	}
785 
786 	return 0;
787 }
788 
789 static int hci_init4_req(struct hci_request *req, unsigned long opt)
790 {
791 	struct hci_dev *hdev = req->hdev;
792 
793 	/* Some Broadcom based Bluetooth controllers do not support the
794 	 * Delete Stored Link Key command. They are clearly indicating its
795 	 * absence in the bit mask of supported commands.
796 	 *
797 	 * Check the supported commands and only if the command is marked
798 	 * as supported, send it. If not supported, assume that the controller
799 	 * does not have actual support for stored link keys which makes this
800 	 * command redundant anyway.
801 	 *
802 	 * Some controllers indicate that they support deleting
803 	 * stored link keys, but they don't. The quirk lets a driver
804 	 * just disable this command.
805 	 */
806 	if (hdev->commands[6] & 0x80 &&
807 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
808 		struct hci_cp_delete_stored_link_key cp;
809 
810 		bacpy(&cp.bdaddr, BDADDR_ANY);
811 		cp.delete_all = 0x01;
812 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
813 			    sizeof(cp), &cp);
814 	}
815 
816 	/* Set event mask page 2 if the HCI command for it is supported */
817 	if (hdev->commands[22] & 0x04)
818 		hci_set_event_mask_page_2(req);
819 
820 	/* Read local codec list if the HCI command is supported */
821 	if (hdev->commands[29] & 0x20)
822 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
823 
824 	/* Get MWS transport configuration if the HCI command is supported */
825 	if (hdev->commands[30] & 0x08)
826 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
827 
828 	/* Check for Synchronization Train support */
829 	if (lmp_sync_train_capable(hdev))
830 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
831 
832 	/* Enable Secure Connections if supported and configured */
833 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
834 	    bredr_sc_enabled(hdev)) {
835 		u8 support = 0x01;
836 
837 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
838 			    sizeof(support), &support);
839 	}
840 
841 	/* Set Suggested Default Data Length to maximum if supported */
842 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
843 		struct hci_cp_le_write_def_data_len cp;
844 
845 		cp.tx_len = hdev->le_max_tx_len;
846 		cp.tx_time = hdev->le_max_tx_time;
847 		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
848 	}
849 
850 	/* Set Default PHY parameters if command is supported */
851 	if (hdev->commands[35] & 0x20) {
852 		struct hci_cp_le_set_default_phy cp;
853 
854 		cp.all_phys = 0x00;
855 		cp.tx_phys = hdev->le_tx_def_phys;
856 		cp.rx_phys = hdev->le_rx_def_phys;
857 
858 		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
859 	}
860 
861 	return 0;
862 }
863 
864 static int __hci_init(struct hci_dev *hdev)
865 {
866 	int err;
867 
868 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
869 	if (err < 0)
870 		return err;
871 
872 	if (hci_dev_test_flag(hdev, HCI_SETUP))
873 		hci_debugfs_create_basic(hdev);
874 
875 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
876 	if (err < 0)
877 		return err;
878 
879 	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
880 	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
881 	 * first two stages of init.
882 	 */
883 	if (hdev->dev_type != HCI_PRIMARY)
884 		return 0;
885 
886 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
887 	if (err < 0)
888 		return err;
889 
890 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
891 	if (err < 0)
892 		return err;
893 
894 	/* This function is only called when the controller is actually in
895 	 * configured state. When the controller is marked as unconfigured,
896 	 * this initialization procedure is not run.
897 	 *
898 	 * It means that it is possible that a controller runs through its
899 	 * setup phase and then discovers missing settings. If that is the
900 	 * case, then this function will not be called. It then will only
901 	 * be called during the config phase.
902 	 *
903 	 * So only when in setup phase or config phase, create the debugfs
904 	 * entries and register the SMP channels.
905 	 */
906 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
907 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
908 		return 0;
909 
910 	hci_debugfs_create_common(hdev);
911 
912 	if (lmp_bredr_capable(hdev))
913 		hci_debugfs_create_bredr(hdev);
914 
915 	if (lmp_le_capable(hdev))
916 		hci_debugfs_create_le(hdev);
917 
918 	return 0;
919 }
920 
921 static int hci_init0_req(struct hci_request *req, unsigned long opt)
922 {
923 	struct hci_dev *hdev = req->hdev;
924 
925 	BT_DBG("%s %ld", hdev->name, opt);
926 
927 	/* Reset */
928 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
929 		hci_reset_req(req, 0);
930 
931 	/* Read Local Version */
932 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
933 
934 	/* Read BD Address */
935 	if (hdev->set_bdaddr)
936 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
937 
938 	return 0;
939 }
940 
941 static int __hci_unconf_init(struct hci_dev *hdev)
942 {
943 	int err;
944 
945 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
946 		return 0;
947 
948 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
949 	if (err < 0)
950 		return err;
951 
952 	if (hci_dev_test_flag(hdev, HCI_SETUP))
953 		hci_debugfs_create_basic(hdev);
954 
955 	return 0;
956 }
957 
958 static int hci_scan_req(struct hci_request *req, unsigned long opt)
959 {
960 	__u8 scan = opt;
961 
962 	BT_DBG("%s %x", req->hdev->name, scan);
963 
964 	/* Inquiry and Page scans */
965 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
966 	return 0;
967 }
968 
969 static int hci_auth_req(struct hci_request *req, unsigned long opt)
970 {
971 	__u8 auth = opt;
972 
973 	BT_DBG("%s %x", req->hdev->name, auth);
974 
975 	/* Authentication */
976 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
977 	return 0;
978 }
979 
980 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
981 {
982 	__u8 encrypt = opt;
983 
984 	BT_DBG("%s %x", req->hdev->name, encrypt);
985 
986 	/* Encryption */
987 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
988 	return 0;
989 }
990 
991 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
992 {
993 	__le16 policy = cpu_to_le16(opt);
994 
995 	BT_DBG("%s %x", req->hdev->name, policy);
996 
997 	/* Default link policy */
998 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
999 	return 0;
1000 }
1001 
1002 /* Get HCI device by index.
1003  * Device is held on return. */
1004 struct hci_dev *hci_dev_get(int index)
1005 {
1006 	struct hci_dev *hdev = NULL, *d;
1007 
1008 	BT_DBG("%d", index);
1009 
1010 	if (index < 0)
1011 		return NULL;
1012 
1013 	read_lock(&hci_dev_list_lock);
1014 	list_for_each_entry(d, &hci_dev_list, list) {
1015 		if (d->id == index) {
1016 			hdev = hci_dev_hold(d);
1017 			break;
1018 		}
1019 	}
1020 	read_unlock(&hci_dev_list_lock);
1021 	return hdev;
1022 }
1023 
1024 /* ---- Inquiry support ---- */
1025 
1026 bool hci_discovery_active(struct hci_dev *hdev)
1027 {
1028 	struct discovery_state *discov = &hdev->discovery;
1029 
1030 	switch (discov->state) {
1031 	case DISCOVERY_FINDING:
1032 	case DISCOVERY_RESOLVING:
1033 		return true;
1034 
1035 	default:
1036 		return false;
1037 	}
1038 }
1039 
1040 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1041 {
1042 	int old_state = hdev->discovery.state;
1043 
1044 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1045 
1046 	if (old_state == state)
1047 		return;
1048 
1049 	hdev->discovery.state = state;
1050 
1051 	switch (state) {
1052 	case DISCOVERY_STOPPED:
1053 		hci_update_background_scan(hdev);
1054 
1055 		if (old_state != DISCOVERY_STARTING)
1056 			mgmt_discovering(hdev, 0);
1057 		break;
1058 	case DISCOVERY_STARTING:
1059 		break;
1060 	case DISCOVERY_FINDING:
1061 		mgmt_discovering(hdev, 1);
1062 		break;
1063 	case DISCOVERY_RESOLVING:
1064 		break;
1065 	case DISCOVERY_STOPPING:
1066 		break;
1067 	}
1068 }
1069 
1070 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1071 {
1072 	struct discovery_state *cache = &hdev->discovery;
1073 	struct inquiry_entry *p, *n;
1074 
1075 	list_for_each_entry_safe(p, n, &cache->all, all) {
1076 		list_del(&p->all);
1077 		kfree(p);
1078 	}
1079 
1080 	INIT_LIST_HEAD(&cache->unknown);
1081 	INIT_LIST_HEAD(&cache->resolve);
1082 }
1083 
1084 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1085 					       bdaddr_t *bdaddr)
1086 {
1087 	struct discovery_state *cache = &hdev->discovery;
1088 	struct inquiry_entry *e;
1089 
1090 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1091 
1092 	list_for_each_entry(e, &cache->all, all) {
1093 		if (!bacmp(&e->data.bdaddr, bdaddr))
1094 			return e;
1095 	}
1096 
1097 	return NULL;
1098 }
1099 
1100 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1101 						       bdaddr_t *bdaddr)
1102 {
1103 	struct discovery_state *cache = &hdev->discovery;
1104 	struct inquiry_entry *e;
1105 
1106 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1107 
1108 	list_for_each_entry(e, &cache->unknown, list) {
1109 		if (!bacmp(&e->data.bdaddr, bdaddr))
1110 			return e;
1111 	}
1112 
1113 	return NULL;
1114 }
1115 
1116 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1117 						       bdaddr_t *bdaddr,
1118 						       int state)
1119 {
1120 	struct discovery_state *cache = &hdev->discovery;
1121 	struct inquiry_entry *e;
1122 
1123 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1124 
1125 	list_for_each_entry(e, &cache->resolve, list) {
1126 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1127 			return e;
1128 		if (!bacmp(&e->data.bdaddr, bdaddr))
1129 			return e;
1130 	}
1131 
1132 	return NULL;
1133 }
1134 
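/* Reposition the entry within the resolve list, which is kept ordered by
 * absolute RSSI so that devices with the strongest signal get their names
 * resolved first.
 */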
1135 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1136 				      struct inquiry_entry *ie)
1137 {
1138 	struct discovery_state *cache = &hdev->discovery;
1139 	struct list_head *pos = &cache->resolve;
1140 	struct inquiry_entry *p;
1141 
1142 	list_del(&ie->list);
1143 
1144 	list_for_each_entry(p, &cache->resolve, list) {
1145 		if (p->name_state != NAME_PENDING &&
1146 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1147 			break;
1148 		pos = &p->list;
1149 	}
1150 
1151 	list_add(&ie->list, pos);
1152 }
1153 
1154 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1155 			     bool name_known)
1156 {
1157 	struct discovery_state *cache = &hdev->discovery;
1158 	struct inquiry_entry *ie;
1159 	u32 flags = 0;
1160 
1161 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1162 
1163 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1164 
1165 	if (!data->ssp_mode)
1166 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1167 
1168 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1169 	if (ie) {
1170 		if (!ie->data.ssp_mode)
1171 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1172 
1173 		if (ie->name_state == NAME_NEEDED &&
1174 		    data->rssi != ie->data.rssi) {
1175 			ie->data.rssi = data->rssi;
1176 			hci_inquiry_cache_update_resolve(hdev, ie);
1177 		}
1178 
1179 		goto update;
1180 	}
1181 
1182 	/* Entry not in the cache. Add new one. */
1183 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1184 	if (!ie) {
1185 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1186 		goto done;
1187 	}
1188 
1189 	list_add(&ie->all, &cache->all);
1190 
1191 	if (name_known) {
1192 		ie->name_state = NAME_KNOWN;
1193 	} else {
1194 		ie->name_state = NAME_NOT_KNOWN;
1195 		list_add(&ie->list, &cache->unknown);
1196 	}
1197 
1198 update:
1199 	if (name_known && ie->name_state != NAME_KNOWN &&
1200 	    ie->name_state != NAME_PENDING) {
1201 		ie->name_state = NAME_KNOWN;
1202 		list_del(&ie->list);
1203 	}
1204 
1205 	memcpy(&ie->data, data, sizeof(*data));
1206 	ie->timestamp = jiffies;
1207 	cache->timestamp = jiffies;
1208 
1209 	if (ie->name_state == NAME_NOT_KNOWN)
1210 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1211 
1212 done:
1213 	return flags;
1214 }
1215 
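/* Copy at most num entries from the inquiry cache into buf as consecutive
 * struct inquiry_info records and return the number of entries copied.
 */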
1216 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1217 {
1218 	struct discovery_state *cache = &hdev->discovery;
1219 	struct inquiry_info *info = (struct inquiry_info *) buf;
1220 	struct inquiry_entry *e;
1221 	int copied = 0;
1222 
1223 	list_for_each_entry(e, &cache->all, all) {
1224 		struct inquiry_data *data = &e->data;
1225 
1226 		if (copied >= num)
1227 			break;
1228 
1229 		bacpy(&info->bdaddr, &data->bdaddr);
1230 		info->pscan_rep_mode	= data->pscan_rep_mode;
1231 		info->pscan_period_mode	= data->pscan_period_mode;
1232 		info->pscan_mode	= data->pscan_mode;
1233 		memcpy(info->dev_class, data->dev_class, 3);
1234 		info->clock_offset	= data->clock_offset;
1235 
1236 		info++;
1237 		copied++;
1238 	}
1239 
1240 	BT_DBG("cache %p, copied %d", cache, copied);
1241 	return copied;
1242 }
1243 
1244 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1245 {
1246 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1247 	struct hci_dev *hdev = req->hdev;
1248 	struct hci_cp_inquiry cp;
1249 
1250 	BT_DBG("%s", hdev->name);
1251 
1252 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1253 		return 0;
1254 
1255 	/* Start Inquiry */
1256 	memcpy(&cp.lap, &ir->lap, 3);
1257 	cp.length  = ir->length;
1258 	cp.num_rsp = ir->num_rsp;
1259 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1260 
1261 	return 0;
1262 }
1263 
1264 int hci_inquiry(void __user *arg)
1265 {
1266 	__u8 __user *ptr = arg;
1267 	struct hci_inquiry_req ir;
1268 	struct hci_dev *hdev;
1269 	int err = 0, do_inquiry = 0, max_rsp;
1270 	long timeo;
1271 	__u8 *buf;
1272 
1273 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1274 		return -EFAULT;
1275 
1276 	hdev = hci_dev_get(ir.dev_id);
1277 	if (!hdev)
1278 		return -ENODEV;
1279 
1280 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1281 		err = -EBUSY;
1282 		goto done;
1283 	}
1284 
1285 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1286 		err = -EOPNOTSUPP;
1287 		goto done;
1288 	}
1289 
1290 	if (hdev->dev_type != HCI_PRIMARY) {
1291 		err = -EOPNOTSUPP;
1292 		goto done;
1293 	}
1294 
1295 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1296 		err = -EOPNOTSUPP;
1297 		goto done;
1298 	}
1299 
1300 	hci_dev_lock(hdev);
1301 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1302 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1303 		hci_inquiry_cache_flush(hdev);
1304 		do_inquiry = 1;
1305 	}
1306 	hci_dev_unlock(hdev);
1307 
1308 	timeo = ir.length * msecs_to_jiffies(2000);
1309 
1310 	if (do_inquiry) {
1311 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1312 				   timeo, NULL);
1313 		if (err < 0)
1314 			goto done;
1315 
1316 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1317 		 * cleared). If it is interrupted by a signal, return -EINTR.
1318 		 */
1319 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1320 				TASK_INTERRUPTIBLE))
1321 			return -EINTR;
1322 	}
1323 
1324 	/* For an unlimited number of responses, use a buffer with
1325 	 * 255 entries.
1326 	 */
1327 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1328 
1329 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1330 	 * and then copy it to user space.
1331 	 */
1332 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1333 	if (!buf) {
1334 		err = -ENOMEM;
1335 		goto done;
1336 	}
1337 
1338 	hci_dev_lock(hdev);
1339 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1340 	hci_dev_unlock(hdev);
1341 
1342 	BT_DBG("num_rsp %d", ir.num_rsp);
1343 
1344 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1345 		ptr += sizeof(ir);
1346 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1347 				 ir.num_rsp))
1348 			err = -EFAULT;
1349 	} else
1350 		err = -EFAULT;
1351 
1352 	kfree(buf);
1353 
1354 done:
1355 	hci_dev_put(hdev);
1356 	return err;
1357 }
1358 
1359 /**
1360  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1361  *				       (BD_ADDR) for a HCI device from
1362  *				       a firmware node property.
1363  * @hdev:	The HCI device
1364  *
1365  * Search the firmware node for 'local-bd-address'.
1366  *
1367  * All-zero BD addresses are rejected, because those could be properties
1368  * that exist in the firmware tables, but were not updated by the firmware. For
1369  * example, the DTS could define 'local-bd-address' with an all-zero BD address.
1370  */
1371 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1372 {
1373 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1374 	bdaddr_t ba;
1375 	int ret;
1376 
1377 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1378 					    (u8 *)&ba, sizeof(ba));
1379 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1380 		return;
1381 
1382 	bacpy(&hdev->public_addr, &ba);
1383 }
1384 
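/* Power on the controller: open the transport, run the vendor setup
 * callback and the staged HCI init sequence, and finally mark the device
 * as HCI_UP. On failure the transport is closed again and a negative
 * errno is returned.
 */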
1385 static int hci_dev_do_open(struct hci_dev *hdev)
1386 {
1387 	int ret = 0;
1388 
1389 	BT_DBG("%s %p", hdev->name, hdev);
1390 
1391 	hci_req_sync_lock(hdev);
1392 
1393 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1394 		ret = -ENODEV;
1395 		goto done;
1396 	}
1397 
1398 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1399 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1400 		/* Check for rfkill but allow the HCI setup stage to
1401 		 * proceed (which in itself doesn't cause any RF activity).
1402 		 */
1403 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1404 			ret = -ERFKILL;
1405 			goto done;
1406 		}
1407 
1408 		/* Check for valid public address or a configured static
1409 		 * random address, but let the HCI setup proceed to
1410 		 * be able to determine if there is a public address
1411 		 * or not.
1412 		 *
1413 		 * In case of user channel usage, it is not important
1414 		 * if a public address or static random address is
1415 		 * available.
1416 		 *
1417 		 * This check is only valid for BR/EDR controllers
1418 		 * since AMP controllers do not have an address.
1419 		 */
1420 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1421 		    hdev->dev_type == HCI_PRIMARY &&
1422 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1423 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1424 			ret = -EADDRNOTAVAIL;
1425 			goto done;
1426 		}
1427 	}
1428 
1429 	if (test_bit(HCI_UP, &hdev->flags)) {
1430 		ret = -EALREADY;
1431 		goto done;
1432 	}
1433 
1434 	if (hdev->open(hdev)) {
1435 		ret = -EIO;
1436 		goto done;
1437 	}
1438 
1439 	set_bit(HCI_RUNNING, &hdev->flags);
1440 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1441 
1442 	atomic_set(&hdev->cmd_cnt, 1);
1443 	set_bit(HCI_INIT, &hdev->flags);
1444 
1445 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1446 	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1447 		hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1448 
1449 		if (hdev->setup)
1450 			ret = hdev->setup(hdev);
1451 
1452 		if (ret)
1453 			goto setup_failed;
1454 
1455 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1456 			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1457 				hci_dev_get_bd_addr_from_property(hdev);
1458 
1459 			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1460 			    hdev->set_bdaddr)
1461 				ret = hdev->set_bdaddr(hdev,
1462 						       &hdev->public_addr);
1463 			else
1464 				ret = -EADDRNOTAVAIL;
1465 		}
1466 
1467 setup_failed:
1468 		/* The transport driver can set these quirks before
1469 		 * creating the HCI device or in its setup callback.
1470 		 *
1471 		 * In case any of them is set, the controller has to
1472 		 * start up as unconfigured.
1473 		 */
1474 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1475 		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1476 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1477 
1478 		/* For an unconfigured controller it is required to
1479 		 * read at least the version information provided by
1480 		 * the Read Local Version Information command.
1481 		 *
1482 		 * If the set_bdaddr driver callback is provided, then
1483 		 * also the original Bluetooth public device address
1484 		 * will be read using the Read BD Address command.
1485 		 */
1486 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1487 			ret = __hci_unconf_init(hdev);
1488 	}
1489 
1490 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1491 		/* If public address change is configured, ensure that
1492 		 * the address gets programmed. If the driver does not
1493 		 * support changing the public address, fail the power
1494 		 * on procedure.
1495 		 */
1496 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1497 		    hdev->set_bdaddr)
1498 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1499 		else
1500 			ret = -EADDRNOTAVAIL;
1501 	}
1502 
1503 	if (!ret) {
1504 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1505 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1506 			ret = __hci_init(hdev);
1507 			if (!ret && hdev->post_init)
1508 				ret = hdev->post_init(hdev);
1509 		}
1510 	}
1511 
1512 	/* If the HCI Reset command is clearing all diagnostic settings,
1513 	 * then they need to be reprogrammed after the init procedure
1514 	 * has completed.
1515 	 */
1516 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1517 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1518 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1519 		ret = hdev->set_diag(hdev, true);
1520 
1521 	clear_bit(HCI_INIT, &hdev->flags);
1522 
1523 	if (!ret) {
1524 		hci_dev_hold(hdev);
1525 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1526 		hci_adv_instances_set_rpa_expired(hdev, true);
1527 		set_bit(HCI_UP, &hdev->flags);
1528 		hci_sock_dev_event(hdev, HCI_DEV_UP);
1529 		hci_leds_update_powered(hdev, true);
1530 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1531 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1532 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1533 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1534 		    hci_dev_test_flag(hdev, HCI_MGMT) &&
1535 		    hdev->dev_type == HCI_PRIMARY) {
1536 			ret = __hci_req_hci_power_on(hdev);
1537 			mgmt_power_on(hdev, ret);
1538 		}
1539 	} else {
1540 		/* Init failed, cleanup */
1541 		flush_work(&hdev->tx_work);
1542 		flush_work(&hdev->cmd_work);
1543 		flush_work(&hdev->rx_work);
1544 
1545 		skb_queue_purge(&hdev->cmd_q);
1546 		skb_queue_purge(&hdev->rx_q);
1547 
1548 		if (hdev->flush)
1549 			hdev->flush(hdev);
1550 
1551 		if (hdev->sent_cmd) {
1552 			kfree_skb(hdev->sent_cmd);
1553 			hdev->sent_cmd = NULL;
1554 		}
1555 
1556 		clear_bit(HCI_RUNNING, &hdev->flags);
1557 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1558 
1559 		hdev->close(hdev);
1560 		hdev->flags &= BIT(HCI_RAW);
1561 	}
1562 
1563 done:
1564 	hci_req_sync_unlock(hdev);
1565 	return ret;
1566 }
1567 
1568 /* ---- HCI ioctl helpers ---- */
1569 
1570 int hci_dev_open(__u16 dev)
1571 {
1572 	struct hci_dev *hdev;
1573 	int err;
1574 
1575 	hdev = hci_dev_get(dev);
1576 	if (!hdev)
1577 		return -ENODEV;
1578 
1579 	/* Devices that are marked as unconfigured can only be powered
1580 	 * up as user channel. Trying to bring them up as normal devices
1581 	 * will result into a failure. Only user channel operation is
1582 	 * will result in a failure. Only user channel operation is
1583 	 *
1584 	 * When this function is called for a user channel, the flag
1585 	 * HCI_USER_CHANNEL will be set first before attempting to
1586 	 * open the device.
1587 	 */
1588 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1589 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1590 		err = -EOPNOTSUPP;
1591 		goto done;
1592 	}
1593 
1594 	/* We need to ensure that no other power on/off work is pending
1595 	 * before proceeding to call hci_dev_do_open. This is
1596 	 * particularly important if the setup procedure has not yet
1597 	 * completed.
1598 	 */
1599 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1600 		cancel_delayed_work(&hdev->power_off);
1601 
1602 	/* After this call it is guaranteed that the setup procedure
1603 	 * has finished. This means that error conditions like RFKILL
1604 	 * or no valid public or static random address apply.
1605 	 */
1606 	flush_workqueue(hdev->req_workqueue);
1607 
1608 	/* For controllers not using the management interface and that
1609 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1610 	 * so that pairing works for them. Once the management interface
1611 	 * is in use this bit will be cleared again and userspace has
1612 	 * to explicitly enable it.
1613 	 */
1614 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1615 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1616 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1617 
1618 	err = hci_dev_do_open(hdev);
1619 
1620 done:
1621 	hci_dev_put(hdev);
1622 	return err;
1623 }
1624 
1625 /* This function requires the caller holds hdev->lock */
1626 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1627 {
1628 	struct hci_conn_params *p;
1629 
1630 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1631 		if (p->conn) {
1632 			hci_conn_drop(p->conn);
1633 			hci_conn_put(p->conn);
1634 			p->conn = NULL;
1635 		}
1636 		list_del_init(&p->action);
1637 	}
1638 
1639 	BT_DBG("All LE pending actions cleared");
1640 }
1641 
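/* Power off the controller: run the vendor shutdown hook, flush pending
 * work and queues, send HCI Reset when HCI_QUIRK_RESET_ON_CLOSE is set
 * and finally close the transport.
 */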
1642 int hci_dev_do_close(struct hci_dev *hdev)
1643 {
1644 	bool auto_off;
1645 
1646 	BT_DBG("%s %p", hdev->name, hdev);
1647 
1648 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1649 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1650 	    test_bit(HCI_UP, &hdev->flags)) {
1651 		/* Execute vendor specific shutdown routine */
1652 		if (hdev->shutdown)
1653 			hdev->shutdown(hdev);
1654 	}
1655 
1656 	cancel_delayed_work(&hdev->power_off);
1657 
1658 	hci_request_cancel_all(hdev);
1659 	hci_req_sync_lock(hdev);
1660 
1661 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1662 		cancel_delayed_work_sync(&hdev->cmd_timer);
1663 		hci_req_sync_unlock(hdev);
1664 		return 0;
1665 	}
1666 
1667 	hci_leds_update_powered(hdev, false);
1668 
1669 	/* Flush RX and TX works */
1670 	flush_work(&hdev->tx_work);
1671 	flush_work(&hdev->rx_work);
1672 
1673 	if (hdev->discov_timeout > 0) {
1674 		hdev->discov_timeout = 0;
1675 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1676 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1677 	}
1678 
1679 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1680 		cancel_delayed_work(&hdev->service_cache);
1681 
1682 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1683 		struct adv_info *adv_instance;
1684 
1685 		cancel_delayed_work_sync(&hdev->rpa_expired);
1686 
1687 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1688 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1689 	}
1690 
1691 	/* Avoid potential lockdep warnings from the *_flush() calls by
1692 	 * ensuring the workqueue is empty up front.
1693 	 */
1694 	drain_workqueue(hdev->workqueue);
1695 
1696 	hci_dev_lock(hdev);
1697 
1698 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1699 
1700 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1701 
1702 	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1703 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1704 	    hci_dev_test_flag(hdev, HCI_MGMT))
1705 		__mgmt_power_off(hdev);
1706 
1707 	hci_inquiry_cache_flush(hdev);
1708 	hci_pend_le_actions_clear(hdev);
1709 	hci_conn_hash_flush(hdev);
1710 	hci_dev_unlock(hdev);
1711 
1712 	smp_unregister(hdev);
1713 
1714 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1715 
1716 	if (hdev->flush)
1717 		hdev->flush(hdev);
1718 
1719 	/* Reset device */
1720 	skb_queue_purge(&hdev->cmd_q);
1721 	atomic_set(&hdev->cmd_cnt, 1);
1722 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1723 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1724 		set_bit(HCI_INIT, &hdev->flags);
1725 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1726 		clear_bit(HCI_INIT, &hdev->flags);
1727 	}
1728 
1729 	/* flush cmd work */
1730 	flush_work(&hdev->cmd_work);
1731 
1732 	/* Drop queues */
1733 	skb_queue_purge(&hdev->rx_q);
1734 	skb_queue_purge(&hdev->cmd_q);
1735 	skb_queue_purge(&hdev->raw_q);
1736 
1737 	/* Drop last sent command */
1738 	if (hdev->sent_cmd) {
1739 		cancel_delayed_work_sync(&hdev->cmd_timer);
1740 		kfree_skb(hdev->sent_cmd);
1741 		hdev->sent_cmd = NULL;
1742 	}
1743 
1744 	clear_bit(HCI_RUNNING, &hdev->flags);
1745 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1746 
1747 	/* After this point our queues are empty
1748 	 * and no tasks are scheduled. */
1749 	hdev->close(hdev);
1750 
1751 	/* Clear flags */
1752 	hdev->flags &= BIT(HCI_RAW);
1753 	hci_dev_clear_volatile_flags(hdev);
1754 
1755 	/* Controller radio is available but is currently powered down */
1756 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1757 
1758 	memset(hdev->eir, 0, sizeof(hdev->eir));
1759 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1760 	bacpy(&hdev->random_addr, BDADDR_ANY);
1761 
1762 	hci_req_sync_unlock(hdev);
1763 
1764 	hci_dev_put(hdev);
1765 	return 0;
1766 }
1767 
1768 int hci_dev_close(__u16 dev)
1769 {
1770 	struct hci_dev *hdev;
1771 	int err;
1772 
1773 	hdev = hci_dev_get(dev);
1774 	if (!hdev)
1775 		return -ENODEV;
1776 
1777 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1778 		err = -EBUSY;
1779 		goto done;
1780 	}
1781 
1782 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1783 		cancel_delayed_work(&hdev->power_off);
1784 
1785 	err = hci_dev_do_close(hdev);
1786 
1787 done:
1788 	hci_dev_put(hdev);
1789 	return err;
1790 }
1791 
1792 static int hci_dev_do_reset(struct hci_dev *hdev)
1793 {
1794 	int ret;
1795 
1796 	BT_DBG("%s %p", hdev->name, hdev);
1797 
1798 	hci_req_sync_lock(hdev);
1799 
1800 	/* Drop queues */
1801 	skb_queue_purge(&hdev->rx_q);
1802 	skb_queue_purge(&hdev->cmd_q);
1803 
1804 	/* Avoid potential lockdep warnings from the *_flush() calls by
1805 	 * ensuring the workqueue is empty up front.
1806 	 */
1807 	drain_workqueue(hdev->workqueue);
1808 
1809 	hci_dev_lock(hdev);
1810 	hci_inquiry_cache_flush(hdev);
1811 	hci_conn_hash_flush(hdev);
1812 	hci_dev_unlock(hdev);
1813 
1814 	if (hdev->flush)
1815 		hdev->flush(hdev);
1816 
1817 	atomic_set(&hdev->cmd_cnt, 1);
1818 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1819 
1820 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1821 
1822 	hci_req_sync_unlock(hdev);
1823 	return ret;
1824 }
1825 
1826 int hci_dev_reset(__u16 dev)
1827 {
1828 	struct hci_dev *hdev;
1829 	int err;
1830 
1831 	hdev = hci_dev_get(dev);
1832 	if (!hdev)
1833 		return -ENODEV;
1834 
1835 	if (!test_bit(HCI_UP, &hdev->flags)) {
1836 		err = -ENETDOWN;
1837 		goto done;
1838 	}
1839 
1840 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1841 		err = -EBUSY;
1842 		goto done;
1843 	}
1844 
1845 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1846 		err = -EOPNOTSUPP;
1847 		goto done;
1848 	}
1849 
1850 	err = hci_dev_do_reset(hdev);
1851 
1852 done:
1853 	hci_dev_put(hdev);
1854 	return err;
1855 }
1856 
1857 int hci_dev_reset_stat(__u16 dev)
1858 {
1859 	struct hci_dev *hdev;
1860 	int ret = 0;
1861 
1862 	hdev = hci_dev_get(dev);
1863 	if (!hdev)
1864 		return -ENODEV;
1865 
1866 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1867 		ret = -EBUSY;
1868 		goto done;
1869 	}
1870 
1871 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1872 		ret = -EOPNOTSUPP;
1873 		goto done;
1874 	}
1875 
1876 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1877 
1878 done:
1879 	hci_dev_put(hdev);
1880 	return ret;
1881 }
1882 
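/* Sync the HCI_CONNECTABLE and HCI_DISCOVERABLE flags with a scan mode
 * that was changed through the legacy HCISETSCAN ioctl and, when the
 * management interface is in use, notify it about the new settings.
 */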
1883 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1884 {
1885 	bool conn_changed, discov_changed;
1886 
1887 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
1888 
1889 	if ((scan & SCAN_PAGE))
1890 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1891 							  HCI_CONNECTABLE);
1892 	else
1893 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1894 							   HCI_CONNECTABLE);
1895 
1896 	if ((scan & SCAN_INQUIRY)) {
1897 		discov_changed = !hci_dev_test_and_set_flag(hdev,
1898 							    HCI_DISCOVERABLE);
1899 	} else {
1900 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1901 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1902 							     HCI_DISCOVERABLE);
1903 	}
1904 
1905 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1906 		return;
1907 
1908 	if (conn_changed || discov_changed) {
1909 		/* In case this was disabled through mgmt */
1910 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1911 
1912 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1913 			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1914 
1915 		mgmt_new_settings(hdev);
1916 	}
1917 }
1918 
1919 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1920 {
1921 	struct hci_dev *hdev;
1922 	struct hci_dev_req dr;
1923 	int err = 0;
1924 
1925 	if (copy_from_user(&dr, arg, sizeof(dr)))
1926 		return -EFAULT;
1927 
1928 	hdev = hci_dev_get(dr.dev_id);
1929 	if (!hdev)
1930 		return -ENODEV;
1931 
1932 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1933 		err = -EBUSY;
1934 		goto done;
1935 	}
1936 
1937 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1938 		err = -EOPNOTSUPP;
1939 		goto done;
1940 	}
1941 
1942 	if (hdev->dev_type != HCI_PRIMARY) {
1943 		err = -EOPNOTSUPP;
1944 		goto done;
1945 	}
1946 
1947 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1948 		err = -EOPNOTSUPP;
1949 		goto done;
1950 	}
1951 
1952 	switch (cmd) {
1953 	case HCISETAUTH:
1954 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1955 				   HCI_INIT_TIMEOUT, NULL);
1956 		break;
1957 
1958 	case HCISETENCRYPT:
1959 		if (!lmp_encrypt_capable(hdev)) {
1960 			err = -EOPNOTSUPP;
1961 			break;
1962 		}
1963 
1964 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
1965 			/* Auth must be enabled first */
1966 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1967 					   HCI_INIT_TIMEOUT, NULL);
1968 			if (err)
1969 				break;
1970 		}
1971 
1972 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1973 				   HCI_INIT_TIMEOUT, NULL);
1974 		break;
1975 
1976 	case HCISETSCAN:
1977 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1978 				   HCI_INIT_TIMEOUT, NULL);
1979 
1980 		/* Ensure that the connectable and discoverable states
1981 		 * get correctly modified as this was a non-mgmt change.
1982 		 */
1983 		if (!err)
1984 			hci_update_scan_state(hdev, dr.dev_opt);
1985 		break;
1986 
1987 	case HCISETLINKPOL:
1988 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1989 				   HCI_INIT_TIMEOUT, NULL);
1990 		break;
1991 
1992 	case HCISETLINKMODE:
1993 		hdev->link_mode = ((__u16) dr.dev_opt) &
1994 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
1995 		break;
1996 
1997 	case HCISETPTYPE:
1998 		if (hdev->pkt_type == (__u16) dr.dev_opt)
1999 			break;
2000 
2001 		hdev->pkt_type = (__u16) dr.dev_opt;
2002 		mgmt_phy_configuration_changed(hdev, NULL);
2003 		break;
2004 
2005 	case HCISETACLMTU:
2006 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2007 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2008 		break;
2009 
2010 	case HCISETSCOMTU:
2011 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2012 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2013 		break;
2014 
2015 	default:
2016 		err = -EINVAL;
2017 		break;
2018 	}
2019 
2020 done:
2021 	hci_dev_put(hdev);
2022 	return err;
2023 }
2024 
2025 int hci_get_dev_list(void __user *arg)
2026 {
2027 	struct hci_dev *hdev;
2028 	struct hci_dev_list_req *dl;
2029 	struct hci_dev_req *dr;
2030 	int n = 0, size, err;
2031 	__u16 dev_num;
2032 
2033 	if (get_user(dev_num, (__u16 __user *) arg))
2034 		return -EFAULT;
2035 
2036 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2037 		return -EINVAL;
2038 
2039 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2040 
2041 	dl = kzalloc(size, GFP_KERNEL);
2042 	if (!dl)
2043 		return -ENOMEM;
2044 
2045 	dr = dl->dev_req;
2046 
2047 	read_lock(&hci_dev_list_lock);
2048 	list_for_each_entry(hdev, &hci_dev_list, list) {
2049 		unsigned long flags = hdev->flags;
2050 
2051 		/* When auto-off is configured, the transport is running,
2052 		 * but in that case the device should still be reported
2053 		 * as down.
2054 		 */
2055 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2056 			flags &= ~BIT(HCI_UP);
2057 
2058 		(dr + n)->dev_id  = hdev->id;
2059 		(dr + n)->dev_opt = flags;
2060 
2061 		if (++n >= dev_num)
2062 			break;
2063 	}
2064 	read_unlock(&hci_dev_list_lock);
2065 
2066 	dl->dev_num = n;
2067 	size = sizeof(*dl) + n * sizeof(*dr);
2068 
2069 	err = copy_to_user(arg, dl, size);
2070 	kfree(dl);
2071 
2072 	return err ? -EFAULT : 0;
2073 }
2074 
2075 int hci_get_dev_info(void __user *arg)
2076 {
2077 	struct hci_dev *hdev;
2078 	struct hci_dev_info di;
2079 	unsigned long flags;
2080 	int err = 0;
2081 
2082 	if (copy_from_user(&di, arg, sizeof(di)))
2083 		return -EFAULT;
2084 
2085 	hdev = hci_dev_get(di.dev_id);
2086 	if (!hdev)
2087 		return -ENODEV;
2088 
2089 	/* When auto-off is configured, the transport is running,
2090 	 * but in that case the device should still be reported
2091 	 * as down.
2092 	 */
2093 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2094 		flags = hdev->flags & ~BIT(HCI_UP);
2095 	else
2096 		flags = hdev->flags;
2097 
2098 	strcpy(di.name, hdev->name);
2099 	di.bdaddr   = hdev->bdaddr;
2100 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2101 	di.flags    = flags;
2102 	di.pkt_type = hdev->pkt_type;
2103 	if (lmp_bredr_capable(hdev)) {
2104 		di.acl_mtu  = hdev->acl_mtu;
2105 		di.acl_pkts = hdev->acl_pkts;
2106 		di.sco_mtu  = hdev->sco_mtu;
2107 		di.sco_pkts = hdev->sco_pkts;
2108 	} else {
2109 		di.acl_mtu  = hdev->le_mtu;
2110 		di.acl_pkts = hdev->le_pkts;
2111 		di.sco_mtu  = 0;
2112 		di.sco_pkts = 0;
2113 	}
2114 	di.link_policy = hdev->link_policy;
2115 	di.link_mode   = hdev->link_mode;
2116 
2117 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2118 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2119 
2120 	if (copy_to_user(arg, &di, sizeof(di)))
2121 		err = -EFAULT;
2122 
2123 	hci_dev_put(hdev);
2124 
2125 	return err;
2126 }
2127 
2128 /* ---- Interface to HCI drivers ---- */
2129 
2130 static int hci_rfkill_set_block(void *data, bool blocked)
2131 {
2132 	struct hci_dev *hdev = data;
2133 
2134 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2135 
2136 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2137 		return -EBUSY;
2138 
2139 	if (blocked) {
2140 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2141 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2142 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2143 			hci_dev_do_close(hdev);
2144 	} else {
2145 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2146 	}
2147 
2148 	return 0;
2149 }
2150 
2151 static const struct rfkill_ops hci_rfkill_ops = {
2152 	.set_block = hci_rfkill_set_block,
2153 };
2154 
2155 static void hci_power_on(struct work_struct *work)
2156 {
2157 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2158 	int err;
2159 
2160 	BT_DBG("%s", hdev->name);
2161 
2162 	if (test_bit(HCI_UP, &hdev->flags) &&
2163 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2164 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2165 		cancel_delayed_work(&hdev->power_off);
2166 		hci_req_sync_lock(hdev);
2167 		err = __hci_req_hci_power_on(hdev);
2168 		hci_req_sync_unlock(hdev);
2169 		mgmt_power_on(hdev, err);
2170 		return;
2171 	}
2172 
2173 	err = hci_dev_do_open(hdev);
2174 	if (err < 0) {
2175 		hci_dev_lock(hdev);
2176 		mgmt_set_powered_failed(hdev, err);
2177 		hci_dev_unlock(hdev);
2178 		return;
2179 	}
2180 
2181 	/* During the HCI setup phase, a few error conditions are
2182 	 * ignored and they need to be checked now. If they are still
2183 	 * valid, it is important to turn the device back off.
2184 	 */
2185 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2186 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2187 	    (hdev->dev_type == HCI_PRIMARY &&
2188 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2189 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2190 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2191 		hci_dev_do_close(hdev);
2192 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2193 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2194 				   HCI_AUTO_OFF_TIMEOUT);
2195 	}
2196 
2197 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2198 		/* For unconfigured devices, set the HCI_RAW flag
2199 		 * so that userspace can easily identify them.
2200 		 */
2201 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2202 			set_bit(HCI_RAW, &hdev->flags);
2203 
2204 		/* For fully configured devices, this will send
2205 		 * the Index Added event. For unconfigured devices,
2206 		 * it will send an Unconfigured Index Added event.
2207 		 *
2208 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2209 		 * and no event will be sent.
2210 		 */
2211 		mgmt_index_added(hdev);
2212 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2213 		/* When the controller is now configured, then it
2214 		 * is important to clear the HCI_RAW flag.
2215 		 */
2216 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2217 			clear_bit(HCI_RAW, &hdev->flags);
2218 
2219 		/* Powering on the controller with HCI_CONFIG set only
2220 		 * happens with the transition from unconfigured to
2221 		 * configured. This will send the Index Added event.
2222 		 */
2223 		mgmt_index_added(hdev);
2224 	}
2225 }
2226 
2227 static void hci_power_off(struct work_struct *work)
2228 {
2229 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2230 					    power_off.work);
2231 
2232 	BT_DBG("%s", hdev->name);
2233 
2234 	hci_dev_do_close(hdev);
2235 }
2236 
2237 static void hci_error_reset(struct work_struct *work)
2238 {
2239 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2240 
2241 	BT_DBG("%s", hdev->name);
2242 
2243 	if (hdev->hw_error)
2244 		hdev->hw_error(hdev, hdev->hw_error_code);
2245 	else
2246 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2247 
2248 	if (hci_dev_do_close(hdev))
2249 		return;
2250 
2251 	hci_dev_do_open(hdev);
2252 }
2253 
2254 void hci_uuids_clear(struct hci_dev *hdev)
2255 {
2256 	struct bt_uuid *uuid, *tmp;
2257 
2258 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2259 		list_del(&uuid->list);
2260 		kfree(uuid);
2261 	}
2262 }
2263 
2264 void hci_link_keys_clear(struct hci_dev *hdev)
2265 {
2266 	struct link_key *key;
2267 
2268 	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2269 		list_del_rcu(&key->list);
2270 		kfree_rcu(key, rcu);
2271 	}
2272 }
2273 
2274 void hci_smp_ltks_clear(struct hci_dev *hdev)
2275 {
2276 	struct smp_ltk *k;
2277 
2278 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2279 		list_del_rcu(&k->list);
2280 		kfree_rcu(k, rcu);
2281 	}
2282 }
2283 
2284 void hci_smp_irks_clear(struct hci_dev *hdev)
2285 {
2286 	struct smp_irk *k;
2287 
2288 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2289 		list_del_rcu(&k->list);
2290 		kfree_rcu(k, rcu);
2291 	}
2292 }
2293 
2294 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2295 {
2296 	struct link_key *k;
2297 
2298 	rcu_read_lock();
2299 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2300 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2301 			rcu_read_unlock();
2302 			return k;
2303 		}
2304 	}
2305 	rcu_read_unlock();
2306 
2307 	return NULL;
2308 }
2309 
2310 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2311 			       u8 key_type, u8 old_key_type)
2312 {
2313 	/* Legacy key */
2314 	if (key_type < 0x03)
2315 		return true;
2316 
2317 	/* Debug keys are insecure so don't store them persistently */
2318 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2319 		return false;
2320 
2321 	/* Changed combination key and there's no previous one */
2322 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2323 		return false;
2324 
2325 	/* Security mode 3 case */
2326 	if (!conn)
2327 		return true;
2328 
2329 	/* BR/EDR key derived using SC from an LE link */
2330 	if (conn->type == LE_LINK)
2331 		return true;
2332 
2333 	/* Neither the local nor the remote side requested no-bonding */
2334 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2335 		return true;
2336 
2337 	/* Local side had dedicated bonding as requirement */
2338 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2339 		return true;
2340 
2341 	/* Remote side had dedicated bonding as requirement */
2342 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2343 		return true;
2344 
2345 	/* If none of the above criteria match, then don't store the key
2346 	 * persistently */
2347 	return false;
2348 }
2349 
2350 static u8 ltk_role(u8 type)
2351 {
2352 	if (type == SMP_LTK)
2353 		return HCI_ROLE_MASTER;
2354 
2355 	return HCI_ROLE_SLAVE;
2356 }
2357 
2358 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2359 			     u8 addr_type, u8 role)
2360 {
2361 	struct smp_ltk *k;
2362 
2363 	rcu_read_lock();
2364 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2365 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2366 			continue;
2367 
2368 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2369 			rcu_read_unlock();
2370 			return k;
2371 		}
2372 	}
2373 	rcu_read_unlock();
2374 
2375 	return NULL;
2376 }
2377 
2378 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2379 {
2380 	struct smp_irk *irk;
2381 
2382 	rcu_read_lock();
2383 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2384 		if (!bacmp(&irk->rpa, rpa)) {
2385 			rcu_read_unlock();
2386 			return irk;
2387 		}
2388 	}
2389 
2390 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2391 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2392 			bacpy(&irk->rpa, rpa);
2393 			rcu_read_unlock();
2394 			return irk;
2395 		}
2396 	}
2397 	rcu_read_unlock();
2398 
2399 	return NULL;
2400 }
2401 
2402 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2403 				     u8 addr_type)
2404 {
2405 	struct smp_irk *irk;
2406 
2407 	/* Identity Address must be public or static random */
2408 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2409 		return NULL;
2410 
2411 	rcu_read_lock();
2412 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2413 		if (addr_type == irk->addr_type &&
2414 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2415 			rcu_read_unlock();
2416 			return irk;
2417 		}
2418 	}
2419 	rcu_read_unlock();
2420 
2421 	return NULL;
2422 }
2423 
2424 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2425 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2426 				  u8 pin_len, bool *persistent)
2427 {
2428 	struct link_key *key, *old_key;
2429 	u8 old_key_type;
2430 
2431 	old_key = hci_find_link_key(hdev, bdaddr);
2432 	if (old_key) {
2433 		old_key_type = old_key->type;
2434 		key = old_key;
2435 	} else {
2436 		old_key_type = conn ? conn->key_type : 0xff;
2437 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2438 		if (!key)
2439 			return NULL;
2440 		list_add_rcu(&key->list, &hdev->link_keys);
2441 	}
2442 
2443 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2444 
2445 	/* Some buggy controller combinations generate a changed
2446 	 * combination key for legacy pairing even when there's no
2447 	 * previous key */
2448 	if (type == HCI_LK_CHANGED_COMBINATION &&
2449 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2450 		type = HCI_LK_COMBINATION;
2451 		if (conn)
2452 			conn->key_type = type;
2453 	}
2454 
2455 	bacpy(&key->bdaddr, bdaddr);
2456 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2457 	key->pin_len = pin_len;
2458 
2459 	if (type == HCI_LK_CHANGED_COMBINATION)
2460 		key->type = old_key_type;
2461 	else
2462 		key->type = type;
2463 
2464 	if (persistent)
2465 		*persistent = hci_persistent_key(hdev, conn, type,
2466 						 old_key_type);
2467 
2468 	return key;
2469 }
2470 
2471 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2472 			    u8 addr_type, u8 type, u8 authenticated,
2473 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2474 {
2475 	struct smp_ltk *key, *old_key;
2476 	u8 role = ltk_role(type);
2477 
2478 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2479 	if (old_key)
2480 		key = old_key;
2481 	else {
2482 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2483 		if (!key)
2484 			return NULL;
2485 		list_add_rcu(&key->list, &hdev->long_term_keys);
2486 	}
2487 
2488 	bacpy(&key->bdaddr, bdaddr);
2489 	key->bdaddr_type = addr_type;
2490 	memcpy(key->val, tk, sizeof(key->val));
2491 	key->authenticated = authenticated;
2492 	key->ediv = ediv;
2493 	key->rand = rand;
2494 	key->enc_size = enc_size;
2495 	key->type = type;
2496 
2497 	return key;
2498 }
2499 
2500 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2501 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2502 {
2503 	struct smp_irk *irk;
2504 
2505 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2506 	if (!irk) {
2507 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2508 		if (!irk)
2509 			return NULL;
2510 
2511 		bacpy(&irk->bdaddr, bdaddr);
2512 		irk->addr_type = addr_type;
2513 
2514 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2515 	}
2516 
2517 	memcpy(irk->val, val, 16);
2518 	bacpy(&irk->rpa, rpa);
2519 
2520 	return irk;
2521 }
2522 
2523 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2524 {
2525 	struct link_key *key;
2526 
2527 	key = hci_find_link_key(hdev, bdaddr);
2528 	if (!key)
2529 		return -ENOENT;
2530 
2531 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2532 
2533 	list_del_rcu(&key->list);
2534 	kfree_rcu(key, rcu);
2535 
2536 	return 0;
2537 }
2538 
2539 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2540 {
2541 	struct smp_ltk *k;
2542 	int removed = 0;
2543 
2544 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2545 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2546 			continue;
2547 
2548 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2549 
2550 		list_del_rcu(&k->list);
2551 		kfree_rcu(k, rcu);
2552 		removed++;
2553 	}
2554 
2555 	return removed ? 0 : -ENOENT;
2556 }
2557 
2558 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2559 {
2560 	struct smp_irk *k;
2561 
2562 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2563 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2564 			continue;
2565 
2566 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2567 
2568 		list_del_rcu(&k->list);
2569 		kfree_rcu(k, rcu);
2570 	}
2571 }
2572 
2573 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2574 {
2575 	struct smp_ltk *k;
2576 	struct smp_irk *irk;
2577 	u8 addr_type;
2578 
2579 	if (type == BDADDR_BREDR) {
2580 		if (hci_find_link_key(hdev, bdaddr))
2581 			return true;
2582 		return false;
2583 	}
2584 
2585 	/* Convert to HCI addr type which struct smp_ltk uses */
2586 	if (type == BDADDR_LE_PUBLIC)
2587 		addr_type = ADDR_LE_DEV_PUBLIC;
2588 	else
2589 		addr_type = ADDR_LE_DEV_RANDOM;
2590 
2591 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2592 	if (irk) {
2593 		bdaddr = &irk->bdaddr;
2594 		addr_type = irk->addr_type;
2595 	}
2596 
2597 	rcu_read_lock();
2598 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2599 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2600 			rcu_read_unlock();
2601 			return true;
2602 		}
2603 	}
2604 	rcu_read_unlock();
2605 
2606 	return false;
2607 }
2608 
2609 /* HCI command timer function */
2610 static void hci_cmd_timeout(struct work_struct *work)
2611 {
2612 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2613 					    cmd_timer.work);
2614 
2615 	if (hdev->sent_cmd) {
2616 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2617 		u16 opcode = __le16_to_cpu(sent->opcode);
2618 
2619 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2620 	} else {
2621 		bt_dev_err(hdev, "command tx timeout");
2622 	}
2623 
2624 	if (hdev->cmd_timeout)
2625 		hdev->cmd_timeout(hdev);
2626 
2627 	atomic_set(&hdev->cmd_cnt, 1);
2628 	queue_work(hdev->workqueue, &hdev->cmd_work);
2629 }
2630 
2631 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2632 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2633 {
2634 	struct oob_data *data;
2635 
2636 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2637 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2638 			continue;
2639 		if (data->bdaddr_type != bdaddr_type)
2640 			continue;
2641 		return data;
2642 	}
2643 
2644 	return NULL;
2645 }
2646 
2647 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2648 			       u8 bdaddr_type)
2649 {
2650 	struct oob_data *data;
2651 
2652 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2653 	if (!data)
2654 		return -ENOENT;
2655 
2656 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2657 
2658 	list_del(&data->list);
2659 	kfree(data);
2660 
2661 	return 0;
2662 }
2663 
2664 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2665 {
2666 	struct oob_data *data, *n;
2667 
2668 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2669 		list_del(&data->list);
2670 		kfree(data);
2671 	}
2672 }
2673 
2674 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2675 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2676 			    u8 *hash256, u8 *rand256)
2677 {
2678 	struct oob_data *data;
2679 
2680 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2681 	if (!data) {
2682 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2683 		if (!data)
2684 			return -ENOMEM;
2685 
2686 		bacpy(&data->bdaddr, bdaddr);
2687 		data->bdaddr_type = bdaddr_type;
2688 		list_add(&data->list, &hdev->remote_oob_data);
2689 	}
2690 
2691 	if (hash192 && rand192) {
2692 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2693 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2694 		if (hash256 && rand256)
2695 			data->present = 0x03;
2696 	} else {
2697 		memset(data->hash192, 0, sizeof(data->hash192));
2698 		memset(data->rand192, 0, sizeof(data->rand192));
2699 		if (hash256 && rand256)
2700 			data->present = 0x02;
2701 		else
2702 			data->present = 0x00;
2703 	}
2704 
2705 	if (hash256 && rand256) {
2706 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2707 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2708 	} else {
2709 		memset(data->hash256, 0, sizeof(data->hash256));
2710 		memset(data->rand256, 0, sizeof(data->rand256));
2711 		if (hash192 && rand192)
2712 			data->present = 0x01;
2713 	}
2714 
2715 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2716 
2717 	return 0;
2718 }
2719 
2720 /* This function requires the caller holds hdev->lock */
2721 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2722 {
2723 	struct adv_info *adv_instance;
2724 
2725 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2726 		if (adv_instance->instance == instance)
2727 			return adv_instance;
2728 	}
2729 
2730 	return NULL;
2731 }
2732 
2733 /* This function requires the caller holds hdev->lock */
2734 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2735 {
2736 	struct adv_info *cur_instance;
2737 
2738 	cur_instance = hci_find_adv_instance(hdev, instance);
2739 	if (!cur_instance)
2740 		return NULL;
2741 
2742 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2743 					    struct adv_info, list))
2744 		return list_first_entry(&hdev->adv_instances,
2745 						 struct adv_info, list);
2746 	else
2747 		return list_next_entry(cur_instance, list);
2748 }
2749 
2750 /* This function requires the caller holds hdev->lock */
2751 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2752 {
2753 	struct adv_info *adv_instance;
2754 
2755 	adv_instance = hci_find_adv_instance(hdev, instance);
2756 	if (!adv_instance)
2757 		return -ENOENT;
2758 
2759 	BT_DBG("%s removing instance %d", hdev->name, instance);
2760 
2761 	if (hdev->cur_adv_instance == instance) {
2762 		if (hdev->adv_instance_timeout) {
2763 			cancel_delayed_work(&hdev->adv_instance_expire);
2764 			hdev->adv_instance_timeout = 0;
2765 		}
2766 		hdev->cur_adv_instance = 0x00;
2767 	}
2768 
2769 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2770 
2771 	list_del(&adv_instance->list);
2772 	kfree(adv_instance);
2773 
2774 	hdev->adv_instance_cnt--;
2775 
2776 	return 0;
2777 }
2778 
2779 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2780 {
2781 	struct adv_info *adv_instance, *n;
2782 
2783 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2784 		adv_instance->rpa_expired = rpa_expired;
2785 }
2786 
2787 /* This function requires the caller holds hdev->lock */
2788 void hci_adv_instances_clear(struct hci_dev *hdev)
2789 {
2790 	struct adv_info *adv_instance, *n;
2791 
2792 	if (hdev->adv_instance_timeout) {
2793 		cancel_delayed_work(&hdev->adv_instance_expire);
2794 		hdev->adv_instance_timeout = 0;
2795 	}
2796 
2797 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2798 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2799 		list_del(&adv_instance->list);
2800 		kfree(adv_instance);
2801 	}
2802 
2803 	hdev->adv_instance_cnt = 0;
2804 	hdev->cur_adv_instance = 0x00;
2805 }
2806 
2807 static void adv_instance_rpa_expired(struct work_struct *work)
2808 {
2809 	struct adv_info *adv_instance = container_of(work, struct adv_info,
2810 						     rpa_expired_cb.work);
2811 
2812 	BT_DBG("");
2813 
2814 	adv_instance->rpa_expired = true;
2815 }
2816 
2817 /* This function requires the caller holds hdev->lock */
2818 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2819 			 u16 adv_data_len, u8 *adv_data,
2820 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2821 			 u16 timeout, u16 duration)
2822 {
2823 	struct adv_info *adv_instance;
2824 
2825 	adv_instance = hci_find_adv_instance(hdev, instance);
2826 	if (adv_instance) {
2827 		memset(adv_instance->adv_data, 0,
2828 		       sizeof(adv_instance->adv_data));
2829 		memset(adv_instance->scan_rsp_data, 0,
2830 		       sizeof(adv_instance->scan_rsp_data));
2831 	} else {
2832 		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2833 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2834 			return -EOVERFLOW;
2835 
2836 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2837 		if (!adv_instance)
2838 			return -ENOMEM;
2839 
2840 		adv_instance->pending = true;
2841 		adv_instance->instance = instance;
2842 		list_add(&adv_instance->list, &hdev->adv_instances);
2843 		hdev->adv_instance_cnt++;
2844 	}
2845 
2846 	adv_instance->flags = flags;
2847 	adv_instance->adv_data_len = adv_data_len;
2848 	adv_instance->scan_rsp_len = scan_rsp_len;
2849 
2850 	if (adv_data_len)
2851 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2852 
2853 	if (scan_rsp_len)
2854 		memcpy(adv_instance->scan_rsp_data,
2855 		       scan_rsp_data, scan_rsp_len);
2856 
2857 	adv_instance->timeout = timeout;
2858 	adv_instance->remaining_time = timeout;
2859 
2860 	if (duration == 0)
2861 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2862 	else
2863 		adv_instance->duration = duration;
2864 
2865 	adv_instance->tx_power = HCI_TX_POWER_INVALID;
2866 
2867 	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2868 			  adv_instance_rpa_expired);
2869 
2870 	BT_DBG("%s for instance %d", hdev->name, instance);
2871 
2872 	return 0;
2873 }
2874 
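/* Illustrative sketch: a minimal caller of hci_add_adv_instance() above.
 * The helper name example_add_adv_instance() and the advertising payload
 * (a single Flags AD structure) are hypothetical; the call itself and the
 * requirement to hold hdev->lock follow the function's contract.
 */
static int example_add_adv_instance(struct hci_dev *hdev)
{
	/* AD structure: length 0x02, type 0x01 (Flags), value 0x06 */
	u8 adv_data[] = { 0x02, 0x01, 0x06 };
	int err;

	hci_dev_lock(hdev);
	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
				   0, NULL, 0, 0);
	hci_dev_unlock(hdev);

	return err;
}
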
2875 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2876 					 bdaddr_t *bdaddr, u8 type)
2877 {
2878 	struct bdaddr_list *b;
2879 
2880 	list_for_each_entry(b, bdaddr_list, list) {
2881 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2882 			return b;
2883 	}
2884 
2885 	return NULL;
2886 }
2887 
2888 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2889 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2890 				u8 type)
2891 {
2892 	struct bdaddr_list_with_irk *b;
2893 
2894 	list_for_each_entry(b, bdaddr_list, list) {
2895 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2896 			return b;
2897 	}
2898 
2899 	return NULL;
2900 }
2901 
2902 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2903 {
2904 	struct bdaddr_list *b, *n;
2905 
2906 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2907 		list_del(&b->list);
2908 		kfree(b);
2909 	}
2910 }
2911 
2912 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2913 {
2914 	struct bdaddr_list *entry;
2915 
2916 	if (!bacmp(bdaddr, BDADDR_ANY))
2917 		return -EBADF;
2918 
2919 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2920 		return -EEXIST;
2921 
2922 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2923 	if (!entry)
2924 		return -ENOMEM;
2925 
2926 	bacpy(&entry->bdaddr, bdaddr);
2927 	entry->bdaddr_type = type;
2928 
2929 	list_add(&entry->list, list);
2930 
2931 	return 0;
2932 }
2933 
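/* Illustrative sketch: typical use of the bdaddr list helpers, here adding
 * a BR/EDR peer to the connection whitelist much like mgmt's Add Device
 * handling does.  The helper name example_whitelist_add() is hypothetical,
 * and taking hdev->lock around the list update mirrors existing callers.
 */
static int example_whitelist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}
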
2934 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2935 					u8 type, u8 *peer_irk, u8 *local_irk)
2936 {
2937 	struct bdaddr_list_with_irk *entry;
2938 
2939 	if (!bacmp(bdaddr, BDADDR_ANY))
2940 		return -EBADF;
2941 
2942 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2943 		return -EEXIST;
2944 
2945 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2946 	if (!entry)
2947 		return -ENOMEM;
2948 
2949 	bacpy(&entry->bdaddr, bdaddr);
2950 	entry->bdaddr_type = type;
2951 
2952 	if (peer_irk)
2953 		memcpy(entry->peer_irk, peer_irk, 16);
2954 
2955 	if (local_irk)
2956 		memcpy(entry->local_irk, local_irk, 16);
2957 
2958 	list_add(&entry->list, list);
2959 
2960 	return 0;
2961 }
2962 
2963 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2964 {
2965 	struct bdaddr_list *entry;
2966 
2967 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2968 		hci_bdaddr_list_clear(list);
2969 		return 0;
2970 	}
2971 
2972 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2973 	if (!entry)
2974 		return -ENOENT;
2975 
2976 	list_del(&entry->list);
2977 	kfree(entry);
2978 
2979 	return 0;
2980 }
2981 
2982 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2983 							u8 type)
2984 {
2985 	struct bdaddr_list_with_irk *entry;
2986 
2987 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2988 		hci_bdaddr_list_clear(list);
2989 		return 0;
2990 	}
2991 
2992 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2993 	if (!entry)
2994 		return -ENOENT;
2995 
2996 	list_del(&entry->list);
2997 	kfree(entry);
2998 
2999 	return 0;
3000 }
3001 
3002 /* This function requires the caller holds hdev->lock */
3003 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3004 					       bdaddr_t *addr, u8 addr_type)
3005 {
3006 	struct hci_conn_params *params;
3007 
3008 	list_for_each_entry(params, &hdev->le_conn_params, list) {
3009 		if (bacmp(&params->addr, addr) == 0 &&
3010 		    params->addr_type == addr_type) {
3011 			return params;
3012 		}
3013 	}
3014 
3015 	return NULL;
3016 }
3017 
3018 /* This function requires the caller holds hdev->lock */
3019 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3020 						  bdaddr_t *addr, u8 addr_type)
3021 {
3022 	struct hci_conn_params *param;
3023 
3024 	list_for_each_entry(param, list, action) {
3025 		if (bacmp(&param->addr, addr) == 0 &&
3026 		    param->addr_type == addr_type)
3027 			return param;
3028 	}
3029 
3030 	return NULL;
3031 }
3032 
3033 /* This function requires the caller holds hdev->lock */
3034 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3035 					    bdaddr_t *addr, u8 addr_type)
3036 {
3037 	struct hci_conn_params *params;
3038 
3039 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3040 	if (params)
3041 		return params;
3042 
3043 	params = kzalloc(sizeof(*params), GFP_KERNEL);
3044 	if (!params) {
3045 		bt_dev_err(hdev, "out of memory");
3046 		return NULL;
3047 	}
3048 
3049 	bacpy(&params->addr, addr);
3050 	params->addr_type = addr_type;
3051 
3052 	list_add(&params->list, &hdev->le_conn_params);
3053 	INIT_LIST_HEAD(&params->action);
3054 
3055 	params->conn_min_interval = hdev->le_conn_min_interval;
3056 	params->conn_max_interval = hdev->le_conn_max_interval;
3057 	params->conn_latency = hdev->le_conn_latency;
3058 	params->supervision_timeout = hdev->le_supv_timeout;
3059 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
3060 
3061 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3062 
3063 	return params;
3064 }
3065 
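/* Illustrative sketch: marking an LE peer for automatic reconnection with
 * the helpers above, loosely following what mgmt's Add Device path does.
 * The helper name example_auto_connect() is hypothetical and the exact
 * action-list bookkeeping is an assumption of this sketch.
 */
static int example_auto_connect(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);

	params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
	if (!params) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	params->auto_connect = HCI_AUTO_CONN_ALWAYS;

	/* Queue the entry for the passive background scan */
	list_del_init(&params->action);
	list_add(&params->action, &hdev->pend_le_conns);
	hci_update_background_scan(hdev);

	hci_dev_unlock(hdev);

	return 0;
}
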
3066 static void hci_conn_params_free(struct hci_conn_params *params)
3067 {
3068 	if (params->conn) {
3069 		hci_conn_drop(params->conn);
3070 		hci_conn_put(params->conn);
3071 	}
3072 
3073 	list_del(&params->action);
3074 	list_del(&params->list);
3075 	kfree(params);
3076 }
3077 
3078 /* This function requires the caller holds hdev->lock */
3079 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3080 {
3081 	struct hci_conn_params *params;
3082 
3083 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3084 	if (!params)
3085 		return;
3086 
3087 	hci_conn_params_free(params);
3088 
3089 	hci_update_background_scan(hdev);
3090 
3091 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3092 }
3093 
3094 /* This function requires the caller holds hdev->lock */
3095 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3096 {
3097 	struct hci_conn_params *params, *tmp;
3098 
3099 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3100 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3101 			continue;
3102 
3103 		/* If trying to establish a one-time connection to a disabled
3104 		 * device, leave the params, but mark them as just once.
3105 		 */
3106 		if (params->explicit_connect) {
3107 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3108 			continue;
3109 		}
3110 
3111 		list_del(&params->list);
3112 		kfree(params);
3113 	}
3114 
3115 	BT_DBG("All LE disabled connection parameters were removed");
3116 }
3117 
3118 /* This function requires the caller holds hdev->lock */
3119 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3120 {
3121 	struct hci_conn_params *params, *tmp;
3122 
3123 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3124 		hci_conn_params_free(params);
3125 
3126 	BT_DBG("All LE connection parameters were removed");
3127 }
3128 
3129 /* Copy the Identity Address of the controller.
3130  *
3131  * If the controller has a public BD_ADDR, then by default use that one.
3132  * If this is a LE only controller without a public address, default to
3133  * the static random address.
3134  *
3135  * For debugging purposes it is possible to force controllers with a
3136  * public address to use the static random address instead.
3137  *
3138  * In case BR/EDR has been disabled on a dual-mode controller and
3139  * userspace has configured a static address, then that address
3140  * becomes the identity address instead of the public BR/EDR address.
3141  */
3142 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3143 			       u8 *bdaddr_type)
3144 {
3145 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3146 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3147 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3148 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
3149 		bacpy(bdaddr, &hdev->static_addr);
3150 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3151 	} else {
3152 		bacpy(bdaddr, &hdev->bdaddr);
3153 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3154 	}
3155 }
3156 
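/* Illustrative sketch: a caller that needs the controller's own identity
 * address simply provides storage and lets the helper above choose between
 * the public and the static random address.  The helper name
 * example_log_identity() is hypothetical.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	bt_dev_dbg(hdev, "identity %pMR (type %u)", &bdaddr, bdaddr_type);
}
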
3157 /* Alloc HCI device */
3158 struct hci_dev *hci_alloc_dev(void)
3159 {
3160 	struct hci_dev *hdev;
3161 
3162 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3163 	if (!hdev)
3164 		return NULL;
3165 
3166 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3167 	hdev->esco_type = (ESCO_HV1);
3168 	hdev->link_mode = (HCI_LM_ACCEPT);
3169 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
3170 	hdev->io_capability = 0x03;	/* No Input No Output */
3171 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3172 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3173 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3174 	hdev->adv_instance_cnt = 0;
3175 	hdev->cur_adv_instance = 0x00;
3176 	hdev->adv_instance_timeout = 0;
3177 
3178 	hdev->sniff_max_interval = 800;
3179 	hdev->sniff_min_interval = 80;
3180 
3181 	hdev->le_adv_channel_map = 0x07;
3182 	hdev->le_adv_min_interval = 0x0800;
3183 	hdev->le_adv_max_interval = 0x0800;
3184 	hdev->le_scan_interval = 0x0060;
3185 	hdev->le_scan_window = 0x0030;
3186 	hdev->le_conn_min_interval = 0x0018;
3187 	hdev->le_conn_max_interval = 0x0028;
3188 	hdev->le_conn_latency = 0x0000;
3189 	hdev->le_supv_timeout = 0x002a;
3190 	hdev->le_def_tx_len = 0x001b;
3191 	hdev->le_def_tx_time = 0x0148;
3192 	hdev->le_max_tx_len = 0x001b;
3193 	hdev->le_max_tx_time = 0x0148;
3194 	hdev->le_max_rx_len = 0x001b;
3195 	hdev->le_max_rx_time = 0x0148;
3196 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3197 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3198 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3199 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3200 
3201 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3202 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3203 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3204 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3205 
3206 	mutex_init(&hdev->lock);
3207 	mutex_init(&hdev->req_lock);
3208 
3209 	INIT_LIST_HEAD(&hdev->mgmt_pending);
3210 	INIT_LIST_HEAD(&hdev->blacklist);
3211 	INIT_LIST_HEAD(&hdev->whitelist);
3212 	INIT_LIST_HEAD(&hdev->uuids);
3213 	INIT_LIST_HEAD(&hdev->link_keys);
3214 	INIT_LIST_HEAD(&hdev->long_term_keys);
3215 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3216 	INIT_LIST_HEAD(&hdev->remote_oob_data);
3217 	INIT_LIST_HEAD(&hdev->le_white_list);
3218 	INIT_LIST_HEAD(&hdev->le_resolv_list);
3219 	INIT_LIST_HEAD(&hdev->le_conn_params);
3220 	INIT_LIST_HEAD(&hdev->pend_le_conns);
3221 	INIT_LIST_HEAD(&hdev->pend_le_reports);
3222 	INIT_LIST_HEAD(&hdev->conn_hash.list);
3223 	INIT_LIST_HEAD(&hdev->adv_instances);
3224 
3225 	INIT_WORK(&hdev->rx_work, hci_rx_work);
3226 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3227 	INIT_WORK(&hdev->tx_work, hci_tx_work);
3228 	INIT_WORK(&hdev->power_on, hci_power_on);
3229 	INIT_WORK(&hdev->error_reset, hci_error_reset);
3230 
3231 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3232 
3233 	skb_queue_head_init(&hdev->rx_q);
3234 	skb_queue_head_init(&hdev->cmd_q);
3235 	skb_queue_head_init(&hdev->raw_q);
3236 
3237 	init_waitqueue_head(&hdev->req_wait_q);
3238 
3239 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3240 
3241 	hci_request_setup(hdev);
3242 
3243 	hci_init_sysfs(hdev);
3244 	discovery_init(hdev);
3245 
3246 	return hdev;
3247 }
3248 EXPORT_SYMBOL(hci_alloc_dev);
3249 
3250 /* Free HCI device */
3251 void hci_free_dev(struct hci_dev *hdev)
3252 {
3253 	/* will free via device release */
3254 	put_device(&hdev->dev);
3255 }
3256 EXPORT_SYMBOL(hci_free_dev);
3257 
3258 /* Register HCI device */
3259 int hci_register_dev(struct hci_dev *hdev)
3260 {
3261 	int id, error;
3262 
3263 	if (!hdev->open || !hdev->close || !hdev->send)
3264 		return -EINVAL;
3265 
3266 	/* Do not allow HCI_AMP devices to register at index 0,
3267 	 * so the index can be used as the AMP controller ID.
3268 	 */
3269 	switch (hdev->dev_type) {
3270 	case HCI_PRIMARY:
3271 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3272 		break;
3273 	case HCI_AMP:
3274 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3275 		break;
3276 	default:
3277 		return -EINVAL;
3278 	}
3279 
3280 	if (id < 0)
3281 		return id;
3282 
3283 	sprintf(hdev->name, "hci%d", id);
3284 	hdev->id = id;
3285 
3286 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3287 
3288 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3289 	if (!hdev->workqueue) {
3290 		error = -ENOMEM;
3291 		goto err;
3292 	}
3293 
3294 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3295 						      hdev->name);
3296 	if (!hdev->req_workqueue) {
3297 		destroy_workqueue(hdev->workqueue);
3298 		error = -ENOMEM;
3299 		goto err;
3300 	}
3301 
3302 	if (!IS_ERR_OR_NULL(bt_debugfs))
3303 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3304 
3305 	dev_set_name(&hdev->dev, "%s", hdev->name);
3306 
3307 	error = device_add(&hdev->dev);
3308 	if (error < 0)
3309 		goto err_wqueue;
3310 
3311 	hci_leds_init(hdev);
3312 
3313 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3314 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3315 				    hdev);
3316 	if (hdev->rfkill) {
3317 		if (rfkill_register(hdev->rfkill) < 0) {
3318 			rfkill_destroy(hdev->rfkill);
3319 			hdev->rfkill = NULL;
3320 		}
3321 	}
3322 
3323 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3324 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3325 
3326 	hci_dev_set_flag(hdev, HCI_SETUP);
3327 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3328 
3329 	if (hdev->dev_type == HCI_PRIMARY) {
3330 		/* Assume BR/EDR support until proven otherwise (such as
3331 		 * through reading the supported features during init).
3332 		 */
3333 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3334 	}
3335 
3336 	write_lock(&hci_dev_list_lock);
3337 	list_add(&hdev->list, &hci_dev_list);
3338 	write_unlock(&hci_dev_list_lock);
3339 
3340 	/* Devices that are marked for raw-only usage are unconfigured
3341 	 * and should not be included in normal operation.
3342 	 */
3343 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3344 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3345 
3346 	hci_sock_dev_event(hdev, HCI_DEV_REG);
3347 	hci_dev_hold(hdev);
3348 
3349 	queue_work(hdev->req_workqueue, &hdev->power_on);
3350 
3351 	return id;
3352 
3353 err_wqueue:
3354 	destroy_workqueue(hdev->workqueue);
3355 	destroy_workqueue(hdev->req_workqueue);
3356 err:
3357 	ida_simple_remove(&hci_index_ida, hdev->id);
3358 
3359 	return error;
3360 }
3361 EXPORT_SYMBOL(hci_register_dev);
3362 
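/* Illustrative sketch: the minimal driver-side sequence for registering a
 * controller with the core.  The example_* callbacks and the use of
 * HCI_VIRTUAL as the bus type are hypothetical; the mandatory open, close
 * and send callbacks are exactly what hci_register_dev() above checks for.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the frame to its transport here */
	kfree_skb(skb);
	return 0;
}

static struct hci_dev *example_register(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return ERR_PTR(-ENOMEM);

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return ERR_PTR(err);
	}

	return hdev;
}
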
3363 /* Unregister HCI device */
3364 void hci_unregister_dev(struct hci_dev *hdev)
3365 {
3366 	int id;
3367 
3368 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3369 
3370 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3371 
3372 	id = hdev->id;
3373 
3374 	write_lock(&hci_dev_list_lock);
3375 	list_del(&hdev->list);
3376 	write_unlock(&hci_dev_list_lock);
3377 
3378 	cancel_work_sync(&hdev->power_on);
3379 
3380 	hci_dev_do_close(hdev);
3381 
3382 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3383 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3384 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3385 		hci_dev_lock(hdev);
3386 		mgmt_index_removed(hdev);
3387 		hci_dev_unlock(hdev);
3388 	}
3389 
3390 	/* mgmt_index_removed should take care of emptying the
3391 	 * pending list */
3392 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3393 
3394 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3395 
3396 	if (hdev->rfkill) {
3397 		rfkill_unregister(hdev->rfkill);
3398 		rfkill_destroy(hdev->rfkill);
3399 	}
3400 
3401 	device_del(&hdev->dev);
3402 
3403 	debugfs_remove_recursive(hdev->debugfs);
3404 	kfree_const(hdev->hw_info);
3405 	kfree_const(hdev->fw_info);
3406 
3407 	destroy_workqueue(hdev->workqueue);
3408 	destroy_workqueue(hdev->req_workqueue);
3409 
3410 	hci_dev_lock(hdev);
3411 	hci_bdaddr_list_clear(&hdev->blacklist);
3412 	hci_bdaddr_list_clear(&hdev->whitelist);
3413 	hci_uuids_clear(hdev);
3414 	hci_link_keys_clear(hdev);
3415 	hci_smp_ltks_clear(hdev);
3416 	hci_smp_irks_clear(hdev);
3417 	hci_remote_oob_data_clear(hdev);
3418 	hci_adv_instances_clear(hdev);
3419 	hci_bdaddr_list_clear(&hdev->le_white_list);
3420 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
3421 	hci_conn_params_clear_all(hdev);
3422 	hci_discovery_filter_clear(hdev);
3423 	hci_dev_unlock(hdev);
3424 
3425 	hci_dev_put(hdev);
3426 
3427 	ida_simple_remove(&hci_index_ida, id);
3428 }
3429 EXPORT_SYMBOL(hci_unregister_dev);
3430 
3431 /* Suspend HCI device */
3432 int hci_suspend_dev(struct hci_dev *hdev)
3433 {
3434 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3435 	return 0;
3436 }
3437 EXPORT_SYMBOL(hci_suspend_dev);
3438 
3439 /* Resume HCI device */
3440 int hci_resume_dev(struct hci_dev *hdev)
3441 {
3442 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3443 	return 0;
3444 }
3445 EXPORT_SYMBOL(hci_resume_dev);
3446 
3447 /* Reset HCI device */
3448 int hci_reset_dev(struct hci_dev *hdev)
3449 {
3450 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3451 	struct sk_buff *skb;
3452 
3453 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3454 	if (!skb)
3455 		return -ENOMEM;
3456 
3457 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3458 	skb_put_data(skb, hw_err, 3);
3459 
3460 	/* Send Hardware Error to upper stack */
3461 	return hci_recv_frame(hdev, skb);
3462 }
3463 EXPORT_SYMBOL(hci_reset_dev);
3464 
3465 /* Receive frame from HCI drivers */
3466 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3467 {
3468 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3469 		      && !test_bit(HCI_INIT, &hdev->flags))) {
3470 		kfree_skb(skb);
3471 		return -ENXIO;
3472 	}
3473 
3474 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3475 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3476 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3477 		kfree_skb(skb);
3478 		return -EINVAL;
3479 	}
3480 
3481 	/* Incoming skb */
3482 	bt_cb(skb)->incoming = 1;
3483 
3484 	/* Time stamp */
3485 	__net_timestamp(skb);
3486 
3487 	skb_queue_tail(&hdev->rx_q, skb);
3488 	queue_work(hdev->workqueue, &hdev->rx_work);
3489 
3490 	return 0;
3491 }
3492 EXPORT_SYMBOL(hci_recv_frame);
3493 
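/* Illustrative sketch: how a driver might feed a received HCI event into
 * the core, mirroring the synthetic Hardware Error event built by
 * hci_reset_dev() above.  The helper name example_driver_rx() is
 * hypothetical; the buffer is assumed to contain one complete event packet
 * without the packet-type indicator byte.
 */
static int example_driver_rx(struct hci_dev *hdev, const void *buf,
			     unsigned int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, buf, len);

	/* hci_recv_frame() consumes the skb on both success and failure */
	return hci_recv_frame(hdev, skb);
}
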
3494 /* Receive diagnostic message from HCI drivers */
3495 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3496 {
3497 	/* Mark as diagnostic packet */
3498 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3499 
3500 	/* Time stamp */
3501 	__net_timestamp(skb);
3502 
3503 	skb_queue_tail(&hdev->rx_q, skb);
3504 	queue_work(hdev->workqueue, &hdev->rx_work);
3505 
3506 	return 0;
3507 }
3508 EXPORT_SYMBOL(hci_recv_diag);
3509 
3510 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3511 {
3512 	va_list vargs;
3513 
3514 	va_start(vargs, fmt);
3515 	kfree_const(hdev->hw_info);
3516 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3517 	va_end(vargs);
3518 }
3519 EXPORT_SYMBOL(hci_set_hw_info);
3520 
3521 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3522 {
3523 	va_list vargs;
3524 
3525 	va_start(vargs, fmt);
3526 	kfree_const(hdev->fw_info);
3527 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3528 	va_end(vargs);
3529 }
3530 EXPORT_SYMBOL(hci_set_fw_info);
3531 
3532 /* ---- Interface to upper protocols ---- */
3533 
3534 int hci_register_cb(struct hci_cb *cb)
3535 {
3536 	BT_DBG("%p name %s", cb, cb->name);
3537 
3538 	mutex_lock(&hci_cb_list_lock);
3539 	list_add_tail(&cb->list, &hci_cb_list);
3540 	mutex_unlock(&hci_cb_list_lock);
3541 
3542 	return 0;
3543 }
3544 EXPORT_SYMBOL(hci_register_cb);
3545 
3546 int hci_unregister_cb(struct hci_cb *cb)
3547 {
3548 	BT_DBG("%p name %s", cb, cb->name);
3549 
3550 	mutex_lock(&hci_cb_list_lock);
3551 	list_del(&cb->list);
3552 	mutex_unlock(&hci_cb_list_lock);
3553 
3554 	return 0;
3555 }
3556 EXPORT_SYMBOL(hci_unregister_cb);
3557 
3558 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3559 {
3560 	int err;
3561 
3562 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3563 	       skb->len);
3564 
3565 	/* Time stamp */
3566 	__net_timestamp(skb);
3567 
3568 	/* Send copy to monitor */
3569 	hci_send_to_monitor(hdev, skb);
3570 
3571 	if (atomic_read(&hdev->promisc)) {
3572 		/* Send copy to the sockets */
3573 		hci_send_to_sock(hdev, skb);
3574 	}
3575 
3576 	/* Get rid of skb owner, prior to sending to the driver. */
3577 	skb_orphan(skb);
3578 
3579 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3580 		kfree_skb(skb);
3581 		return;
3582 	}
3583 
3584 	err = hdev->send(hdev, skb);
3585 	if (err < 0) {
3586 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3587 		kfree_skb(skb);
3588 	}
3589 }
3590 
3591 /* Send HCI command */
3592 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3593 		 const void *param)
3594 {
3595 	struct sk_buff *skb;
3596 
3597 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3598 
3599 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3600 	if (!skb) {
3601 		bt_dev_err(hdev, "no memory for command");
3602 		return -ENOMEM;
3603 	}
3604 
3605 	/* Stand-alone HCI commands must be flagged as
3606 	 * single-command requests.
3607 	 */
3608 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3609 
3610 	skb_queue_tail(&hdev->cmd_q, skb);
3611 	queue_work(hdev->workqueue, &hdev->cmd_work);
3612 
3613 	return 0;
3614 }
3615 
3616 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3617 		   const void *param)
3618 {
3619 	struct sk_buff *skb;
3620 
3621 	if (hci_opcode_ogf(opcode) != 0x3f) {
3622 		/* A controller receiving a command shall respond with either
3623 		 * a Command Status Event or a Command Complete Event.
3624 		 * Therefore, all standard HCI commands must be sent via the
3625 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3626 		 * Some vendors do not comply with this rule for vendor-specific
3627 		 * commands and do not return any event. We want to support
3628 		 * unresponded commands for such cases only.
3629 		 */
3630 		bt_dev_err(hdev, "unresponded command not supported");
3631 		return -EINVAL;
3632 	}
3633 
3634 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3635 	if (!skb) {
3636 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3637 			   opcode);
3638 		return -ENOMEM;
3639 	}
3640 
3641 	hci_send_frame(hdev, skb);
3642 
3643 	return 0;
3644 }
3645 EXPORT_SYMBOL(__hci_cmd_send);
3646 
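/* Illustrative sketch: __hci_cmd_send() above only accepts vendor commands
 * (OGF 0x3f), since those are the only ones that may legitimately complete
 * without any event.  The OCF value 0x0001 and the single parameter byte
 * are hypothetical placeholders for a vendor-specific command.
 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
	u8 param = 0x01;

	return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
			      sizeof(param), &param);
}
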
3647 /* Get data from the previously sent command */
3648 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3649 {
3650 	struct hci_command_hdr *hdr;
3651 
3652 	if (!hdev->sent_cmd)
3653 		return NULL;
3654 
3655 	hdr = (void *) hdev->sent_cmd->data;
3656 
3657 	if (hdr->opcode != cpu_to_le16(opcode))
3658 		return NULL;
3659 
3660 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3661 
3662 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3663 }
3664 
3665 /* Send HCI command and wait for command complete event */
3666 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3667 			     const void *param, u32 timeout)
3668 {
3669 	struct sk_buff *skb;
3670 
3671 	if (!test_bit(HCI_UP, &hdev->flags))
3672 		return ERR_PTR(-ENETDOWN);
3673 
3674 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3675 
3676 	hci_req_sync_lock(hdev);
3677 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3678 	hci_req_sync_unlock(hdev);
3679 
3680 	return skb;
3681 }
3682 EXPORT_SYMBOL(hci_cmd_sync);
3683 
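/* Illustrative sketch: issuing a command with hci_cmd_sync() and parsing
 * the Command Complete parameters.  The helper name
 * example_read_local_version() is hypothetical; the opcode, timeout and
 * response structure are the Read Local Version Information definitions.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EIO;
	}

	rp = (struct hci_rp_read_local_version *)skb->data;
	bt_dev_info(hdev, "HCI ver %u rev %u", rp->hci_ver,
		    le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}
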
3684 /* Send ACL data */
3685 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3686 {
3687 	struct hci_acl_hdr *hdr;
3688 	int len = skb->len;
3689 
3690 	skb_push(skb, HCI_ACL_HDR_SIZE);
3691 	skb_reset_transport_header(skb);
3692 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3693 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3694 	hdr->dlen   = cpu_to_le16(len);
3695 }
3696 
3697 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3698 			  struct sk_buff *skb, __u16 flags)
3699 {
3700 	struct hci_conn *conn = chan->conn;
3701 	struct hci_dev *hdev = conn->hdev;
3702 	struct sk_buff *list;
3703 
3704 	skb->len = skb_headlen(skb);
3705 	skb->data_len = 0;
3706 
3707 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3708 
3709 	switch (hdev->dev_type) {
3710 	case HCI_PRIMARY:
3711 		hci_add_acl_hdr(skb, conn->handle, flags);
3712 		break;
3713 	case HCI_AMP:
3714 		hci_add_acl_hdr(skb, chan->handle, flags);
3715 		break;
3716 	default:
3717 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3718 		return;
3719 	}
3720 
3721 	list = skb_shinfo(skb)->frag_list;
3722 	if (!list) {
3723 		/* Non fragmented */
3724 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3725 
3726 		skb_queue_tail(queue, skb);
3727 	} else {
3728 		/* Fragmented */
3729 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3730 
3731 		skb_shinfo(skb)->frag_list = NULL;
3732 
3733 		/* Queue all fragments atomically. We need to use spin_lock_bh
3734 		 * here because of 6LoWPAN links, as there this function is
3735 		 * called from softirq and using normal spin lock could cause
3736 		 * deadlocks.
3737 		 */
3738 		spin_lock_bh(&queue->lock);
3739 
3740 		__skb_queue_tail(queue, skb);
3741 
3742 		flags &= ~ACL_START;
3743 		flags |= ACL_CONT;
3744 		do {
3745 			skb = list; list = list->next;
3746 
3747 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3748 			hci_add_acl_hdr(skb, conn->handle, flags);
3749 
3750 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3751 
3752 			__skb_queue_tail(queue, skb);
3753 		} while (list);
3754 
3755 		spin_unlock_bh(&queue->lock);
3756 	}
3757 }
3758 
3759 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3760 {
3761 	struct hci_dev *hdev = chan->conn->hdev;
3762 
3763 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3764 
3765 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3766 
3767 	queue_work(hdev->workqueue, &hdev->tx_work);
3768 }
3769 
3770 /* Send SCO data */
3771 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3772 {
3773 	struct hci_dev *hdev = conn->hdev;
3774 	struct hci_sco_hdr hdr;
3775 
3776 	BT_DBG("%s len %d", hdev->name, skb->len);
3777 
3778 	hdr.handle = cpu_to_le16(conn->handle);
3779 	hdr.dlen   = skb->len;
3780 
3781 	skb_push(skb, HCI_SCO_HDR_SIZE);
3782 	skb_reset_transport_header(skb);
3783 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3784 
3785 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3786 
3787 	skb_queue_tail(&conn->data_q, skb);
3788 	queue_work(hdev->workqueue, &hdev->tx_work);
3789 }
3790 
3791 /* ---- HCI TX task (outgoing data) ---- */
3792 
3793 /* HCI Connection scheduler */
3794 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3795 				     int *quote)
3796 {
3797 	struct hci_conn_hash *h = &hdev->conn_hash;
3798 	struct hci_conn *conn = NULL, *c;
3799 	unsigned int num = 0, min = ~0;
3800 
3801 	/* We don't have to lock the device here. Connections are always
3802 	 * added and removed with the TX task disabled. */
3803 
3804 	rcu_read_lock();
3805 
3806 	list_for_each_entry_rcu(c, &h->list, list) {
3807 		if (c->type != type || skb_queue_empty(&c->data_q))
3808 			continue;
3809 
3810 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3811 			continue;
3812 
3813 		num++;
3814 
3815 		if (c->sent < min) {
3816 			min  = c->sent;
3817 			conn = c;
3818 		}
3819 
3820 		if (hci_conn_num(hdev, type) == num)
3821 			break;
3822 	}
3823 
3824 	rcu_read_unlock();
3825 
3826 	if (conn) {
3827 		int cnt, q;
3828 
3829 		switch (conn->type) {
3830 		case ACL_LINK:
3831 			cnt = hdev->acl_cnt;
3832 			break;
3833 		case SCO_LINK:
3834 		case ESCO_LINK:
3835 			cnt = hdev->sco_cnt;
3836 			break;
3837 		case LE_LINK:
3838 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3839 			break;
3840 		default:
3841 			cnt = 0;
3842 			bt_dev_err(hdev, "unknown link type %d", conn->type);
3843 		}
3844 
3845 		q = cnt / num;
3846 		*quote = q ? q : 1;
3847 	} else
3848 		*quote = 0;
3849 
3850 	BT_DBG("conn %p quote %d", conn, *quote);
3851 	return conn;
3852 }
3853 
3854 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3855 {
3856 	struct hci_conn_hash *h = &hdev->conn_hash;
3857 	struct hci_conn *c;
3858 
3859 	bt_dev_err(hdev, "link tx timeout");
3860 
3861 	rcu_read_lock();
3862 
3863 	/* Kill stalled connections */
3864 	list_for_each_entry_rcu(c, &h->list, list) {
3865 		if (c->type == type && c->sent) {
3866 			bt_dev_err(hdev, "killing stalled connection %pMR",
3867 				   &c->dst);
3868 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3869 		}
3870 	}
3871 
3872 	rcu_read_unlock();
3873 }
3874 
3875 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3876 				      int *quote)
3877 {
3878 	struct hci_conn_hash *h = &hdev->conn_hash;
3879 	struct hci_chan *chan = NULL;
3880 	unsigned int num = 0, min = ~0, cur_prio = 0;
3881 	struct hci_conn *conn;
3882 	int cnt, q, conn_num = 0;
3883 
3884 	BT_DBG("%s", hdev->name);
3885 
3886 	rcu_read_lock();
3887 
3888 	list_for_each_entry_rcu(conn, &h->list, list) {
3889 		struct hci_chan *tmp;
3890 
3891 		if (conn->type != type)
3892 			continue;
3893 
3894 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3895 			continue;
3896 
3897 		conn_num++;
3898 
3899 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3900 			struct sk_buff *skb;
3901 
3902 			if (skb_queue_empty(&tmp->data_q))
3903 				continue;
3904 
3905 			skb = skb_peek(&tmp->data_q);
3906 			if (skb->priority < cur_prio)
3907 				continue;
3908 
3909 			if (skb->priority > cur_prio) {
3910 				num = 0;
3911 				min = ~0;
3912 				cur_prio = skb->priority;
3913 			}
3914 
3915 			num++;
3916 
3917 			if (conn->sent < min) {
3918 				min  = conn->sent;
3919 				chan = tmp;
3920 			}
3921 		}
3922 
3923 		if (hci_conn_num(hdev, type) == conn_num)
3924 			break;
3925 	}
3926 
3927 	rcu_read_unlock();
3928 
3929 	if (!chan)
3930 		return NULL;
3931 
3932 	switch (chan->conn->type) {
3933 	case ACL_LINK:
3934 		cnt = hdev->acl_cnt;
3935 		break;
3936 	case AMP_LINK:
3937 		cnt = hdev->block_cnt;
3938 		break;
3939 	case SCO_LINK:
3940 	case ESCO_LINK:
3941 		cnt = hdev->sco_cnt;
3942 		break;
3943 	case LE_LINK:
3944 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3945 		break;
3946 	default:
3947 		cnt = 0;
3948 		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3949 	}
3950 
3951 	q = cnt / num;
3952 	*quote = q ? q : 1;
3953 	BT_DBG("chan %p quote %d", chan, *quote);
3954 	return chan;
3955 }
3956 
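/* After a scheduling round, reset the sent counter of channels that got to
 * transmit and promote the head skb of starved channels to
 * HCI_PRIO_MAX - 1 so they win the next round.
 */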
3957 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3958 {
3959 	struct hci_conn_hash *h = &hdev->conn_hash;
3960 	struct hci_conn *conn;
3961 	int num = 0;
3962 
3963 	BT_DBG("%s", hdev->name);
3964 
3965 	rcu_read_lock();
3966 
3967 	list_for_each_entry_rcu(conn, &h->list, list) {
3968 		struct hci_chan *chan;
3969 
3970 		if (conn->type != type)
3971 			continue;
3972 
3973 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3974 			continue;
3975 
3976 		num++;
3977 
3978 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3979 			struct sk_buff *skb;
3980 
3981 			if (chan->sent) {
3982 				chan->sent = 0;
3983 				continue;
3984 			}
3985 
3986 			if (skb_queue_empty(&chan->data_q))
3987 				continue;
3988 
3989 			skb = skb_peek(&chan->data_q);
3990 			if (skb->priority >= HCI_PRIO_MAX - 1)
3991 				continue;
3992 
3993 			skb->priority = HCI_PRIO_MAX - 1;
3994 
3995 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3996 			       skb->priority);
3997 		}
3998 
3999 		if (hci_conn_num(hdev, type) == num)
4000 			break;
4001 	}
4002 
4003 	rcu_read_unlock();
4004 
4005 }
4006 
4007 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4008 {
4009 	/* Calculate count of blocks used by this packet */
4010 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4011 }
4012 
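/* If the device is configured, no credits are left and the last ACL
 * transmission was more than HCI_ACL_TX_TIMEOUT ago, treat the ACL link
 * as stalled and kill the affected connections.
 */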
4013 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4014 {
4015 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4016 		/* ACL tx timeout must be longer than maximum
4017 		 * link supervision timeout (40.9 seconds) */
4018 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4019 				       HCI_ACL_TX_TIMEOUT))
4020 			hci_link_tx_to(hdev, ACL_LINK);
4021 	}
4022 }
4023 
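/* Packet-based ACL scheduling: drain same-priority skbs from the channel
 * selected by hci_chan_sent() up to its quota, one credit per frame, and
 * recalculate channel priorities if anything was sent.
 */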
4024 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4025 {
4026 	unsigned int cnt = hdev->acl_cnt;
4027 	struct hci_chan *chan;
4028 	struct sk_buff *skb;
4029 	int quote;
4030 
4031 	__check_timeout(hdev, cnt);
4032 
4033 	while (hdev->acl_cnt &&
4034 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4035 		u32 priority = (skb_peek(&chan->data_q))->priority;
4036 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4037 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4038 			       skb->len, skb->priority);
4039 
4040 			/* Stop if priority has changed */
4041 			if (skb->priority < priority)
4042 				break;
4043 
4044 			skb = skb_dequeue(&chan->data_q);
4045 
4046 			hci_conn_enter_active_mode(chan->conn,
4047 						   bt_cb(skb)->force_active);
4048 
4049 			hci_send_frame(hdev, skb);
4050 			hdev->acl_last_tx = jiffies;
4051 
4052 			hdev->acl_cnt--;
4053 			chan->sent++;
4054 			chan->conn->sent++;
4055 		}
4056 	}
4057 
4058 	if (cnt != hdev->acl_cnt)
4059 		hci_prio_recalculate(hdev, ACL_LINK);
4060 }
4061 
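/* Block-based ACL scheduling: like hci_sched_acl_pkt(), but credits are
 * data blocks (__get_blocks() per skb) and AMP controllers schedule
 * AMP_LINK instead of ACL_LINK.
 */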
4062 static void hci_sched_acl_blk(struct hci_dev *hdev)
4063 {
4064 	unsigned int cnt = hdev->block_cnt;
4065 	struct hci_chan *chan;
4066 	struct sk_buff *skb;
4067 	int quote;
4068 	u8 type;
4069 
4070 	__check_timeout(hdev, cnt);
4071 
4072 	BT_DBG("%s", hdev->name);
4073 
4074 	if (hdev->dev_type == HCI_AMP)
4075 		type = AMP_LINK;
4076 	else
4077 		type = ACL_LINK;
4078 
4079 	while (hdev->block_cnt > 0 &&
4080 	       (chan = hci_chan_sent(hdev, type, &quote))) {
4081 		u32 priority = (skb_peek(&chan->data_q))->priority;
4082 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4083 			int blocks;
4084 
4085 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4086 			       skb->len, skb->priority);
4087 
4088 			/* Stop if priority has changed */
4089 			if (skb->priority < priority)
4090 				break;
4091 
4092 			skb = skb_dequeue(&chan->data_q);
4093 
4094 			blocks = __get_blocks(hdev, skb);
4095 			if (blocks > hdev->block_cnt)
4096 				return;
4097 
4098 			hci_conn_enter_active_mode(chan->conn,
4099 						   bt_cb(skb)->force_active);
4100 
4101 			hci_send_frame(hdev, skb);
4102 			hdev->acl_last_tx = jiffies;
4103 
4104 			hdev->block_cnt -= blocks;
4105 			quote -= blocks;
4106 
4107 			chan->sent += blocks;
4108 			chan->conn->sent += blocks;
4109 		}
4110 	}
4111 
4112 	if (cnt != hdev->block_cnt)
4113 		hci_prio_recalculate(hdev, type);
4114 }
4115 
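/* Dispatch ACL scheduling according to the controller's flow control mode
 * (packet-based or block-based).
 */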
4116 static void hci_sched_acl(struct hci_dev *hdev)
4117 {
4118 	BT_DBG("%s", hdev->name);
4119 
4120 	/* No ACL links to schedule on a BR/EDR controller */
4121 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4122 		return;
4123 
4124 	/* No AMP links to schedule on an AMP controller */
4125 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4126 		return;
4127 
4128 	switch (hdev->flow_ctl_mode) {
4129 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
4130 		hci_sched_acl_pkt(hdev);
4131 		break;
4132 
4133 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4134 		hci_sched_acl_blk(hdev);
4135 		break;
4136 	}
4137 }
4138 
4139 /* Schedule SCO */
4140 static void hci_sched_sco(struct hci_dev *hdev)
4141 {
4142 	struct hci_conn *conn;
4143 	struct sk_buff *skb;
4144 	int quote;
4145 
4146 	BT_DBG("%s", hdev->name);
4147 
4148 	if (!hci_conn_num(hdev, SCO_LINK))
4149 		return;
4150 
4151 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4152 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4153 			BT_DBG("skb %p len %d", skb, skb->len);
4154 			hci_send_frame(hdev, skb);
4155 
4156 			conn->sent++;
4157 			if (conn->sent == ~0)
4158 				conn->sent = 0;
4159 		}
4160 	}
4161 }
4162 
4163 static void hci_sched_esco(struct hci_dev *hdev)
4164 {
4165 	struct hci_conn *conn;
4166 	struct sk_buff *skb;
4167 	int quote;
4168 
4169 	BT_DBG("%s", hdev->name);
4170 
4171 	if (!hci_conn_num(hdev, ESCO_LINK))
4172 		return;
4173 
4174 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4175 						     &quote))) {
4176 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4177 			BT_DBG("skb %p len %d", skb, skb->len);
4178 			hci_send_frame(hdev, skb);
4179 
4180 			conn->sent++;
4181 			if (conn->sent == ~0)
4182 				conn->sent = 0;
4183 		}
4184 	}
4185 }
4186 
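/* Schedule LE data: time out stalled LE links, then drain LE channels
 * using LE credits, or ACL credits when the controller shares its ACL
 * buffers with LE.
 */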
4187 static void hci_sched_le(struct hci_dev *hdev)
4188 {
4189 	struct hci_chan *chan;
4190 	struct sk_buff *skb;
4191 	int quote, cnt, tmp;
4192 
4193 	BT_DBG("%s", hdev->name);
4194 
4195 	if (!hci_conn_num(hdev, LE_LINK))
4196 		return;
4197 
4198 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4199 		/* LE tx timeout must be longer than maximum
4200 		 * link supervision timeout (40.9 seconds) */
4201 		if (!hdev->le_cnt && hdev->le_pkts &&
4202 		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
4203 			hci_link_tx_to(hdev, LE_LINK);
4204 	}
4205 
4206 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4207 	tmp = cnt;
4208 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4209 		u32 priority = (skb_peek(&chan->data_q))->priority;
4210 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4211 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4212 			       skb->len, skb->priority);
4213 
4214 			/* Stop if priority has changed */
4215 			if (skb->priority < priority)
4216 				break;
4217 
4218 			skb = skb_dequeue(&chan->data_q);
4219 
4220 			hci_send_frame(hdev, skb);
4221 			hdev->le_last_tx = jiffies;
4222 
4223 			cnt--;
4224 			chan->sent++;
4225 			chan->conn->sent++;
4226 		}
4227 	}
4228 
4229 	if (hdev->le_pkts)
4230 		hdev->le_cnt = cnt;
4231 	else
4232 		hdev->acl_cnt = cnt;
4233 
4234 	if (cnt != tmp)
4235 		hci_prio_recalculate(hdev, LE_LINK);
4236 }
4237 
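/* TX work: run the ACL, SCO, eSCO and LE schedulers (unless the device is
 * in user channel mode) and then flush the raw packet queue.
 */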
4238 static void hci_tx_work(struct work_struct *work)
4239 {
4240 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4241 	struct sk_buff *skb;
4242 
4243 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4244 	       hdev->sco_cnt, hdev->le_cnt);
4245 
4246 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4247 		/* Schedule queues and send frames to the HCI driver */
4248 		hci_sched_acl(hdev);
4249 		hci_sched_sco(hdev);
4250 		hci_sched_esco(hdev);
4251 		hci_sched_le(hdev);
4252 	}
4253 
4254 	/* Send next queued raw (unknown type) packet */
4255 	while ((skb = skb_dequeue(&hdev->raw_q)))
4256 		hci_send_frame(hdev, skb);
4257 }
4258 
4259 /* ----- HCI RX task (incoming data processing) ----- */
4260 
4261 /* ACL data packet */
4262 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4263 {
4264 	struct hci_acl_hdr *hdr = (void *) skb->data;
4265 	struct hci_conn *conn;
4266 	__u16 handle, flags;
4267 
4268 	skb_pull(skb, HCI_ACL_HDR_SIZE);
4269 
4270 	handle = __le16_to_cpu(hdr->handle);
4271 	flags  = hci_flags(handle);
4272 	handle = hci_handle(handle);
4273 
4274 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4275 	       handle, flags);
4276 
4277 	hdev->stat.acl_rx++;
4278 
4279 	hci_dev_lock(hdev);
4280 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4281 	hci_dev_unlock(hdev);
4282 
4283 	if (conn) {
4284 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4285 
4286 		/* Send to upper protocol */
4287 		l2cap_recv_acldata(conn, skb, flags);
4288 		return;
4289 	} else {
4290 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4291 			   handle);
4292 	}
4293 
4294 	kfree_skb(skb);
4295 }
4296 
4297 /* SCO data packet */
4298 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4299 {
4300 	struct hci_sco_hdr *hdr = (void *) skb->data;
4301 	struct hci_conn *conn;
4302 	__u16 handle;
4303 
4304 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4305 
4306 	handle = __le16_to_cpu(hdr->handle);
4307 
4308 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4309 
4310 	hdev->stat.sco_rx++;
4311 
4312 	hci_dev_lock(hdev);
4313 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4314 	hci_dev_unlock(hdev);
4315 
4316 	if (conn) {
4317 		/* Send to upper protocol */
4318 		sco_recv_scodata(conn, skb);
4319 		return;
4320 	} else {
4321 		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4322 			   handle);
4323 	}
4324 
4325 	kfree_skb(skb);
4326 }
4327 
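/* A request is complete when the command queue is empty or the next queued
 * command starts a new request.
 */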
4328 static bool hci_req_is_complete(struct hci_dev *hdev)
4329 {
4330 	struct sk_buff *skb;
4331 
4332 	skb = skb_peek(&hdev->cmd_q);
4333 	if (!skb)
4334 		return true;
4335 
4336 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4337 }
4338 
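/* Re-queue a clone of the last sent command (unless it was a reset) at the
 * head of the command queue and kick the command work.
 */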
4339 static void hci_resend_last(struct hci_dev *hdev)
4340 {
4341 	struct hci_command_hdr *sent;
4342 	struct sk_buff *skb;
4343 	u16 opcode;
4344 
4345 	if (!hdev->sent_cmd)
4346 		return;
4347 
4348 	sent = (void *) hdev->sent_cmd->data;
4349 	opcode = __le16_to_cpu(sent->opcode);
4350 	if (opcode == HCI_OP_RESET)
4351 		return;
4352 
4353 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4354 	if (!skb)
4355 		return;
4356 
4357 	skb_queue_head(&hdev->cmd_q, skb);
4358 	queue_work(hdev->workqueue, &hdev->cmd_work);
4359 }
4360 
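/* Called on command completion: locate the completion callback of the
 * request that this opcode belongs to and flush any of its commands still
 * pending in the command queue.
 */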
4361 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4362 			  hci_req_complete_t *req_complete,
4363 			  hci_req_complete_skb_t *req_complete_skb)
4364 {
4365 	struct sk_buff *skb;
4366 	unsigned long flags;
4367 
4368 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4369 
4370 	/* If the completed command doesn't match the last one that was
4371 	 * sent, we need to do special handling of it.
4372 	 */
4373 	if (!hci_sent_cmd_data(hdev, opcode)) {
4374 		/* Some CSR-based controllers generate a spontaneous
4375 		 * reset complete event during init and any pending
4376 		 * command will never be completed. In such a case we
4377 		 * need to resend whatever was the last sent
4378 		 * command.
4379 		 */
4380 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4381 			hci_resend_last(hdev);
4382 
4383 		return;
4384 	}
4385 
4386 	/* If the command succeeded and there are still more commands in
4387 	 * this request, the request is not yet complete.
4388 	 */
4389 	if (!status && !hci_req_is_complete(hdev))
4390 		return;
4391 
4392 	/* If this was the last command in a request, the complete
4393 	 * callback would be found in hdev->sent_cmd instead of the
4394 	 * command queue (hdev->cmd_q).
4395 	 */
4396 	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4397 		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4398 		return;
4399 	}
4400 
4401 	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4402 		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4403 		return;
4404 	}
4405 
4406 	/* Remove all pending commands belonging to this request */
4407 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4408 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4409 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4410 			__skb_queue_head(&hdev->cmd_q, skb);
4411 			break;
4412 		}
4413 
4414 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4415 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4416 		else
4417 			*req_complete = bt_cb(skb)->hci.req_complete;
4418 		kfree_skb(skb);
4419 	}
4420 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4421 }
4422 
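/* RX work: dequeue received frames, mirror them to the monitor (and to
 * sockets in promiscuous mode), then dispatch them by packet type. All
 * frames are dropped in user channel mode, and data packets are dropped
 * while the device is still initializing.
 */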
4423 static void hci_rx_work(struct work_struct *work)
4424 {
4425 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4426 	struct sk_buff *skb;
4427 
4428 	BT_DBG("%s", hdev->name);
4429 
4430 	while ((skb = skb_dequeue(&hdev->rx_q))) {
4431 		/* Send copy to monitor */
4432 		hci_send_to_monitor(hdev, skb);
4433 
4434 		if (atomic_read(&hdev->promisc)) {
4435 			/* Send copy to the sockets */
4436 			hci_send_to_sock(hdev, skb);
4437 		}
4438 
4439 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4440 			kfree_skb(skb);
4441 			continue;
4442 		}
4443 
4444 		if (test_bit(HCI_INIT, &hdev->flags)) {
4445 			/* Don't process data packets in these states. */
4446 			switch (hci_skb_pkt_type(skb)) {
4447 			case HCI_ACLDATA_PKT:
4448 			case HCI_SCODATA_PKT:
4449 				kfree_skb(skb);
4450 				continue;
4451 			}
4452 		}
4453 
4454 		/* Process frame */
4455 		switch (hci_skb_pkt_type(skb)) {
4456 		case HCI_EVENT_PKT:
4457 			BT_DBG("%s Event packet", hdev->name);
4458 			hci_event_packet(hdev, skb);
4459 			break;
4460 
4461 		case HCI_ACLDATA_PKT:
4462 			BT_DBG("%s ACL data packet", hdev->name);
4463 			hci_acldata_packet(hdev, skb);
4464 			break;
4465 
4466 		case HCI_SCODATA_PKT:
4467 			BT_DBG("%s SCO data packet", hdev->name);
4468 			hci_scodata_packet(hdev, skb);
4469 			break;
4470 
4471 		default:
4472 			kfree_skb(skb);
4473 			break;
4474 		}
4475 	}
4476 }
4477 
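/* Command work: when the controller has a command credit, send the next
 * queued command, keep a clone in hdev->sent_cmd for completion handling
 * and arm the command timeout (cancelled while a reset is in progress).
 */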
4478 static void hci_cmd_work(struct work_struct *work)
4479 {
4480 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4481 	struct sk_buff *skb;
4482 
4483 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4484 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4485 
4486 	/* Send queued commands */
4487 	if (atomic_read(&hdev->cmd_cnt)) {
4488 		skb = skb_dequeue(&hdev->cmd_q);
4489 		if (!skb)
4490 			return;
4491 
4492 		kfree_skb(hdev->sent_cmd);
4493 
4494 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4495 		if (hdev->sent_cmd) {
4496 			atomic_dec(&hdev->cmd_cnt);
4497 			hci_send_frame(hdev, skb);
4498 			if (test_bit(HCI_RESET, &hdev->flags))
4499 				cancel_delayed_work(&hdev->cmd_timer);
4500 			else
4501 				schedule_delayed_work(&hdev->cmd_timer,
4502 						      HCI_CMD_TIMEOUT);
4503 		} else {
4504 			skb_queue_head(&hdev->cmd_q, skb);
4505 			queue_work(hdev->workqueue, &hdev->cmd_work);
4506 		}
4507 	}
4508 }
4509