xref: /linux/net/bluetooth/hci_core.c (revision 40e79150c1686263e6a031d7702aec63aff31332)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 
49 static void hci_rx_work(struct work_struct *work);
50 static void hci_cmd_work(struct work_struct *work);
51 static void hci_tx_work(struct work_struct *work);
52 
53 /* HCI device list */
54 LIST_HEAD(hci_dev_list);
55 DEFINE_RWLOCK(hci_dev_list_lock);
56 
57 /* HCI callback list */
58 LIST_HEAD(hci_cb_list);
59 DEFINE_MUTEX(hci_cb_list_lock);
60 
61 /* HCI ID Numbering */
62 static DEFINE_IDA(hci_index_ida);
63 
64 /* ---- HCI debugfs entries ---- */
65 
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 			     size_t count, loff_t *ppos)
68 {
69 	struct hci_dev *hdev = file->private_data;
70 	char buf[3];
71 
72 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
73 	buf[1] = '\n';
74 	buf[2] = '\0';
75 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76 }
77 
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 			      size_t count, loff_t *ppos)
80 {
81 	struct hci_dev *hdev = file->private_data;
82 	struct sk_buff *skb;
83 	bool enable;
84 	int err;
85 
86 	if (!test_bit(HCI_UP, &hdev->flags))
87 		return -ENETDOWN;
88 
89 	err = kstrtobool_from_user(user_buf, count, &enable);
90 	if (err)
91 		return err;
92 
93 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
94 		return -EALREADY;
95 
96 	hci_req_sync_lock(hdev);
97 	if (enable)
98 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 				     HCI_CMD_TIMEOUT);
100 	else
101 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 				     HCI_CMD_TIMEOUT);
103 	hci_req_sync_unlock(hdev);
104 
105 	if (IS_ERR(skb))
106 		return PTR_ERR(skb);
107 
108 	kfree_skb(skb);
109 
110 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
111 
112 	return count;
113 }
114 
115 static const struct file_operations dut_mode_fops = {
116 	.open		= simple_open,
117 	.read		= dut_mode_read,
118 	.write		= dut_mode_write,
119 	.llseek		= default_llseek,
120 };
121 
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 				size_t count, loff_t *ppos)
124 {
125 	struct hci_dev *hdev = file->private_data;
126 	char buf[3];
127 
128 	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129 	buf[1] = '\n';
130 	buf[2] = '\0';
131 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132 }
133 
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 				 size_t count, loff_t *ppos)
136 {
137 	struct hci_dev *hdev = file->private_data;
138 	bool enable;
139 	int err;
140 
141 	err = kstrtobool_from_user(user_buf, count, &enable);
142 	if (err)
143 		return err;
144 
145 	/* When the diagnostic flags are not persistent and the transport
146 	 * is not active or in user channel operation, then there is no need
147 	 * for the vendor callback. Instead just store the desired value and
148 	 * the setting will be programmed when the controller gets powered on.
149 	 */
150 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
151 	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
152 	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
153 		goto done;
154 
155 	hci_req_sync_lock(hdev);
156 	err = hdev->set_diag(hdev, enable);
157 	hci_req_sync_unlock(hdev);
158 
159 	if (err < 0)
160 		return err;
161 
162 done:
163 	if (enable)
164 		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
165 	else
166 		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
167 
168 	return count;
169 }
170 
171 static const struct file_operations vendor_diag_fops = {
172 	.open		= simple_open,
173 	.read		= vendor_diag_read,
174 	.write		= vendor_diag_write,
175 	.llseek		= default_llseek,
176 };
177 
178 static void hci_debugfs_create_basic(struct hci_dev *hdev)
179 {
180 	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
181 			    &dut_mode_fops);
182 
183 	if (hdev->set_diag)
184 		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
185 				    &vendor_diag_fops);
186 }
187 
188 static int hci_reset_req(struct hci_request *req, unsigned long opt)
189 {
190 	BT_DBG("%s %ld", req->hdev->name, opt);
191 
192 	/* Reset device */
193 	set_bit(HCI_RESET, &req->hdev->flags);
194 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
195 	return 0;
196 }
197 
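/* First-stage init for BR/EDR capable (primary) controllers: select
 * packet-based flow control and read the basic controller identity
 * (local features, local version, BD address).
 */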
198 static void bredr_init(struct hci_request *req)
199 {
200 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201 
202 	/* Read Local Supported Features */
203 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
204 
205 	/* Read Local Version */
206 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
207 
208 	/* Read BD Address */
209 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
210 }
211 
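/* First-stage init for AMP controllers: select block-based flow
 * control and read the AMP-specific controller information.
 */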
212 static void amp_init1(struct hci_request *req)
213 {
214 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
215 
216 	/* Read Local Version */
217 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
218 
219 	/* Read Local Supported Commands */
220 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
221 
222 	/* Read Local AMP Info */
223 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
224 
225 	/* Read Data Blk size */
226 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
227 
228 	/* Read Flow Control Mode */
229 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
230 
231 	/* Read Location Data */
232 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
233 }
234 
235 static int amp_init2(struct hci_request *req)
236 {
237 	/* Read Local Supported Features. Not all AMP controllers
238 	 * support this so it's placed conditionally in the second
239 	 * stage init.
240 	 */
241 	if (req->hdev->commands[14] & 0x20)
242 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
243 
244 	return 0;
245 }
246 
247 static int hci_init1_req(struct hci_request *req, unsigned long opt)
248 {
249 	struct hci_dev *hdev = req->hdev;
250 
251 	BT_DBG("%s %ld", hdev->name, opt);
252 
253 	/* Reset */
254 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
255 		hci_reset_req(req, 0);
256 
257 	switch (hdev->dev_type) {
258 	case HCI_PRIMARY:
259 		bredr_init(req);
260 		break;
261 	case HCI_AMP:
262 		amp_init1(req);
263 		break;
264 	default:
265 		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
266 		break;
267 	}
268 
269 	return 0;
270 }
271 
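/* Second-stage BR/EDR setup: read buffer sizes and identity data,
 * clear all event filters and set the connection accept timeout.
 */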
272 static void bredr_setup(struct hci_request *req)
273 {
274 	__le16 param;
275 	__u8 flt_type;
276 
277 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
278 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
279 
280 	/* Read Class of Device */
281 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
282 
283 	/* Read Local Name */
284 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
285 
286 	/* Read Voice Setting */
287 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
288 
289 	/* Read Number of Supported IAC */
290 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
291 
292 	/* Read Current IAC LAP */
293 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
294 
295 	/* Clear Event Filters */
296 	flt_type = HCI_FLT_CLEAR_ALL;
297 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
298 
299 	/* Connection accept timeout ~20 secs */
300 	param = cpu_to_le16(0x7d00);
301 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
302 }
303 
304 static void le_setup(struct hci_request *req)
305 {
306 	struct hci_dev *hdev = req->hdev;
307 
308 	/* Read LE Buffer Size */
309 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
310 
311 	/* Read LE Local Supported Features */
312 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
313 
314 	/* Read LE Supported States */
315 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
316 
317 	/* LE-only controllers have LE implicitly enabled */
318 	if (!lmp_bredr_capable(hdev))
319 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
320 }
321 
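/* Build the page 1 event mask from the controller's capabilities.
 * BR/EDR capable controllers start from the default mask, while
 * LE-only controllers get a minimal mask covering only the events
 * they can actually generate.
 */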
322 static void hci_setup_event_mask(struct hci_request *req)
323 {
324 	struct hci_dev *hdev = req->hdev;
325 
326 	/* The second byte is 0xff instead of 0x9f (two reserved bits
327 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
328 	 * command otherwise.
329 	 */
330 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
331 
332 	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
333 	 * any event mask for pre-1.2 devices.
334 	 */
335 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
336 		return;
337 
338 	if (lmp_bredr_capable(hdev)) {
339 		events[4] |= 0x01; /* Flow Specification Complete */
340 	} else {
341 		/* Use a different default for LE-only devices */
342 		memset(events, 0, sizeof(events));
343 		events[1] |= 0x20; /* Command Complete */
344 		events[1] |= 0x40; /* Command Status */
345 		events[1] |= 0x80; /* Hardware Error */
346 
347 		/* If the controller supports the Disconnect command, enable
348 		 * the corresponding event. In addition enable packet flow
349 		 * control related events.
350 		 */
351 		if (hdev->commands[0] & 0x20) {
352 			events[0] |= 0x10; /* Disconnection Complete */
353 			events[2] |= 0x04; /* Number of Completed Packets */
354 			events[3] |= 0x02; /* Data Buffer Overflow */
355 		}
356 
357 		/* If the controller supports the Read Remote Version
358 		 * Information command, enable the corresponding event.
359 		 */
360 		if (hdev->commands[2] & 0x80)
361 			events[1] |= 0x08; /* Read Remote Version Information
362 					    * Complete
363 					    */
364 
365 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366 			events[0] |= 0x80; /* Encryption Change */
367 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
368 		}
369 	}
370 
371 	if (lmp_inq_rssi_capable(hdev) ||
372 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
373 		events[4] |= 0x02; /* Inquiry Result with RSSI */
374 
375 	if (lmp_ext_feat_capable(hdev))
376 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
377 
378 	if (lmp_esco_capable(hdev)) {
379 		events[5] |= 0x08; /* Synchronous Connection Complete */
380 		events[5] |= 0x10; /* Synchronous Connection Changed */
381 	}
382 
383 	if (lmp_sniffsubr_capable(hdev))
384 		events[5] |= 0x20; /* Sniff Subrating */
385 
386 	if (lmp_pause_enc_capable(hdev))
387 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
388 
389 	if (lmp_ext_inq_capable(hdev))
390 		events[5] |= 0x40; /* Extended Inquiry Result */
391 
392 	if (lmp_no_flush_capable(hdev))
393 		events[7] |= 0x01; /* Enhanced Flush Complete */
394 
395 	if (lmp_lsto_capable(hdev))
396 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
397 
398 	if (lmp_ssp_capable(hdev)) {
399 		events[6] |= 0x01;	/* IO Capability Request */
400 		events[6] |= 0x02;	/* IO Capability Response */
401 		events[6] |= 0x04;	/* User Confirmation Request */
402 		events[6] |= 0x08;	/* User Passkey Request */
403 		events[6] |= 0x10;	/* Remote OOB Data Request */
404 		events[6] |= 0x20;	/* Simple Pairing Complete */
405 		events[7] |= 0x04;	/* User Passkey Notification */
406 		events[7] |= 0x08;	/* Keypress Notification */
407 		events[7] |= 0x10;	/* Remote Host Supported
408 					 * Features Notification
409 					 */
410 	}
411 
412 	if (lmp_le_capable(hdev))
413 		events[7] |= 0x20;	/* LE Meta-Event */
414 
415 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
416 }
417 
418 static int hci_init2_req(struct hci_request *req, unsigned long opt)
419 {
420 	struct hci_dev *hdev = req->hdev;
421 
422 	if (hdev->dev_type == HCI_AMP)
423 		return amp_init2(req);
424 
425 	if (lmp_bredr_capable(hdev))
426 		bredr_setup(req);
427 	else
428 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
429 
430 	if (lmp_le_capable(hdev))
431 		le_setup(req);
432 
433 	/* All Bluetooth 1.2 and later controllers should support the
434 	 * HCI command for reading the local supported commands.
435 	 *
436 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
437 	 * but do not have support for this command. If that is the case,
438 	 * the driver can quirk the behavior and skip reading the local
439 	 * supported commands.
440 	 */
441 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
443 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
444 
445 	if (lmp_ssp_capable(hdev)) {
446 		/* When SSP is available, the host features page should
447 		 * also be available. However, some controllers list
448 		 * max_page as 0 as long as SSP has not been enabled. To
449 		 * achieve proper debugging output, force max_page to at
450 		 * least 1.
451 		 */
452 		hdev->max_page = 0x01;
453 
454 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
455 			u8 mode = 0x01;
456 
457 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458 				    sizeof(mode), &mode);
459 		} else {
460 			struct hci_cp_write_eir cp;
461 
462 			memset(hdev->eir, 0, sizeof(hdev->eir));
463 			memset(&cp, 0, sizeof(cp));
464 
465 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
466 		}
467 	}
468 
469 	if (lmp_inq_rssi_capable(hdev) ||
470 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
471 		u8 mode;
472 
473 		/* If Extended Inquiry Result events are supported, then
474 		 * they are clearly preferred over Inquiry Result with RSSI
475 		 * events.
476 		 */
477 		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
478 
479 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
480 	}
481 
482 	if (lmp_inq_tx_pwr_capable(hdev))
483 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
484 
485 	if (lmp_ext_feat_capable(hdev)) {
486 		struct hci_cp_read_local_ext_features cp;
487 
488 		cp.page = 0x01;
489 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
490 			    sizeof(cp), &cp);
491 	}
492 
493 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
494 		u8 enable = 1;
495 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
496 			    &enable);
497 	}
498 
499 	return 0;
500 }
501 
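/* Derive the default link policy from the supported LMP features
 * (role switch, hold, sniff and park modes).
 */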
502 static void hci_setup_link_policy(struct hci_request *req)
503 {
504 	struct hci_dev *hdev = req->hdev;
505 	struct hci_cp_write_def_link_policy cp;
506 	u16 link_policy = 0;
507 
508 	if (lmp_rswitch_capable(hdev))
509 		link_policy |= HCI_LP_RSWITCH;
510 	if (lmp_hold_capable(hdev))
511 		link_policy |= HCI_LP_HOLD;
512 	if (lmp_sniff_capable(hdev))
513 		link_policy |= HCI_LP_SNIFF;
514 	if (lmp_park_capable(hdev))
515 		link_policy |= HCI_LP_PARK;
516 
517 	cp.policy = cpu_to_le16(link_policy);
518 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
519 }
520 
521 static void hci_set_le_support(struct hci_request *req)
522 {
523 	struct hci_dev *hdev = req->hdev;
524 	struct hci_cp_write_le_host_supported cp;
525 
526 	/* LE-only devices do not support explicit enablement */
527 	if (!lmp_bredr_capable(hdev))
528 		return;
529 
530 	memset(&cp, 0, sizeof(cp));
531 
532 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
533 		cp.le = 0x01;
534 		cp.simul = 0x00;
535 	}
536 
537 	if (cp.le != lmp_host_le_capable(hdev))
538 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
539 			    &cp);
540 }
541 
542 static void hci_set_event_mask_page_2(struct hci_request *req)
543 {
544 	struct hci_dev *hdev = req->hdev;
545 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
546 	bool changed = false;
547 
548 	/* If Connectionless Slave Broadcast master role is supported,
549 	 * enable all necessary events for it.
550 	 */
551 	if (lmp_csb_master_capable(hdev)) {
552 		events[1] |= 0x40;	/* Triggered Clock Capture */
553 		events[1] |= 0x80;	/* Synchronization Train Complete */
554 		events[2] |= 0x10;	/* Slave Page Response Timeout */
555 		events[2] |= 0x20;	/* CSB Channel Map Change */
556 		changed = true;
557 	}
558 
559 	/* If Connectionless Slave Broadcast slave role is supported,
560 	 * enable all necessary events for it.
561 	 */
562 	if (lmp_csb_slave_capable(hdev)) {
563 		events[2] |= 0x01;	/* Synchronization Train Received */
564 		events[2] |= 0x02;	/* CSB Receive */
565 		events[2] |= 0x04;	/* CSB Timeout */
566 		events[2] |= 0x08;	/* Truncated Page Complete */
567 		changed = true;
568 	}
569 
570 	/* Enable Authenticated Payload Timeout Expired event if supported */
571 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
572 		events[2] |= 0x80;
573 		changed = true;
574 	}
575 
576 	/* Some Broadcom based controllers indicate support for the Set Event
577 	 * Mask Page 2 command, but then actually do not support it. Since
578 	 * the default value is all bits set to zero, the command is only
579 	 * required if the event mask has to be changed. In case no change
580 	 * to the event mask is needed, skip this command.
581 	 */
582 	if (changed)
583 		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584 			    sizeof(events), events);
585 }
586 
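/* Third-stage init: program the event masks, link policy and page
 * scan settings, and for LE capable controllers the LE event mask
 * plus the white list, resolving list and data length parameters.
 */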
587 static int hci_init3_req(struct hci_request *req, unsigned long opt)
588 {
589 	struct hci_dev *hdev = req->hdev;
590 	u8 p;
591 
592 	hci_setup_event_mask(req);
593 
594 	if (hdev->commands[6] & 0x20 &&
595 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
596 		struct hci_cp_read_stored_link_key cp;
597 
598 		bacpy(&cp.bdaddr, BDADDR_ANY);
599 		cp.read_all = 0x01;
600 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
601 	}
602 
603 	if (hdev->commands[5] & 0x10)
604 		hci_setup_link_policy(req);
605 
606 	if (hdev->commands[8] & 0x01)
607 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
608 
609 	if (hdev->commands[18] & 0x04)
610 		hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
611 
612 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
613 	 * support the Read Page Scan Type command. Check support for
614 	 * this command in the bit mask of supported commands.
615 	 */
616 	if (hdev->commands[13] & 0x01)
617 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
618 
619 	if (lmp_le_capable(hdev)) {
620 		u8 events[8];
621 
622 		memset(events, 0, sizeof(events));
623 
624 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
625 			events[0] |= 0x10;	/* LE Long Term Key Request */
626 
627 		/* If the controller supports the Connection Parameters Request
628 		 * Link Layer Procedure, enable the corresponding event.
629 		 */
630 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
631 			events[0] |= 0x20;	/* LE Remote Connection
632 						 * Parameter Request
633 						 */
634 
635 		/* If the controller supports the Data Length Extension
636 		 * feature, enable the corresponding event.
637 		 */
638 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
639 			events[0] |= 0x40;	/* LE Data Length Change */
640 
641 		/* If the controller supports LL Privacy feature, enable
642 		 * the corresponding event.
643 		 */
644 		if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
645 			events[1] |= 0x02;	/* LE Enhanced Connection
646 						 * Complete
647 						 */
648 
649 		/* If the controller supports Extended Scanner Filter
650 		 * Policies, enable the corresponding event.
651 		 */
652 		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
653 			events[1] |= 0x04;	/* LE Direct Advertising
654 						 * Report
655 						 */
656 
657 		/* If the controller supports Channel Selection Algorithm #2
658 		 * feature, enable the corresponding event.
659 		 */
660 		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
661 			events[2] |= 0x08;	/* LE Channel Selection
662 						 * Algorithm
663 						 */
664 
665 		/* If the controller supports the LE Set Scan Enable command,
666 		 * enable the corresponding advertising report event.
667 		 */
668 		if (hdev->commands[26] & 0x08)
669 			events[0] |= 0x02;	/* LE Advertising Report */
670 
671 		/* If the controller supports the LE Create Connection
672 		 * command, enable the corresponding event.
673 		 */
674 		if (hdev->commands[26] & 0x10)
675 			events[0] |= 0x01;	/* LE Connection Complete */
676 
677 		/* If the controller supports the LE Connection Update
678 		 * command, enable the corresponding event.
679 		 */
680 		if (hdev->commands[27] & 0x04)
681 			events[0] |= 0x04;	/* LE Connection Update
682 						 * Complete
683 						 */
684 
685 		/* If the controller supports the LE Read Remote Used Features
686 		 * command, enable the corresponding event.
687 		 */
688 		if (hdev->commands[27] & 0x20)
689 			events[0] |= 0x08;	/* LE Read Remote Used
690 						 * Features Complete
691 						 */
692 
693 		/* If the controller supports the LE Read Local P-256
694 		 * Public Key command, enable the corresponding event.
695 		 */
696 		if (hdev->commands[34] & 0x02)
697 			events[0] |= 0x80;	/* LE Read Local P-256
698 						 * Public Key Complete
699 						 */
700 
701 		/* If the controller supports the LE Generate DHKey
702 		 * command, enable the corresponding event.
703 		 */
704 		if (hdev->commands[34] & 0x04)
705 			events[1] |= 0x01;	/* LE Generate DHKey Complete */
706 
707 		/* If the controller supports the LE Set Default PHY or
708 		 * LE Set PHY commands, enable the corresponding event.
709 		 */
710 		if (hdev->commands[35] & (0x20 | 0x40))
711 			events[1] |= 0x08;        /* LE PHY Update Complete */
712 
713 		/* If the controller supports LE Set Extended Scan Parameters
714 		 * and LE Set Extended Scan Enable commands, enable the
715 		 * corresponding event.
716 		 */
717 		if (use_ext_scan(hdev))
718 			events[1] |= 0x10;	/* LE Extended Advertising
719 						 * Report
720 						 */
721 
722 		/* If the controller supports the LE Extended Advertising
723 		 * command, enable the corresponding event.
724 		 */
725 		if (ext_adv_capable(hdev))
726 			events[2] |= 0x02;	/* LE Advertising Set
727 						 * Terminated
728 						 */
729 
730 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
731 			    events);
732 
733 		/* Read LE Advertising Channel TX Power */
734 		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
735 			/* The HCI spec forbids mixing legacy and extended
736 			 * advertising commands, and READ_ADV_TX_POWER is a
737 			 * legacy command. So do not call it if extended
738 			 * advertising is supported, otherwise the controller
739 			 * will return COMMAND_DISALLOWED for extended commands.
740 			 */
741 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
742 		}
743 
744 		if (hdev->commands[26] & 0x40) {
745 			/* Read LE White List Size */
746 			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
747 				    0, NULL);
748 		}
749 
750 		if (hdev->commands[26] & 0x80) {
751 			/* Clear LE White List */
752 			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
753 		}
754 
755 		if (hdev->commands[34] & 0x40) {
756 			/* Read LE Resolving List Size */
757 			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
758 				    0, NULL);
759 		}
760 
761 		if (hdev->commands[34] & 0x20) {
762 			/* Clear LE Resolving List */
763 			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
764 		}
765 
766 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
767 			/* Read LE Maximum Data Length */
768 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
769 
770 			/* Read LE Suggested Default Data Length */
771 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
772 		}
773 
774 		if (ext_adv_capable(hdev)) {
775 			/* Read LE Number of Supported Advertising Sets */
776 			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
777 				    0, NULL);
778 		}
779 
780 		hci_set_le_support(req);
781 	}
782 
783 	/* Read features beyond page 1 if available */
784 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
785 		struct hci_cp_read_local_ext_features cp;
786 
787 		cp.page = p;
788 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
789 			    sizeof(cp), &cp);
790 	}
791 
792 	return 0;
793 }
794 
795 static int hci_init4_req(struct hci_request *req, unsigned long opt)
796 {
797 	struct hci_dev *hdev = req->hdev;
798 
799 	/* Some Broadcom based Bluetooth controllers do not support the
800 	 * Delete Stored Link Key command. They are clearly indicating its
801 	 * absence in the bit mask of supported commands.
802 	 *
803 	 * Check the supported commands and only if the command is marked
804 	 * as supported send it. If not supported assume that the controller
805 	 * does not have actual support for stored link keys which makes this
806 	 * command redundant anyway.
807 	 *
808 	 * Some controllers indicate that they support deleting stored
809 	 * link keys, but they don't. The quirk lets a driver
810 	 * just disable this command.
811 	 */
812 	if (hdev->commands[6] & 0x80 &&
813 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
814 		struct hci_cp_delete_stored_link_key cp;
815 
816 		bacpy(&cp.bdaddr, BDADDR_ANY);
817 		cp.delete_all = 0x01;
818 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
819 			    sizeof(cp), &cp);
820 	}
821 
822 	/* Set event mask page 2 if the HCI command for it is supported */
823 	if (hdev->commands[22] & 0x04)
824 		hci_set_event_mask_page_2(req);
825 
826 	/* Read local codec list if the HCI command is supported */
827 	if (hdev->commands[29] & 0x20)
828 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
829 
830 	/* Read local pairing options if the HCI command is supported */
831 	if (hdev->commands[41] & 0x08)
832 		hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
833 
834 	/* Get MWS transport configuration if the HCI command is supported */
835 	if (hdev->commands[30] & 0x08)
836 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
837 
838 	/* Check for Synchronization Train support */
839 	if (lmp_sync_train_capable(hdev))
840 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
841 
842 	/* Enable Secure Connections if supported and configured */
843 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
844 	    bredr_sc_enabled(hdev)) {
845 		u8 support = 0x01;
846 
847 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
848 			    sizeof(support), &support);
849 	}
850 
851 	/* If supported, set erroneous data reporting to match the
852 	 * wideband speech setting value
853 	 */
854 	if (hdev->commands[18] & 0x08) {
855 		bool enabled = hci_dev_test_flag(hdev,
856 						 HCI_WIDEBAND_SPEECH_ENABLED);
857 
858 		if (enabled !=
859 		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
860 			struct hci_cp_write_def_err_data_reporting cp;
861 
862 			cp.err_data_reporting = enabled ?
863 						ERR_DATA_REPORTING_ENABLED :
864 						ERR_DATA_REPORTING_DISABLED;
865 
866 			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
867 				    sizeof(cp), &cp);
868 		}
869 	}
870 
871 	/* Set Suggested Default Data Length to maximum if supported */
872 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
873 		struct hci_cp_le_write_def_data_len cp;
874 
875 		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
876 		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
877 		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
878 	}
879 
880 	/* Set Default PHY parameters if command is supported */
881 	if (hdev->commands[35] & 0x20) {
882 		struct hci_cp_le_set_default_phy cp;
883 
884 		cp.all_phys = 0x00;
885 		cp.tx_phys = hdev->le_tx_def_phys;
886 		cp.rx_phys = hdev->le_rx_def_phys;
887 
888 		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
889 	}
890 
891 	return 0;
892 }
893 
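/* Run the staged init requests synchronously. AMP controllers stop
 * after the second stage; primary controllers continue with stages
 * three and four and, while in setup or config state, also get their
 * debugfs entries created.
 */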
894 static int __hci_init(struct hci_dev *hdev)
895 {
896 	int err;
897 
898 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
899 	if (err < 0)
900 		return err;
901 
902 	if (hci_dev_test_flag(hdev, HCI_SETUP))
903 		hci_debugfs_create_basic(hdev);
904 
905 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
906 	if (err < 0)
907 		return err;
908 
909 	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
910 	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
911 	 * first two stages of init.
912 	 */
913 	if (hdev->dev_type != HCI_PRIMARY)
914 		return 0;
915 
916 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
917 	if (err < 0)
918 		return err;
919 
920 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
921 	if (err < 0)
922 		return err;
923 
924 	/* This function is only called when the controller is actually in
925 	 * configured state. When the controller is marked as unconfigured,
926 	 * this initialization procedure is not run.
927 	 *
928 	 * It means that it is possible that a controller runs through its
929 	 * setup phase and then discovers missing settings. If that is the
930 	 * case, then this function will not be called. It then will only
931 	 * be called during the config phase.
932 	 *
933 	 * So only when in setup phase or config phase, create the debugfs
934 	 * entries and register the SMP channels.
935 	 */
936 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
937 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
938 		return 0;
939 
940 	hci_debugfs_create_common(hdev);
941 
942 	if (lmp_bredr_capable(hdev))
943 		hci_debugfs_create_bredr(hdev);
944 
945 	if (lmp_le_capable(hdev))
946 		hci_debugfs_create_le(hdev);
947 
948 	return 0;
949 }
950 
951 static int hci_init0_req(struct hci_request *req, unsigned long opt)
952 {
953 	struct hci_dev *hdev = req->hdev;
954 
955 	BT_DBG("%s %ld", hdev->name, opt);
956 
957 	/* Reset */
958 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
959 		hci_reset_req(req, 0);
960 
961 	/* Read Local Version */
962 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
963 
964 	/* Read BD Address */
965 	if (hdev->set_bdaddr)
966 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
967 
968 	return 0;
969 }
970 
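/* Minimal init for controllers that come up unconfigured: unless the
 * raw-device quirk is set, run hci_init0_req to reset the controller
 * and read its version information (and BD address when the driver
 * provides a set_bdaddr callback).
 */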
971 static int __hci_unconf_init(struct hci_dev *hdev)
972 {
973 	int err;
974 
975 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
976 		return 0;
977 
978 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
979 	if (err < 0)
980 		return err;
981 
982 	if (hci_dev_test_flag(hdev, HCI_SETUP))
983 		hci_debugfs_create_basic(hdev);
984 
985 	return 0;
986 }
987 
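/* Simple request builders used by the ioctl helpers further below. */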
988 static int hci_scan_req(struct hci_request *req, unsigned long opt)
989 {
990 	__u8 scan = opt;
991 
992 	BT_DBG("%s %x", req->hdev->name, scan);
993 
994 	/* Inquiry and Page scans */
995 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
996 	return 0;
997 }
998 
999 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1000 {
1001 	__u8 auth = opt;
1002 
1003 	BT_DBG("%s %x", req->hdev->name, auth);
1004 
1005 	/* Authentication */
1006 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1007 	return 0;
1008 }
1009 
1010 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1011 {
1012 	__u8 encrypt = opt;
1013 
1014 	BT_DBG("%s %x", req->hdev->name, encrypt);
1015 
1016 	/* Encryption */
1017 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1018 	return 0;
1019 }
1020 
1021 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1022 {
1023 	__le16 policy = cpu_to_le16(opt);
1024 
1025 	BT_DBG("%s %x", req->hdev->name, policy);
1026 
1027 	/* Default link policy */
1028 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1029 	return 0;
1030 }
1031 
1032 /* Get HCI device by index.
1033  * Device is held on return. */
1034 struct hci_dev *hci_dev_get(int index)
1035 {
1036 	struct hci_dev *hdev = NULL, *d;
1037 
1038 	BT_DBG("%d", index);
1039 
1040 	if (index < 0)
1041 		return NULL;
1042 
1043 	read_lock(&hci_dev_list_lock);
1044 	list_for_each_entry(d, &hci_dev_list, list) {
1045 		if (d->id == index) {
1046 			hdev = hci_dev_hold(d);
1047 			break;
1048 		}
1049 	}
1050 	read_unlock(&hci_dev_list_lock);
1051 	return hdev;
1052 }
1053 
1054 /* ---- Inquiry support ---- */
1055 
1056 bool hci_discovery_active(struct hci_dev *hdev)
1057 {
1058 	struct discovery_state *discov = &hdev->discovery;
1059 
1060 	switch (discov->state) {
1061 	case DISCOVERY_FINDING:
1062 	case DISCOVERY_RESOLVING:
1063 		return true;
1064 
1065 	default:
1066 		return false;
1067 	}
1068 }
1069 
1070 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1071 {
1072 	int old_state = hdev->discovery.state;
1073 
1074 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1075 
1076 	if (old_state == state)
1077 		return;
1078 
1079 	hdev->discovery.state = state;
1080 
1081 	switch (state) {
1082 	case DISCOVERY_STOPPED:
1083 		hci_update_background_scan(hdev);
1084 
1085 		if (old_state != DISCOVERY_STARTING)
1086 			mgmt_discovering(hdev, 0);
1087 		break;
1088 	case DISCOVERY_STARTING:
1089 		break;
1090 	case DISCOVERY_FINDING:
1091 		mgmt_discovering(hdev, 1);
1092 		break;
1093 	case DISCOVERY_RESOLVING:
1094 		break;
1095 	case DISCOVERY_STOPPING:
1096 		break;
1097 	}
1098 }
1099 
1100 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1101 {
1102 	struct discovery_state *cache = &hdev->discovery;
1103 	struct inquiry_entry *p, *n;
1104 
1105 	list_for_each_entry_safe(p, n, &cache->all, all) {
1106 		list_del(&p->all);
1107 		kfree(p);
1108 	}
1109 
1110 	INIT_LIST_HEAD(&cache->unknown);
1111 	INIT_LIST_HEAD(&cache->resolve);
1112 }
1113 
1114 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1115 					       bdaddr_t *bdaddr)
1116 {
1117 	struct discovery_state *cache = &hdev->discovery;
1118 	struct inquiry_entry *e;
1119 
1120 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1121 
1122 	list_for_each_entry(e, &cache->all, all) {
1123 		if (!bacmp(&e->data.bdaddr, bdaddr))
1124 			return e;
1125 	}
1126 
1127 	return NULL;
1128 }
1129 
1130 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1131 						       bdaddr_t *bdaddr)
1132 {
1133 	struct discovery_state *cache = &hdev->discovery;
1134 	struct inquiry_entry *e;
1135 
1136 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1137 
1138 	list_for_each_entry(e, &cache->unknown, list) {
1139 		if (!bacmp(&e->data.bdaddr, bdaddr))
1140 			return e;
1141 	}
1142 
1143 	return NULL;
1144 }
1145 
1146 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1147 						       bdaddr_t *bdaddr,
1148 						       int state)
1149 {
1150 	struct discovery_state *cache = &hdev->discovery;
1151 	struct inquiry_entry *e;
1152 
1153 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1154 
1155 	list_for_each_entry(e, &cache->resolve, list) {
1156 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1157 			return e;
1158 		if (!bacmp(&e->data.bdaddr, bdaddr))
1159 			return e;
1160 	}
1161 
1162 	return NULL;
1163 }
1164 
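/* Re-position an entry in the resolve list: entries with a name
 * request already pending keep their place, the remaining entries
 * are ordered by signal strength (strongest first).
 */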
1165 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1166 				      struct inquiry_entry *ie)
1167 {
1168 	struct discovery_state *cache = &hdev->discovery;
1169 	struct list_head *pos = &cache->resolve;
1170 	struct inquiry_entry *p;
1171 
1172 	list_del(&ie->list);
1173 
1174 	list_for_each_entry(p, &cache->resolve, list) {
1175 		if (p->name_state != NAME_PENDING &&
1176 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1177 			break;
1178 		pos = &p->list;
1179 	}
1180 
1181 	list_add(&ie->list, pos);
1182 }
1183 
1184 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1185 			     bool name_known)
1186 {
1187 	struct discovery_state *cache = &hdev->discovery;
1188 	struct inquiry_entry *ie;
1189 	u32 flags = 0;
1190 
1191 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1192 
1193 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1194 
1195 	if (!data->ssp_mode)
1196 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1197 
1198 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1199 	if (ie) {
1200 		if (!ie->data.ssp_mode)
1201 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1202 
1203 		if (ie->name_state == NAME_NEEDED &&
1204 		    data->rssi != ie->data.rssi) {
1205 			ie->data.rssi = data->rssi;
1206 			hci_inquiry_cache_update_resolve(hdev, ie);
1207 		}
1208 
1209 		goto update;
1210 	}
1211 
1212 	/* Entry not in the cache. Add new one. */
1213 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1214 	if (!ie) {
1215 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1216 		goto done;
1217 	}
1218 
1219 	list_add(&ie->all, &cache->all);
1220 
1221 	if (name_known) {
1222 		ie->name_state = NAME_KNOWN;
1223 	} else {
1224 		ie->name_state = NAME_NOT_KNOWN;
1225 		list_add(&ie->list, &cache->unknown);
1226 	}
1227 
1228 update:
1229 	if (name_known && ie->name_state != NAME_KNOWN &&
1230 	    ie->name_state != NAME_PENDING) {
1231 		ie->name_state = NAME_KNOWN;
1232 		list_del(&ie->list);
1233 	}
1234 
1235 	memcpy(&ie->data, data, sizeof(*data));
1236 	ie->timestamp = jiffies;
1237 	cache->timestamp = jiffies;
1238 
1239 	if (ie->name_state == NAME_NOT_KNOWN)
1240 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1241 
1242 done:
1243 	return flags;
1244 }
1245 
1246 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1247 {
1248 	struct discovery_state *cache = &hdev->discovery;
1249 	struct inquiry_info *info = (struct inquiry_info *) buf;
1250 	struct inquiry_entry *e;
1251 	int copied = 0;
1252 
1253 	list_for_each_entry(e, &cache->all, all) {
1254 		struct inquiry_data *data = &e->data;
1255 
1256 		if (copied >= num)
1257 			break;
1258 
1259 		bacpy(&info->bdaddr, &data->bdaddr);
1260 		info->pscan_rep_mode	= data->pscan_rep_mode;
1261 		info->pscan_period_mode	= data->pscan_period_mode;
1262 		info->pscan_mode	= data->pscan_mode;
1263 		memcpy(info->dev_class, data->dev_class, 3);
1264 		info->clock_offset	= data->clock_offset;
1265 
1266 		info++;
1267 		copied++;
1268 	}
1269 
1270 	BT_DBG("cache %p, copied %d", cache, copied);
1271 	return copied;
1272 }
1273 
1274 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1275 {
1276 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1277 	struct hci_dev *hdev = req->hdev;
1278 	struct hci_cp_inquiry cp;
1279 
1280 	BT_DBG("%s", hdev->name);
1281 
1282 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1283 		return 0;
1284 
1285 	/* Start Inquiry */
1286 	memcpy(&cp.lap, &ir->lap, 3);
1287 	cp.length  = ir->length;
1288 	cp.num_rsp = ir->num_rsp;
1289 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1290 
1291 	return 0;
1292 }
1293 
1294 int hci_inquiry(void __user *arg)
1295 {
1296 	__u8 __user *ptr = arg;
1297 	struct hci_inquiry_req ir;
1298 	struct hci_dev *hdev;
1299 	int err = 0, do_inquiry = 0, max_rsp;
1300 	long timeo;
1301 	__u8 *buf;
1302 
1303 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1304 		return -EFAULT;
1305 
1306 	hdev = hci_dev_get(ir.dev_id);
1307 	if (!hdev)
1308 		return -ENODEV;
1309 
1310 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1311 		err = -EBUSY;
1312 		goto done;
1313 	}
1314 
1315 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1316 		err = -EOPNOTSUPP;
1317 		goto done;
1318 	}
1319 
1320 	if (hdev->dev_type != HCI_PRIMARY) {
1321 		err = -EOPNOTSUPP;
1322 		goto done;
1323 	}
1324 
1325 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1326 		err = -EOPNOTSUPP;
1327 		goto done;
1328 	}
1329 
1330 	hci_dev_lock(hdev);
1331 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1332 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1333 		hci_inquiry_cache_flush(hdev);
1334 		do_inquiry = 1;
1335 	}
1336 	hci_dev_unlock(hdev);
1337 
1338 	timeo = ir.length * msecs_to_jiffies(2000);
1339 
1340 	if (do_inquiry) {
1341 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1342 				   timeo, NULL);
1343 		if (err < 0)
1344 			goto done;
1345 
1346 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1347 		 * cleared). If it is interrupted by a signal, return -EINTR.
1348 		 */
1349 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1350 				TASK_INTERRUPTIBLE))
1351 			return -EINTR;
1352 	}
1353 
1354 	/* For an unlimited number of responses we will use a buffer with
1355 	 * 255 entries
1356 	 */
1357 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1358 
1359 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1360 	 * and then copy it to user space.
1361 	 */
1362 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1363 	if (!buf) {
1364 		err = -ENOMEM;
1365 		goto done;
1366 	}
1367 
1368 	hci_dev_lock(hdev);
1369 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1370 	hci_dev_unlock(hdev);
1371 
1372 	BT_DBG("num_rsp %d", ir.num_rsp);
1373 
1374 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1375 		ptr += sizeof(ir);
1376 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1377 				 ir.num_rsp))
1378 			err = -EFAULT;
1379 	} else
1380 		err = -EFAULT;
1381 
1382 	kfree(buf);
1383 
1384 done:
1385 	hci_dev_put(hdev);
1386 	return err;
1387 }
1388 
1389 /**
1390  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1391  *				       (BD_ADDR) for a HCI device from
1392  *				       a firmware node property.
1393  * @hdev:	The HCI device
1394  *
1395  * Search the firmware node for 'local-bd-address'.
1396  *
1397  * All-zero BD addresses are rejected, because those could be properties
1398  * that exist in the firmware tables, but were not updated by the firmware. For
1399  * example, the DTS could define 'local-bd-address' with an all-zero value.
1400  */
1401 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1402 {
1403 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1404 	bdaddr_t ba;
1405 	int ret;
1406 
1407 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1408 					    (u8 *)&ba, sizeof(ba));
1409 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1410 		return;
1411 
1412 	bacpy(&hdev->public_addr, &ba);
1413 }
1414 
1415 static int hci_dev_do_open(struct hci_dev *hdev)
1416 {
1417 	int ret = 0;
1418 
1419 	BT_DBG("%s %p", hdev->name, hdev);
1420 
1421 	hci_req_sync_lock(hdev);
1422 
1423 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1424 		ret = -ENODEV;
1425 		goto done;
1426 	}
1427 
1428 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1429 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1430 		/* Check for rfkill but allow the HCI setup stage to
1431 		 * proceed (which in itself doesn't cause any RF activity).
1432 		 */
1433 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1434 			ret = -ERFKILL;
1435 			goto done;
1436 		}
1437 
1438 		/* Check for valid public address or a configured static
1439 		 * random address, but let the HCI setup proceed to
1440 		 * be able to determine if there is a public address
1441 		 * or not.
1442 		 *
1443 		 * In case of user channel usage, it is not important
1444 		 * if a public address or static random address is
1445 		 * available.
1446 		 *
1447 		 * This check is only valid for BR/EDR controllers
1448 		 * since AMP controllers do not have an address.
1449 		 */
1450 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1451 		    hdev->dev_type == HCI_PRIMARY &&
1452 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1453 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1454 			ret = -EADDRNOTAVAIL;
1455 			goto done;
1456 		}
1457 	}
1458 
1459 	if (test_bit(HCI_UP, &hdev->flags)) {
1460 		ret = -EALREADY;
1461 		goto done;
1462 	}
1463 
1464 	if (hdev->open(hdev)) {
1465 		ret = -EIO;
1466 		goto done;
1467 	}
1468 
1469 	set_bit(HCI_RUNNING, &hdev->flags);
1470 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1471 
1472 	atomic_set(&hdev->cmd_cnt, 1);
1473 	set_bit(HCI_INIT, &hdev->flags);
1474 
1475 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1476 	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1477 		bool invalid_bdaddr;
1478 
1479 		hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1480 
1481 		if (hdev->setup)
1482 			ret = hdev->setup(hdev);
1483 
1484 		/* The transport driver can set the quirk to mark the
1485 		 * BD_ADDR invalid before creating the HCI device or in
1486 		 * its setup callback.
1487 		 */
1488 		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1489 					  &hdev->quirks);
1490 
1491 		if (ret)
1492 			goto setup_failed;
1493 
1494 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1495 			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1496 				hci_dev_get_bd_addr_from_property(hdev);
1497 
1498 			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1499 			    hdev->set_bdaddr) {
1500 				ret = hdev->set_bdaddr(hdev,
1501 						       &hdev->public_addr);
1502 
1503 				/* If setting of the BD_ADDR from the device
1504 				 * property succeeds, then treat the address
1505 				 * as valid even if the invalid BD_ADDR
1506 				 * quirk indicates otherwise.
1507 				 */
1508 				if (!ret)
1509 					invalid_bdaddr = false;
1510 			}
1511 		}
1512 
1513 setup_failed:
1514 		/* The transport driver can set these quirks before
1515 		 * creating the HCI device or in its setup callback.
1516 		 *
1517 		 * For the invalid BD_ADDR quirk it is possible that
1518 		 * it becomes a valid address if the bootloader does
1519 		 * provide it (see above).
1520 		 *
1521 		 * In case any of them is set, the controller has to
1522 		 * start up as unconfigured.
1523 		 */
1524 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1525 		    invalid_bdaddr)
1526 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1527 
1528 		/* For an unconfigured controller it is required to
1529 		 * read at least the version information provided by
1530 		 * the Read Local Version Information command.
1531 		 *
1532 		 * If the set_bdaddr driver callback is provided, then
1533 		 * also the original Bluetooth public device address
1534 		 * will be read using the Read BD Address command.
1535 		 */
1536 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1537 			ret = __hci_unconf_init(hdev);
1538 	}
1539 
1540 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1541 		/* If public address change is configured, ensure that
1542 		 * the address gets programmed. If the driver does not
1543 		 * support changing the public address, fail the power
1544 		 * on procedure.
1545 		 */
1546 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1547 		    hdev->set_bdaddr)
1548 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1549 		else
1550 			ret = -EADDRNOTAVAIL;
1551 	}
1552 
1553 	if (!ret) {
1554 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1555 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1556 			ret = __hci_init(hdev);
1557 			if (!ret && hdev->post_init)
1558 				ret = hdev->post_init(hdev);
1559 		}
1560 	}
1561 
1562 	/* If the HCI Reset command is clearing all diagnostic settings,
1563 	 * then they need to be reprogrammed after the init procedure
1564 	 * has completed.
1565 	 */
1566 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1567 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1568 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1569 		ret = hdev->set_diag(hdev, true);
1570 
1571 	msft_do_open(hdev);
1572 
1573 	clear_bit(HCI_INIT, &hdev->flags);
1574 
1575 	if (!ret) {
1576 		hci_dev_hold(hdev);
1577 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1578 		hci_adv_instances_set_rpa_expired(hdev, true);
1579 		set_bit(HCI_UP, &hdev->flags);
1580 		hci_sock_dev_event(hdev, HCI_DEV_UP);
1581 		hci_leds_update_powered(hdev, true);
1582 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1583 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1584 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1585 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1586 		    hci_dev_test_flag(hdev, HCI_MGMT) &&
1587 		    hdev->dev_type == HCI_PRIMARY) {
1588 			ret = __hci_req_hci_power_on(hdev);
1589 			mgmt_power_on(hdev, ret);
1590 		}
1591 	} else {
1592 		/* Init failed, cleanup */
1593 		flush_work(&hdev->tx_work);
1594 		flush_work(&hdev->cmd_work);
1595 		flush_work(&hdev->rx_work);
1596 
1597 		skb_queue_purge(&hdev->cmd_q);
1598 		skb_queue_purge(&hdev->rx_q);
1599 
1600 		if (hdev->flush)
1601 			hdev->flush(hdev);
1602 
1603 		if (hdev->sent_cmd) {
1604 			kfree_skb(hdev->sent_cmd);
1605 			hdev->sent_cmd = NULL;
1606 		}
1607 
1608 		clear_bit(HCI_RUNNING, &hdev->flags);
1609 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1610 
1611 		hdev->close(hdev);
1612 		hdev->flags &= BIT(HCI_RAW);
1613 	}
1614 
1615 done:
1616 	hci_req_sync_unlock(hdev);
1617 	return ret;
1618 }
1619 
1620 /* ---- HCI ioctl helpers ---- */
1621 
1622 int hci_dev_open(__u16 dev)
1623 {
1624 	struct hci_dev *hdev;
1625 	int err;
1626 
1627 	hdev = hci_dev_get(dev);
1628 	if (!hdev)
1629 		return -ENODEV;
1630 
1631 	/* Devices that are marked as unconfigured can only be powered
1632 	 * up as user channel. Trying to bring them up as normal devices
1633 	 * will result in a failure. Only user channel operation is
1634 	 * possible.
1635 	 *
1636 	 * When this function is called for a user channel, the flag
1637 	 * HCI_USER_CHANNEL will be set first before attempting to
1638 	 * open the device.
1639 	 */
1640 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1641 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1642 		err = -EOPNOTSUPP;
1643 		goto done;
1644 	}
1645 
1646 	/* We need to ensure that no other power on/off work is pending
1647 	 * before proceeding to call hci_dev_do_open. This is
1648 	 * particularly important if the setup procedure has not yet
1649 	 * completed.
1650 	 */
1651 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1652 		cancel_delayed_work(&hdev->power_off);
1653 
1654 	/* After this call it is guaranteed that the setup procedure
1655 	 * has finished. This means that error conditions like RFKILL
1656 	 * or no valid public or static random address apply.
1657 	 */
1658 	flush_workqueue(hdev->req_workqueue);
1659 
1660 	/* For controllers not using the management interface and that
1661 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1662 	 * so that pairing works for them. Once the management interface
1663 	 * is in use this bit will be cleared again and userspace has
1664 	 * to explicitly enable it.
1665 	 */
1666 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1667 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1668 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1669 
1670 	err = hci_dev_do_open(hdev);
1671 
1672 done:
1673 	hci_dev_put(hdev);
1674 	return err;
1675 }
1676 
1677 /* This function requires the caller holds hdev->lock */
1678 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1679 {
1680 	struct hci_conn_params *p;
1681 
1682 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1683 		if (p->conn) {
1684 			hci_conn_drop(p->conn);
1685 			hci_conn_put(p->conn);
1686 			p->conn = NULL;
1687 		}
1688 		list_del_init(&p->action);
1689 	}
1690 
1691 	BT_DBG("All LE pending actions cleared");
1692 }
1693 
1694 int hci_dev_do_close(struct hci_dev *hdev)
1695 {
1696 	bool auto_off;
1697 
1698 	BT_DBG("%s %p", hdev->name, hdev);
1699 
1700 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1701 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1702 	    test_bit(HCI_UP, &hdev->flags)) {
1703 		/* Execute vendor specific shutdown routine */
1704 		if (hdev->shutdown)
1705 			hdev->shutdown(hdev);
1706 	}
1707 
1708 	cancel_delayed_work(&hdev->power_off);
1709 
1710 	hci_request_cancel_all(hdev);
1711 	hci_req_sync_lock(hdev);
1712 
1713 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1714 		cancel_delayed_work_sync(&hdev->cmd_timer);
1715 		hci_req_sync_unlock(hdev);
1716 		return 0;
1717 	}
1718 
1719 	hci_leds_update_powered(hdev, false);
1720 
1721 	/* Flush RX and TX works */
1722 	flush_work(&hdev->tx_work);
1723 	flush_work(&hdev->rx_work);
1724 
1725 	if (hdev->discov_timeout > 0) {
1726 		hdev->discov_timeout = 0;
1727 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1728 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1729 	}
1730 
1731 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1732 		cancel_delayed_work(&hdev->service_cache);
1733 
1734 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1735 		struct adv_info *adv_instance;
1736 
1737 		cancel_delayed_work_sync(&hdev->rpa_expired);
1738 
1739 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1740 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1741 	}
1742 
1743 	/* Avoid potential lockdep warnings from the *_flush() calls by
1744 	 * ensuring the workqueue is empty up front.
1745 	 */
1746 	drain_workqueue(hdev->workqueue);
1747 
1748 	hci_dev_lock(hdev);
1749 
1750 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1751 
1752 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1753 
1754 	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1755 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1756 	    hci_dev_test_flag(hdev, HCI_MGMT))
1757 		__mgmt_power_off(hdev);
1758 
1759 	hci_inquiry_cache_flush(hdev);
1760 	hci_pend_le_actions_clear(hdev);
1761 	hci_conn_hash_flush(hdev);
1762 	hci_dev_unlock(hdev);
1763 
1764 	smp_unregister(hdev);
1765 
1766 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1767 
1768 	msft_do_close(hdev);
1769 
1770 	if (hdev->flush)
1771 		hdev->flush(hdev);
1772 
1773 	/* Reset device */
1774 	skb_queue_purge(&hdev->cmd_q);
1775 	atomic_set(&hdev->cmd_cnt, 1);
1776 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1777 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1778 		set_bit(HCI_INIT, &hdev->flags);
1779 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1780 		clear_bit(HCI_INIT, &hdev->flags);
1781 	}
1782 
1783 	/* Flush cmd work */
1784 	flush_work(&hdev->cmd_work);
1785 
1786 	/* Drop queues */
1787 	skb_queue_purge(&hdev->rx_q);
1788 	skb_queue_purge(&hdev->cmd_q);
1789 	skb_queue_purge(&hdev->raw_q);
1790 
1791 	/* Drop last sent command */
1792 	if (hdev->sent_cmd) {
1793 		cancel_delayed_work_sync(&hdev->cmd_timer);
1794 		kfree_skb(hdev->sent_cmd);
1795 		hdev->sent_cmd = NULL;
1796 	}
1797 
1798 	clear_bit(HCI_RUNNING, &hdev->flags);
1799 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1800 
1801 	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1802 		wake_up(&hdev->suspend_wait_q);
1803 
1804 	/* After this point our queues are empty
1805 	 * and no tasks are scheduled. */
1806 	hdev->close(hdev);
1807 
1808 	/* Clear flags */
1809 	hdev->flags &= BIT(HCI_RAW);
1810 	hci_dev_clear_volatile_flags(hdev);
1811 
1812 	/* Controller radio is available but is currently powered down */
1813 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1814 
1815 	memset(hdev->eir, 0, sizeof(hdev->eir));
1816 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1817 	bacpy(&hdev->random_addr, BDADDR_ANY);
1818 
1819 	hci_req_sync_unlock(hdev);
1820 
1821 	hci_dev_put(hdev);
1822 	return 0;
1823 }
1824 
1825 int hci_dev_close(__u16 dev)
1826 {
1827 	struct hci_dev *hdev;
1828 	int err;
1829 
1830 	hdev = hci_dev_get(dev);
1831 	if (!hdev)
1832 		return -ENODEV;
1833 
1834 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1835 		err = -EBUSY;
1836 		goto done;
1837 	}
1838 
1839 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1840 		cancel_delayed_work(&hdev->power_off);
1841 
1842 	err = hci_dev_do_close(hdev);
1843 
1844 done:
1845 	hci_dev_put(hdev);
1846 	return err;
1847 }
1848 
1849 static int hci_dev_do_reset(struct hci_dev *hdev)
1850 {
1851 	int ret;
1852 
1853 	BT_DBG("%s %p", hdev->name, hdev);
1854 
1855 	hci_req_sync_lock(hdev);
1856 
1857 	/* Drop queues */
1858 	skb_queue_purge(&hdev->rx_q);
1859 	skb_queue_purge(&hdev->cmd_q);
1860 
1861 	/* Avoid potential lockdep warnings from the *_flush() calls by
1862 	 * ensuring the workqueue is empty up front.
1863 	 */
1864 	drain_workqueue(hdev->workqueue);
1865 
1866 	hci_dev_lock(hdev);
1867 	hci_inquiry_cache_flush(hdev);
1868 	hci_conn_hash_flush(hdev);
1869 	hci_dev_unlock(hdev);
1870 
1871 	if (hdev->flush)
1872 		hdev->flush(hdev);
1873 
1874 	atomic_set(&hdev->cmd_cnt, 1);
1875 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1876 
1877 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1878 
1879 	hci_req_sync_unlock(hdev);
1880 	return ret;
1881 }
1882 
1883 int hci_dev_reset(__u16 dev)
1884 {
1885 	struct hci_dev *hdev;
1886 	int err;
1887 
1888 	hdev = hci_dev_get(dev);
1889 	if (!hdev)
1890 		return -ENODEV;
1891 
1892 	if (!test_bit(HCI_UP, &hdev->flags)) {
1893 		err = -ENETDOWN;
1894 		goto done;
1895 	}
1896 
1897 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1898 		err = -EBUSY;
1899 		goto done;
1900 	}
1901 
1902 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1903 		err = -EOPNOTSUPP;
1904 		goto done;
1905 	}
1906 
1907 	err = hci_dev_do_reset(hdev);
1908 
1909 done:
1910 	hci_dev_put(hdev);
1911 	return err;
1912 }
1913 
1914 int hci_dev_reset_stat(__u16 dev)
1915 {
1916 	struct hci_dev *hdev;
1917 	int ret = 0;
1918 
1919 	hdev = hci_dev_get(dev);
1920 	if (!hdev)
1921 		return -ENODEV;
1922 
1923 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1924 		ret = -EBUSY;
1925 		goto done;
1926 	}
1927 
1928 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1929 		ret = -EOPNOTSUPP;
1930 		goto done;
1931 	}
1932 
1933 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1934 
1935 done:
1936 	hci_dev_put(hdev);
1937 	return ret;
1938 }
1939 
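/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode that was changed outside of mgmt (e.g. via the HCISETSCAN
 * ioctl) and, when managed, notify mgmt of the new settings.
 */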
1940 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1941 {
1942 	bool conn_changed, discov_changed;
1943 
1944 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
1945 
1946 	if ((scan & SCAN_PAGE))
1947 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1948 							  HCI_CONNECTABLE);
1949 	else
1950 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1951 							   HCI_CONNECTABLE);
1952 
1953 	if ((scan & SCAN_INQUIRY)) {
1954 		discov_changed = !hci_dev_test_and_set_flag(hdev,
1955 							    HCI_DISCOVERABLE);
1956 	} else {
1957 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1958 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1959 							     HCI_DISCOVERABLE);
1960 	}
1961 
1962 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1963 		return;
1964 
1965 	if (conn_changed || discov_changed) {
1966 		/* In case this was disabled through mgmt */
1967 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1968 
1969 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1970 			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1971 
1972 		mgmt_new_settings(hdev);
1973 	}
1974 }
1975 
1976 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1977 {
1978 	struct hci_dev *hdev;
1979 	struct hci_dev_req dr;
1980 	int err = 0;
1981 
1982 	if (copy_from_user(&dr, arg, sizeof(dr)))
1983 		return -EFAULT;
1984 
1985 	hdev = hci_dev_get(dr.dev_id);
1986 	if (!hdev)
1987 		return -ENODEV;
1988 
1989 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1990 		err = -EBUSY;
1991 		goto done;
1992 	}
1993 
1994 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1995 		err = -EOPNOTSUPP;
1996 		goto done;
1997 	}
1998 
1999 	if (hdev->dev_type != HCI_PRIMARY) {
2000 		err = -EOPNOTSUPP;
2001 		goto done;
2002 	}
2003 
2004 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2005 		err = -EOPNOTSUPP;
2006 		goto done;
2007 	}
2008 
2009 	switch (cmd) {
2010 	case HCISETAUTH:
2011 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2012 				   HCI_INIT_TIMEOUT, NULL);
2013 		break;
2014 
2015 	case HCISETENCRYPT:
2016 		if (!lmp_encrypt_capable(hdev)) {
2017 			err = -EOPNOTSUPP;
2018 			break;
2019 		}
2020 
2021 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
2022 			/* Auth must be enabled first */
2023 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2024 					   HCI_INIT_TIMEOUT, NULL);
2025 			if (err)
2026 				break;
2027 		}
2028 
2029 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2030 				   HCI_INIT_TIMEOUT, NULL);
2031 		break;
2032 
2033 	case HCISETSCAN:
2034 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2035 				   HCI_INIT_TIMEOUT, NULL);
2036 
2037 		/* Ensure that the connectable and discoverable states
2038 		 * get correctly modified as this was a non-mgmt change.
2039 		 */
2040 		if (!err)
2041 			hci_update_scan_state(hdev, dr.dev_opt);
2042 		break;
2043 
2044 	case HCISETLINKPOL:
2045 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2046 				   HCI_INIT_TIMEOUT, NULL);
2047 		break;
2048 
2049 	case HCISETLINKMODE:
2050 		hdev->link_mode = ((__u16) dr.dev_opt) &
2051 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
2052 		break;
2053 
2054 	case HCISETPTYPE:
2055 		if (hdev->pkt_type == (__u16) dr.dev_opt)
2056 			break;
2057 
2058 		hdev->pkt_type = (__u16) dr.dev_opt;
2059 		mgmt_phy_configuration_changed(hdev, NULL);
2060 		break;
2061 
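	/* For HCISETACLMTU and HCISETSCOMTU, dev_opt carries both values
	 * as two consecutive __u16 fields: the packet count first,
	 * followed by the MTU.
	 */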
2062 	case HCISETACLMTU:
2063 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2064 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2065 		break;
2066 
2067 	case HCISETSCOMTU:
2068 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2069 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2070 		break;
2071 
2072 	default:
2073 		err = -EINVAL;
2074 		break;
2075 	}
2076 
2077 done:
2078 	hci_dev_put(hdev);
2079 	return err;
2080 }
2081 
2082 int hci_get_dev_list(void __user *arg)
2083 {
2084 	struct hci_dev *hdev;
2085 	struct hci_dev_list_req *dl;
2086 	struct hci_dev_req *dr;
2087 	int n = 0, size, err;
2088 	__u16 dev_num;
2089 
2090 	if (get_user(dev_num, (__u16 __user *) arg))
2091 		return -EFAULT;
2092 
2093 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2094 		return -EINVAL;
2095 
2096 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2097 
2098 	dl = kzalloc(size, GFP_KERNEL);
2099 	if (!dl)
2100 		return -ENOMEM;
2101 
2102 	dr = dl->dev_req;
2103 
2104 	read_lock(&hci_dev_list_lock);
2105 	list_for_each_entry(hdev, &hci_dev_list, list) {
2106 		unsigned long flags = hdev->flags;
2107 
2108 		/* When the auto-off is configured it means the transport
2109 		 * is running, but in that case still indicate that the
2110 		 * device is actually down.
2111 		 */
2112 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2113 			flags &= ~BIT(HCI_UP);
2114 
2115 		(dr + n)->dev_id  = hdev->id;
2116 		(dr + n)->dev_opt = flags;
2117 
2118 		if (++n >= dev_num)
2119 			break;
2120 	}
2121 	read_unlock(&hci_dev_list_lock);
2122 
2123 	dl->dev_num = n;
2124 	size = sizeof(*dl) + n * sizeof(*dr);
2125 
2126 	err = copy_to_user(arg, dl, size);
2127 	kfree(dl);
2128 
2129 	return err ? -EFAULT : 0;
2130 }
2131 
2132 int hci_get_dev_info(void __user *arg)
2133 {
2134 	struct hci_dev *hdev;
2135 	struct hci_dev_info di;
2136 	unsigned long flags;
2137 	int err = 0;
2138 
2139 	if (copy_from_user(&di, arg, sizeof(di)))
2140 		return -EFAULT;
2141 
2142 	hdev = hci_dev_get(di.dev_id);
2143 	if (!hdev)
2144 		return -ENODEV;
2145 
2146 	/* When the auto-off is configured it means the transport
2147 	 * is running, but in that case still indicate that the
2148 	 * device is actually down.
2149 	 */
2150 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2151 		flags = hdev->flags & ~BIT(HCI_UP);
2152 	else
2153 		flags = hdev->flags;
2154 
2155 	strcpy(di.name, hdev->name);
2156 	di.bdaddr   = hdev->bdaddr;
2157 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2158 	di.flags    = flags;
2159 	di.pkt_type = hdev->pkt_type;
2160 	if (lmp_bredr_capable(hdev)) {
2161 		di.acl_mtu  = hdev->acl_mtu;
2162 		di.acl_pkts = hdev->acl_pkts;
2163 		di.sco_mtu  = hdev->sco_mtu;
2164 		di.sco_pkts = hdev->sco_pkts;
2165 	} else {
2166 		di.acl_mtu  = hdev->le_mtu;
2167 		di.acl_pkts = hdev->le_pkts;
2168 		di.sco_mtu  = 0;
2169 		di.sco_pkts = 0;
2170 	}
2171 	di.link_policy = hdev->link_policy;
2172 	di.link_mode   = hdev->link_mode;
2173 
2174 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2175 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2176 
2177 	if (copy_to_user(arg, &di, sizeof(di)))
2178 		err = -EFAULT;
2179 
2180 	hci_dev_put(hdev);
2181 
2182 	return err;
2183 }
2184 
2185 /* ---- Interface to HCI drivers ---- */
2186 
2187 static int hci_rfkill_set_block(void *data, bool blocked)
2188 {
2189 	struct hci_dev *hdev = data;
2190 
2191 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2192 
2193 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2194 		return -EBUSY;
2195 
2196 	if (blocked) {
2197 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2198 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2199 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2200 			hci_dev_do_close(hdev);
2201 	} else {
2202 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2203 	}
2204 
2205 	return 0;
2206 }
2207 
2208 static const struct rfkill_ops hci_rfkill_ops = {
2209 	.set_block = hci_rfkill_set_block,
2210 };
2211 
2212 static void hci_power_on(struct work_struct *work)
2213 {
2214 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2215 	int err;
2216 
2217 	BT_DBG("%s", hdev->name);
2218 
2219 	if (test_bit(HCI_UP, &hdev->flags) &&
2220 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2221 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2222 		cancel_delayed_work(&hdev->power_off);
2223 		hci_req_sync_lock(hdev);
2224 		err = __hci_req_hci_power_on(hdev);
2225 		hci_req_sync_unlock(hdev);
2226 		mgmt_power_on(hdev, err);
2227 		return;
2228 	}
2229 
2230 	err = hci_dev_do_open(hdev);
2231 	if (err < 0) {
2232 		hci_dev_lock(hdev);
2233 		mgmt_set_powered_failed(hdev, err);
2234 		hci_dev_unlock(hdev);
2235 		return;
2236 	}
2237 
2238 	/* During the HCI setup phase, a few error conditions are
2239 	 * ignored and they need to be checked now. If they are still
2240 	 * valid, it is important to turn the device back off.
2241 	 */
2242 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2243 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2244 	    (hdev->dev_type == HCI_PRIMARY &&
2245 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2246 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2247 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2248 		hci_dev_do_close(hdev);
2249 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2250 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2251 				   HCI_AUTO_OFF_TIMEOUT);
2252 	}
2253 
2254 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2255 		/* For unconfigured devices, set the HCI_RAW flag
2256 		 * so that userspace can easily identify them.
2257 		 */
2258 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2259 			set_bit(HCI_RAW, &hdev->flags);
2260 
2261 		/* For fully configured devices, this will send
2262 		 * the Index Added event. For unconfigured devices,
2263 		 * it will send an Unconfigured Index Added event.
2264 		 *
2265 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2266 		 * and no event will be sent.
2267 		 */
2268 		mgmt_index_added(hdev);
2269 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2270 		/* Once the controller is configured, it is important
2271 		 * to clear the HCI_RAW flag.
2272 		 */
2273 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2274 			clear_bit(HCI_RAW, &hdev->flags);
2275 
2276 		/* Powering on the controller with HCI_CONFIG set only
2277 		 * happens with the transition from unconfigured to
2278 		 * configured. This will send the Index Added event.
2279 		 */
2280 		mgmt_index_added(hdev);
2281 	}
2282 }
2283 
2284 static void hci_power_off(struct work_struct *work)
2285 {
2286 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2287 					    power_off.work);
2288 
2289 	BT_DBG("%s", hdev->name);
2290 
2291 	hci_dev_do_close(hdev);
2292 }
2293 
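/* Handle a hardware error reported by the controller: let the driver
 * specific hw_error handler run (or log the error code) and then
 * restart the controller by closing and re-opening it.
 */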
2294 static void hci_error_reset(struct work_struct *work)
2295 {
2296 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2297 
2298 	BT_DBG("%s", hdev->name);
2299 
2300 	if (hdev->hw_error)
2301 		hdev->hw_error(hdev, hdev->hw_error_code);
2302 	else
2303 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2304 
2305 	if (hci_dev_do_close(hdev))
2306 		return;
2307 
2308 	hci_dev_do_open(hdev);
2309 }
2310 
2311 void hci_uuids_clear(struct hci_dev *hdev)
2312 {
2313 	struct bt_uuid *uuid, *tmp;
2314 
2315 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2316 		list_del(&uuid->list);
2317 		kfree(uuid);
2318 	}
2319 }
2320 
2321 void hci_link_keys_clear(struct hci_dev *hdev)
2322 {
2323 	struct link_key *key;
2324 
2325 	list_for_each_entry(key, &hdev->link_keys, list) {
2326 		list_del_rcu(&key->list);
2327 		kfree_rcu(key, rcu);
2328 	}
2329 }
2330 
2331 void hci_smp_ltks_clear(struct hci_dev *hdev)
2332 {
2333 	struct smp_ltk *k;
2334 
2335 	list_for_each_entry(k, &hdev->long_term_keys, list) {
2336 		list_del_rcu(&k->list);
2337 		kfree_rcu(k, rcu);
2338 	}
2339 }
2340 
2341 void hci_smp_irks_clear(struct hci_dev *hdev)
2342 {
2343 	struct smp_irk *k;
2344 
2345 	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2346 		list_del_rcu(&k->list);
2347 		kfree_rcu(k, rcu);
2348 	}
2349 }
2350 
2351 void hci_blocked_keys_clear(struct hci_dev *hdev)
2352 {
2353 	struct blocked_key *b;
2354 
2355 	list_for_each_entry(b, &hdev->blocked_keys, list) {
2356 		list_del_rcu(&b->list);
2357 		kfree_rcu(b, rcu);
2358 	}
2359 }
2360 
2361 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2362 {
2363 	bool blocked = false;
2364 	struct blocked_key *b;
2365 
2366 	rcu_read_lock();
2367 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2368 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2369 			blocked = true;
2370 			break;
2371 		}
2372 	}
2373 
2374 	rcu_read_unlock();
2375 	return blocked;
2376 }
2377 
2378 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2379 {
2380 	struct link_key *k;
2381 
2382 	rcu_read_lock();
2383 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2384 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2385 			rcu_read_unlock();
2386 
2387 			if (hci_is_blocked_key(hdev,
2388 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
2389 					       k->val)) {
2390 				bt_dev_warn_ratelimited(hdev,
2391 							"Link key blocked for %pMR",
2392 							&k->bdaddr);
2393 				return NULL;
2394 			}
2395 
2396 			return k;
2397 		}
2398 	}
2399 	rcu_read_unlock();
2400 
2401 	return NULL;
2402 }
2403 
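/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and the bonding requirements of both sides of
 * the connection.
 */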
2404 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2405 			       u8 key_type, u8 old_key_type)
2406 {
2407 	/* Legacy key */
2408 	if (key_type < 0x03)
2409 		return true;
2410 
2411 	/* Debug keys are insecure so don't store them persistently */
2412 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2413 		return false;
2414 
2415 	/* Changed combination key and there's no previous one */
2416 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2417 		return false;
2418 
2419 	/* Security mode 3 case */
2420 	if (!conn)
2421 		return true;
2422 
2423 	/* BR/EDR key derived using SC from an LE link */
2424 	if (conn->type == LE_LINK)
2425 		return true;
2426 
2427 	/* Neither local nor remote side had no-bonding as requirement */
2428 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2429 		return true;
2430 
2431 	/* Local side had dedicated bonding as requirement */
2432 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2433 		return true;
2434 
2435 	/* Remote side had dedicated bonding as requirement */
2436 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2437 		return true;
2438 
2439 	/* If none of the above criteria match, then don't store the key
2440 	 * persistently */
2441 	return false;
2442 }
2443 
2444 static u8 ltk_role(u8 type)
2445 {
2446 	if (type == SMP_LTK)
2447 		return HCI_ROLE_MASTER;
2448 
2449 	return HCI_ROLE_SLAVE;
2450 }
2451 
2452 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2453 			     u8 addr_type, u8 role)
2454 {
2455 	struct smp_ltk *k;
2456 
2457 	rcu_read_lock();
2458 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2459 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2460 			continue;
2461 
2462 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2463 			rcu_read_unlock();
2464 
2465 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2466 					       k->val)) {
2467 				bt_dev_warn_ratelimited(hdev,
2468 							"LTK blocked for %pMR",
2469 							&k->bdaddr);
2470 				return NULL;
2471 			}
2472 
2473 			return k;
2474 		}
2475 	}
2476 	rcu_read_unlock();
2477 
2478 	return NULL;
2479 }
2480 
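/* Look up the IRK that resolves the given RPA. A direct match against the
 * last cached RPA is tried first, then the RPA is resolved against each
 * stored IRK and cached on success. Blocked identity keys are rejected.
 */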
2481 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2482 {
2483 	struct smp_irk *irk_to_return = NULL;
2484 	struct smp_irk *irk;
2485 
2486 	rcu_read_lock();
2487 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2488 		if (!bacmp(&irk->rpa, rpa)) {
2489 			irk_to_return = irk;
2490 			goto done;
2491 		}
2492 	}
2493 
2494 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2495 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2496 			bacpy(&irk->rpa, rpa);
2497 			irk_to_return = irk;
2498 			goto done;
2499 		}
2500 	}
2501 
2502 done:
2503 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2504 						irk_to_return->val)) {
2505 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2506 					&irk_to_return->bdaddr);
2507 		irk_to_return = NULL;
2508 	}
2509 
2510 	rcu_read_unlock();
2511 
2512 	return irk_to_return;
2513 }
2514 
2515 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2516 				     u8 addr_type)
2517 {
2518 	struct smp_irk *irk_to_return = NULL;
2519 	struct smp_irk *irk;
2520 
2521 	/* Identity Address must be public or static random */
2522 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2523 		return NULL;
2524 
2525 	rcu_read_lock();
2526 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2527 		if (addr_type == irk->addr_type &&
2528 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2529 			irk_to_return = irk;
2530 			goto done;
2531 		}
2532 	}
2533 
2534 done:
2535 
2536 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2537 						irk_to_return->val)) {
2538 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2539 					&irk_to_return->bdaddr);
2540 		irk_to_return = NULL;
2541 	}
2542 
2543 	rcu_read_unlock();
2544 
2545 	return irk_to_return;
2546 }
2547 
2548 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2549 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2550 				  u8 pin_len, bool *persistent)
2551 {
2552 	struct link_key *key, *old_key;
2553 	u8 old_key_type;
2554 
2555 	old_key = hci_find_link_key(hdev, bdaddr);
2556 	if (old_key) {
2557 		old_key_type = old_key->type;
2558 		key = old_key;
2559 	} else {
2560 		old_key_type = conn ? conn->key_type : 0xff;
2561 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2562 		if (!key)
2563 			return NULL;
2564 		list_add_rcu(&key->list, &hdev->link_keys);
2565 	}
2566 
2567 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2568 
2569 	/* Some buggy controller combinations generate a changed
2570 	 * combination key for legacy pairing even when there's no
2571 	 * previous key */
2572 	if (type == HCI_LK_CHANGED_COMBINATION &&
2573 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2574 		type = HCI_LK_COMBINATION;
2575 		if (conn)
2576 			conn->key_type = type;
2577 	}
2578 
2579 	bacpy(&key->bdaddr, bdaddr);
2580 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2581 	key->pin_len = pin_len;
2582 
2583 	if (type == HCI_LK_CHANGED_COMBINATION)
2584 		key->type = old_key_type;
2585 	else
2586 		key->type = type;
2587 
2588 	if (persistent)
2589 		*persistent = hci_persistent_key(hdev, conn, type,
2590 						 old_key_type);
2591 
2592 	return key;
2593 }
2594 
2595 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2596 			    u8 addr_type, u8 type, u8 authenticated,
2597 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2598 {
2599 	struct smp_ltk *key, *old_key;
2600 	u8 role = ltk_role(type);
2601 
2602 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2603 	if (old_key)
2604 		key = old_key;
2605 	else {
2606 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2607 		if (!key)
2608 			return NULL;
2609 		list_add_rcu(&key->list, &hdev->long_term_keys);
2610 	}
2611 
2612 	bacpy(&key->bdaddr, bdaddr);
2613 	key->bdaddr_type = addr_type;
2614 	memcpy(key->val, tk, sizeof(key->val));
2615 	key->authenticated = authenticated;
2616 	key->ediv = ediv;
2617 	key->rand = rand;
2618 	key->enc_size = enc_size;
2619 	key->type = type;
2620 
2621 	return key;
2622 }
2623 
2624 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2625 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2626 {
2627 	struct smp_irk *irk;
2628 
2629 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2630 	if (!irk) {
2631 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2632 		if (!irk)
2633 			return NULL;
2634 
2635 		bacpy(&irk->bdaddr, bdaddr);
2636 		irk->addr_type = addr_type;
2637 
2638 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2639 	}
2640 
2641 	memcpy(irk->val, val, 16);
2642 	bacpy(&irk->rpa, rpa);
2643 
2644 	return irk;
2645 }
2646 
2647 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2648 {
2649 	struct link_key *key;
2650 
2651 	key = hci_find_link_key(hdev, bdaddr);
2652 	if (!key)
2653 		return -ENOENT;
2654 
2655 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2656 
2657 	list_del_rcu(&key->list);
2658 	kfree_rcu(key, rcu);
2659 
2660 	return 0;
2661 }
2662 
2663 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2664 {
2665 	struct smp_ltk *k;
2666 	int removed = 0;
2667 
2668 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2669 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2670 			continue;
2671 
2672 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2673 
2674 		list_del_rcu(&k->list);
2675 		kfree_rcu(k, rcu);
2676 		removed++;
2677 	}
2678 
2679 	return removed ? 0 : -ENOENT;
2680 }
2681 
2682 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2683 {
2684 	struct smp_irk *k;
2685 
2686 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2687 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2688 			continue;
2689 
2690 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2691 
2692 		list_del_rcu(&k->list);
2693 		kfree_rcu(k, rcu);
2694 	}
2695 }
2696 
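/* Check whether pairing information is stored for the given address: a
 * link key for BR/EDR or an LTK for LE. For LE the address is first
 * resolved to its identity address using any matching IRK.
 */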
2697 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2698 {
2699 	struct smp_ltk *k;
2700 	struct smp_irk *irk;
2701 	u8 addr_type;
2702 
2703 	if (type == BDADDR_BREDR) {
2704 		if (hci_find_link_key(hdev, bdaddr))
2705 			return true;
2706 		return false;
2707 	}
2708 
2709 	/* Convert to HCI addr type which struct smp_ltk uses */
2710 	if (type == BDADDR_LE_PUBLIC)
2711 		addr_type = ADDR_LE_DEV_PUBLIC;
2712 	else
2713 		addr_type = ADDR_LE_DEV_RANDOM;
2714 
2715 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2716 	if (irk) {
2717 		bdaddr = &irk->bdaddr;
2718 		addr_type = irk->addr_type;
2719 	}
2720 
2721 	rcu_read_lock();
2722 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2723 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2724 			rcu_read_unlock();
2725 			return true;
2726 		}
2727 	}
2728 	rcu_read_unlock();
2729 
2730 	return false;
2731 }
2732 
2733 /* HCI command timer function */
2734 static void hci_cmd_timeout(struct work_struct *work)
2735 {
2736 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2737 					    cmd_timer.work);
2738 
2739 	if (hdev->sent_cmd) {
2740 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2741 		u16 opcode = __le16_to_cpu(sent->opcode);
2742 
2743 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2744 	} else {
2745 		bt_dev_err(hdev, "command tx timeout");
2746 	}
2747 
2748 	if (hdev->cmd_timeout)
2749 		hdev->cmd_timeout(hdev);
2750 
2751 	atomic_set(&hdev->cmd_cnt, 1);
2752 	queue_work(hdev->workqueue, &hdev->cmd_work);
2753 }
2754 
2755 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2756 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2757 {
2758 	struct oob_data *data;
2759 
2760 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2761 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2762 			continue;
2763 		if (data->bdaddr_type != bdaddr_type)
2764 			continue;
2765 		return data;
2766 	}
2767 
2768 	return NULL;
2769 }
2770 
2771 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2772 			       u8 bdaddr_type)
2773 {
2774 	struct oob_data *data;
2775 
2776 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2777 	if (!data)
2778 		return -ENOENT;
2779 
2780 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2781 
2782 	list_del(&data->list);
2783 	kfree(data);
2784 
2785 	return 0;
2786 }
2787 
2788 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2789 {
2790 	struct oob_data *data, *n;
2791 
2792 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2793 		list_del(&data->list);
2794 		kfree(data);
2795 	}
2796 }
2797 
2798 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2799 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2800 			    u8 *hash256, u8 *rand256)
2801 {
2802 	struct oob_data *data;
2803 
2804 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2805 	if (!data) {
2806 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2807 		if (!data)
2808 			return -ENOMEM;
2809 
2810 		bacpy(&data->bdaddr, bdaddr);
2811 		data->bdaddr_type = bdaddr_type;
2812 		list_add(&data->list, &hdev->remote_oob_data);
2813 	}
2814 
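	/* The present field records which OOB variants are stored:
	 * 0x01 = P-192 values only, 0x02 = P-256 values only and
	 * 0x03 = both P-192 and P-256 values.
	 */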
2815 	if (hash192 && rand192) {
2816 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2817 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2818 		if (hash256 && rand256)
2819 			data->present = 0x03;
2820 	} else {
2821 		memset(data->hash192, 0, sizeof(data->hash192));
2822 		memset(data->rand192, 0, sizeof(data->rand192));
2823 		if (hash256 && rand256)
2824 			data->present = 0x02;
2825 		else
2826 			data->present = 0x00;
2827 	}
2828 
2829 	if (hash256 && rand256) {
2830 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2831 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2832 	} else {
2833 		memset(data->hash256, 0, sizeof(data->hash256));
2834 		memset(data->rand256, 0, sizeof(data->rand256));
2835 		if (hash192 && rand192)
2836 			data->present = 0x01;
2837 	}
2838 
2839 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2840 
2841 	return 0;
2842 }
2843 
2844 /* This function requires the caller holds hdev->lock */
2845 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2846 {
2847 	struct adv_info *adv_instance;
2848 
2849 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2850 		if (adv_instance->instance == instance)
2851 			return adv_instance;
2852 	}
2853 
2854 	return NULL;
2855 }
2856 
2857 /* This function requires the caller holds hdev->lock */
2858 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2859 {
2860 	struct adv_info *cur_instance;
2861 
2862 	cur_instance = hci_find_adv_instance(hdev, instance);
2863 	if (!cur_instance)
2864 		return NULL;
2865 
2866 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2867 					    struct adv_info, list))
2868 		return list_first_entry(&hdev->adv_instances,
2869 						 struct adv_info, list);
2870 	else
2871 		return list_next_entry(cur_instance, list);
2872 }
2873 
2874 /* This function requires the caller holds hdev->lock */
2875 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2876 {
2877 	struct adv_info *adv_instance;
2878 
2879 	adv_instance = hci_find_adv_instance(hdev, instance);
2880 	if (!adv_instance)
2881 		return -ENOENT;
2882 
2883 	BT_DBG("%s removing instance %d", hdev->name, instance);
2884 
2885 	if (hdev->cur_adv_instance == instance) {
2886 		if (hdev->adv_instance_timeout) {
2887 			cancel_delayed_work(&hdev->adv_instance_expire);
2888 			hdev->adv_instance_timeout = 0;
2889 		}
2890 		hdev->cur_adv_instance = 0x00;
2891 	}
2892 
2893 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2894 
2895 	list_del(&adv_instance->list);
2896 	kfree(adv_instance);
2897 
2898 	hdev->adv_instance_cnt--;
2899 
2900 	return 0;
2901 }
2902 
2903 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2904 {
2905 	struct adv_info *adv_instance, *n;
2906 
2907 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2908 		adv_instance->rpa_expired = rpa_expired;
2909 }
2910 
2911 /* This function requires the caller holds hdev->lock */
2912 void hci_adv_instances_clear(struct hci_dev *hdev)
2913 {
2914 	struct adv_info *adv_instance, *n;
2915 
2916 	if (hdev->adv_instance_timeout) {
2917 		cancel_delayed_work(&hdev->adv_instance_expire);
2918 		hdev->adv_instance_timeout = 0;
2919 	}
2920 
2921 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2922 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2923 		list_del(&adv_instance->list);
2924 		kfree(adv_instance);
2925 	}
2926 
2927 	hdev->adv_instance_cnt = 0;
2928 	hdev->cur_adv_instance = 0x00;
2929 }
2930 
2931 static void adv_instance_rpa_expired(struct work_struct *work)
2932 {
2933 	struct adv_info *adv_instance = container_of(work, struct adv_info,
2934 						     rpa_expired_cb.work);
2935 
2936 	BT_DBG("");
2937 
2938 	adv_instance->rpa_expired = true;
2939 }
2940 
2941 /* This function requires the caller holds hdev->lock */
2942 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2943 			 u16 adv_data_len, u8 *adv_data,
2944 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2945 			 u16 timeout, u16 duration)
2946 {
2947 	struct adv_info *adv_instance;
2948 
2949 	adv_instance = hci_find_adv_instance(hdev, instance);
2950 	if (adv_instance) {
2951 		memset(adv_instance->adv_data, 0,
2952 		       sizeof(adv_instance->adv_data));
2953 		memset(adv_instance->scan_rsp_data, 0,
2954 		       sizeof(adv_instance->scan_rsp_data));
2955 	} else {
2956 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2957 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2958 			return -EOVERFLOW;
2959 
2960 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2961 		if (!adv_instance)
2962 			return -ENOMEM;
2963 
2964 		adv_instance->pending = true;
2965 		adv_instance->instance = instance;
2966 		list_add(&adv_instance->list, &hdev->adv_instances);
2967 		hdev->adv_instance_cnt++;
2968 	}
2969 
2970 	adv_instance->flags = flags;
2971 	adv_instance->adv_data_len = adv_data_len;
2972 	adv_instance->scan_rsp_len = scan_rsp_len;
2973 
2974 	if (adv_data_len)
2975 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2976 
2977 	if (scan_rsp_len)
2978 		memcpy(adv_instance->scan_rsp_data,
2979 		       scan_rsp_data, scan_rsp_len);
2980 
2981 	adv_instance->timeout = timeout;
2982 	adv_instance->remaining_time = timeout;
2983 
2984 	if (duration == 0)
2985 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2986 	else
2987 		adv_instance->duration = duration;
2988 
2989 	adv_instance->tx_power = HCI_TX_POWER_INVALID;
2990 
2991 	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2992 			  adv_instance_rpa_expired);
2993 
2994 	BT_DBG("%s for instance %d", hdev->name, instance);
2995 
2996 	return 0;
2997 }
2998 
2999 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3000 					 bdaddr_t *bdaddr, u8 type)
3001 {
3002 	struct bdaddr_list *b;
3003 
3004 	list_for_each_entry(b, bdaddr_list, list) {
3005 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3006 			return b;
3007 	}
3008 
3009 	return NULL;
3010 }
3011 
3012 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3013 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3014 				u8 type)
3015 {
3016 	struct bdaddr_list_with_irk *b;
3017 
3018 	list_for_each_entry(b, bdaddr_list, list) {
3019 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3020 			return b;
3021 	}
3022 
3023 	return NULL;
3024 }
3025 
3026 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3027 {
3028 	struct bdaddr_list *b, *n;
3029 
3030 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
3031 		list_del(&b->list);
3032 		kfree(b);
3033 	}
3034 }
3035 
3036 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3037 {
3038 	struct bdaddr_list *entry;
3039 
3040 	if (!bacmp(bdaddr, BDADDR_ANY))
3041 		return -EBADF;
3042 
3043 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
3044 		return -EEXIST;
3045 
3046 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3047 	if (!entry)
3048 		return -ENOMEM;
3049 
3050 	bacpy(&entry->bdaddr, bdaddr);
3051 	entry->bdaddr_type = type;
3052 
3053 	list_add(&entry->list, list);
3054 
3055 	return 0;
3056 }
3057 
3058 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3059 					u8 type, u8 *peer_irk, u8 *local_irk)
3060 {
3061 	struct bdaddr_list_with_irk *entry;
3062 
3063 	if (!bacmp(bdaddr, BDADDR_ANY))
3064 		return -EBADF;
3065 
3066 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
3067 		return -EEXIST;
3068 
3069 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3070 	if (!entry)
3071 		return -ENOMEM;
3072 
3073 	bacpy(&entry->bdaddr, bdaddr);
3074 	entry->bdaddr_type = type;
3075 
3076 	if (peer_irk)
3077 		memcpy(entry->peer_irk, peer_irk, 16);
3078 
3079 	if (local_irk)
3080 		memcpy(entry->local_irk, local_irk, 16);
3081 
3082 	list_add(&entry->list, list);
3083 
3084 	return 0;
3085 }
3086 
3087 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3088 {
3089 	struct bdaddr_list *entry;
3090 
3091 	if (!bacmp(bdaddr, BDADDR_ANY)) {
3092 		hci_bdaddr_list_clear(list);
3093 		return 0;
3094 	}
3095 
3096 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3097 	if (!entry)
3098 		return -ENOENT;
3099 
3100 	list_del(&entry->list);
3101 	kfree(entry);
3102 
3103 	return 0;
3104 }
3105 
3106 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3107 							u8 type)
3108 {
3109 	struct bdaddr_list_with_irk *entry;
3110 
3111 	if (!bacmp(bdaddr, BDADDR_ANY)) {
3112 		hci_bdaddr_list_clear(list);
3113 		return 0;
3114 	}
3115 
3116 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3117 	if (!entry)
3118 		return -ENOENT;
3119 
3120 	list_del(&entry->list);
3121 	kfree(entry);
3122 
3123 	return 0;
3124 }
3125 
3126 /* This function requires the caller holds hdev->lock */
3127 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3128 					       bdaddr_t *addr, u8 addr_type)
3129 {
3130 	struct hci_conn_params *params;
3131 
3132 	list_for_each_entry(params, &hdev->le_conn_params, list) {
3133 		if (bacmp(&params->addr, addr) == 0 &&
3134 		    params->addr_type == addr_type) {
3135 			return params;
3136 		}
3137 	}
3138 
3139 	return NULL;
3140 }
3141 
3142 /* This function requires the caller holds hdev->lock */
3143 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3144 						  bdaddr_t *addr, u8 addr_type)
3145 {
3146 	struct hci_conn_params *param;
3147 
3148 	list_for_each_entry(param, list, action) {
3149 		if (bacmp(&param->addr, addr) == 0 &&
3150 		    param->addr_type == addr_type)
3151 			return param;
3152 	}
3153 
3154 	return NULL;
3155 }
3156 
3157 /* This function requires the caller holds hdev->lock */
3158 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3159 					    bdaddr_t *addr, u8 addr_type)
3160 {
3161 	struct hci_conn_params *params;
3162 
3163 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3164 	if (params)
3165 		return params;
3166 
3167 	params = kzalloc(sizeof(*params), GFP_KERNEL);
3168 	if (!params) {
3169 		bt_dev_err(hdev, "out of memory");
3170 		return NULL;
3171 	}
3172 
3173 	bacpy(&params->addr, addr);
3174 	params->addr_type = addr_type;
3175 
3176 	list_add(&params->list, &hdev->le_conn_params);
3177 	INIT_LIST_HEAD(&params->action);
3178 
3179 	params->conn_min_interval = hdev->le_conn_min_interval;
3180 	params->conn_max_interval = hdev->le_conn_max_interval;
3181 	params->conn_latency = hdev->le_conn_latency;
3182 	params->supervision_timeout = hdev->le_supv_timeout;
3183 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
3184 
3185 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3186 
3187 	return params;
3188 }
3189 
3190 static void hci_conn_params_free(struct hci_conn_params *params)
3191 {
3192 	if (params->conn) {
3193 		hci_conn_drop(params->conn);
3194 		hci_conn_put(params->conn);
3195 	}
3196 
3197 	list_del(&params->action);
3198 	list_del(&params->list);
3199 	kfree(params);
3200 }
3201 
3202 /* This function requires the caller holds hdev->lock */
3203 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3204 {
3205 	struct hci_conn_params *params;
3206 
3207 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3208 	if (!params)
3209 		return;
3210 
3211 	hci_conn_params_free(params);
3212 
3213 	hci_update_background_scan(hdev);
3214 
3215 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3216 }
3217 
3218 /* This function requires the caller holds hdev->lock */
3219 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3220 {
3221 	struct hci_conn_params *params, *tmp;
3222 
3223 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3224 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3225 			continue;
3226 
3227 		/* If trying to establish a one-time connection to a disabled
3228 		 * device, leave the params but mark them as one-time only.
3229 		 */
3230 		if (params->explicit_connect) {
3231 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3232 			continue;
3233 		}
3234 
3235 		list_del(&params->list);
3236 		kfree(params);
3237 	}
3238 
3239 	BT_DBG("All LE disabled connection parameters were removed");
3240 }
3241 
3242 /* This function requires the caller holds hdev->lock */
3243 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3244 {
3245 	struct hci_conn_params *params, *tmp;
3246 
3247 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3248 		hci_conn_params_free(params);
3249 
3250 	BT_DBG("All LE connection parameters were removed");
3251 }
3252 
3253 /* Copy the Identity Address of the controller.
3254  *
3255  * If the controller has a public BD_ADDR, then by default use that one.
3256  * If this is a LE only controller without a public address, default to
3257  * the static random address.
3258  *
3259  * For debugging purposes it is possible to force controllers with a
3260  * public address to use the static random address instead.
3261  *
3262  * In case BR/EDR has been disabled on a dual-mode controller and
3263  * userspace has configured a static address, then that address
3264  * becomes the identity address instead of the public BR/EDR address.
3265  */
3266 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3267 			       u8 *bdaddr_type)
3268 {
3269 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3270 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3271 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3272 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
3273 		bacpy(bdaddr, &hdev->static_addr);
3274 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3275 	} else {
3276 		bacpy(bdaddr, &hdev->bdaddr);
3277 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3278 	}
3279 }
3280 
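/* Wait until every pending bit in hdev->suspend_tasks has been cleared or
 * the suspend notifier timeout expires, in which case the remaining bits
 * are cleared and -ETIMEDOUT is returned.
 */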
3281 static int hci_suspend_wait_event(struct hci_dev *hdev)
3282 {
3283 #define WAKE_COND                                                              \
3284 	(find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3285 	 __SUSPEND_NUM_TASKS)
3286 
3287 	int i;
3288 	int ret = wait_event_timeout(hdev->suspend_wait_q,
3289 				     WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3290 
3291 	if (ret == 0) {
3292 		bt_dev_dbg(hdev, "Timed out waiting for suspend");
3293 		for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3294 			if (test_bit(i, hdev->suspend_tasks))
3295 				bt_dev_dbg(hdev, "Bit %d is set", i);
3296 			clear_bit(i, hdev->suspend_tasks);
3297 		}
3298 
3299 		ret = -ETIMEDOUT;
3300 	} else {
3301 		ret = 0;
3302 	}
3303 
3304 	return ret;
3305 }
3306 
3307 static void hci_prepare_suspend(struct work_struct *work)
3308 {
3309 	struct hci_dev *hdev =
3310 		container_of(work, struct hci_dev, suspend_prepare);
3311 
3312 	hci_dev_lock(hdev);
3313 	hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3314 	hci_dev_unlock(hdev);
3315 }
3316 
3317 static int hci_change_suspend_state(struct hci_dev *hdev,
3318 				    enum suspended_state next)
3319 {
3320 	hdev->suspend_state_next = next;
3321 	set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3322 	queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3323 	return hci_suspend_wait_event(hdev);
3324 }
3325 
3326 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3327 				void *data)
3328 {
3329 	struct hci_dev *hdev =
3330 		container_of(nb, struct hci_dev, suspend_notifier);
3331 	int ret = 0;
3332 
3333 	/* If powering down, wait for completion. */
3334 	if (mgmt_powering_down(hdev)) {
3335 		set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3336 		ret = hci_suspend_wait_event(hdev);
3337 		if (ret)
3338 			goto done;
3339 	}
3340 
3341 	/* Suspend notifier should only act on events when powered. */
3342 	if (!hdev_is_powered(hdev))
3343 		goto done;
3344 
3345 	if (action == PM_SUSPEND_PREPARE) {
3346 		/* Suspend consists of two actions:
3347 		 *  - First, disconnect everything and make the controller not
3348 		 *    connectable (disabling scanning)
3349 		 *  - Second, program event filter/whitelist and enable scan
3350 		 */
3351 		ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3352 
3353 		/* Only configure whitelist if disconnect succeeded */
3354 		if (!ret)
3355 			ret = hci_change_suspend_state(hdev,
3356 						       BT_SUSPEND_COMPLETE);
3357 	} else if (action == PM_POST_SUSPEND) {
3358 		ret = hci_change_suspend_state(hdev, BT_RUNNING);
3359 	}
3360 
3361 	/* If suspend failed, restore it to running */
3362 	if (ret && action == PM_SUSPEND_PREPARE)
3363 		hci_change_suspend_state(hdev, BT_RUNNING);
3364 
3365 done:
3366 	return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP;
3367 }
3368 
3369 /* Alloc HCI device */
3370 struct hci_dev *hci_alloc_dev(void)
3371 {
3372 	struct hci_dev *hdev;
3373 
3374 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3375 	if (!hdev)
3376 		return NULL;
3377 
3378 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3379 	hdev->esco_type = (ESCO_HV1);
3380 	hdev->link_mode = (HCI_LM_ACCEPT);
3381 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
3382 	hdev->io_capability = 0x03;	/* No Input No Output */
3383 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3384 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3385 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3386 	hdev->adv_instance_cnt = 0;
3387 	hdev->cur_adv_instance = 0x00;
3388 	hdev->adv_instance_timeout = 0;
3389 
3390 	hdev->sniff_max_interval = 800;
3391 	hdev->sniff_min_interval = 80;
3392 
3393 	hdev->le_adv_channel_map = 0x07;
3394 	hdev->le_adv_min_interval = 0x0800;
3395 	hdev->le_adv_max_interval = 0x0800;
3396 	hdev->le_scan_interval = 0x0060;
3397 	hdev->le_scan_window = 0x0030;
3398 	hdev->le_conn_min_interval = 0x0018;
3399 	hdev->le_conn_max_interval = 0x0028;
3400 	hdev->le_conn_latency = 0x0000;
3401 	hdev->le_supv_timeout = 0x002a;
3402 	hdev->le_def_tx_len = 0x001b;
3403 	hdev->le_def_tx_time = 0x0148;
3404 	hdev->le_max_tx_len = 0x001b;
3405 	hdev->le_max_tx_time = 0x0148;
3406 	hdev->le_max_rx_len = 0x001b;
3407 	hdev->le_max_rx_time = 0x0148;
3408 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3409 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3410 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3411 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3412 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3413 
3414 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3415 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3416 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3417 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3418 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3419 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3420 
3421 	mutex_init(&hdev->lock);
3422 	mutex_init(&hdev->req_lock);
3423 
3424 	INIT_LIST_HEAD(&hdev->mgmt_pending);
3425 	INIT_LIST_HEAD(&hdev->blacklist);
3426 	INIT_LIST_HEAD(&hdev->whitelist);
3427 	INIT_LIST_HEAD(&hdev->wakeable);
3428 	INIT_LIST_HEAD(&hdev->uuids);
3429 	INIT_LIST_HEAD(&hdev->link_keys);
3430 	INIT_LIST_HEAD(&hdev->long_term_keys);
3431 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3432 	INIT_LIST_HEAD(&hdev->remote_oob_data);
3433 	INIT_LIST_HEAD(&hdev->le_white_list);
3434 	INIT_LIST_HEAD(&hdev->le_resolv_list);
3435 	INIT_LIST_HEAD(&hdev->le_conn_params);
3436 	INIT_LIST_HEAD(&hdev->pend_le_conns);
3437 	INIT_LIST_HEAD(&hdev->pend_le_reports);
3438 	INIT_LIST_HEAD(&hdev->conn_hash.list);
3439 	INIT_LIST_HEAD(&hdev->adv_instances);
3440 	INIT_LIST_HEAD(&hdev->blocked_keys);
3441 
3442 	INIT_WORK(&hdev->rx_work, hci_rx_work);
3443 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3444 	INIT_WORK(&hdev->tx_work, hci_tx_work);
3445 	INIT_WORK(&hdev->power_on, hci_power_on);
3446 	INIT_WORK(&hdev->error_reset, hci_error_reset);
3447 	INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3448 
3449 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3450 
3451 	skb_queue_head_init(&hdev->rx_q);
3452 	skb_queue_head_init(&hdev->cmd_q);
3453 	skb_queue_head_init(&hdev->raw_q);
3454 
3455 	init_waitqueue_head(&hdev->req_wait_q);
3456 	init_waitqueue_head(&hdev->suspend_wait_q);
3457 
3458 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3459 
3460 	hci_request_setup(hdev);
3461 
3462 	hci_init_sysfs(hdev);
3463 	discovery_init(hdev);
3464 
3465 	return hdev;
3466 }
3467 EXPORT_SYMBOL(hci_alloc_dev);
3468 
3469 /* Free HCI device */
3470 void hci_free_dev(struct hci_dev *hdev)
3471 {
3472 	/* will free via device release */
3473 	put_device(&hdev->dev);
3474 }
3475 EXPORT_SYMBOL(hci_free_dev);
3476 
3477 /* Register HCI device */
3478 int hci_register_dev(struct hci_dev *hdev)
3479 {
3480 	int id, error;
3481 
3482 	if (!hdev->open || !hdev->close || !hdev->send)
3483 		return -EINVAL;
3484 
3485 	/* Do not allow HCI_AMP devices to register at index 0,
3486 	 * so the index can be used as the AMP controller ID.
3487 	 */
3488 	switch (hdev->dev_type) {
3489 	case HCI_PRIMARY:
3490 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3491 		break;
3492 	case HCI_AMP:
3493 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3494 		break;
3495 	default:
3496 		return -EINVAL;
3497 	}
3498 
3499 	if (id < 0)
3500 		return id;
3501 
3502 	sprintf(hdev->name, "hci%d", id);
3503 	hdev->id = id;
3504 
3505 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3506 
3507 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3508 	if (!hdev->workqueue) {
3509 		error = -ENOMEM;
3510 		goto err;
3511 	}
3512 
3513 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3514 						      hdev->name);
3515 	if (!hdev->req_workqueue) {
3516 		destroy_workqueue(hdev->workqueue);
3517 		error = -ENOMEM;
3518 		goto err;
3519 	}
3520 
3521 	if (!IS_ERR_OR_NULL(bt_debugfs))
3522 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3523 
3524 	dev_set_name(&hdev->dev, "%s", hdev->name);
3525 
3526 	error = device_add(&hdev->dev);
3527 	if (error < 0)
3528 		goto err_wqueue;
3529 
3530 	hci_leds_init(hdev);
3531 
3532 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3533 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3534 				    hdev);
3535 	if (hdev->rfkill) {
3536 		if (rfkill_register(hdev->rfkill) < 0) {
3537 			rfkill_destroy(hdev->rfkill);
3538 			hdev->rfkill = NULL;
3539 		}
3540 	}
3541 
3542 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3543 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3544 
3545 	hci_dev_set_flag(hdev, HCI_SETUP);
3546 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3547 
3548 	if (hdev->dev_type == HCI_PRIMARY) {
3549 		/* Assume BR/EDR support until proven otherwise (such as
3550 		 * through reading supported features during init.
3551 		 * through reading the supported features during init).
3552 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3553 	}
3554 
3555 	write_lock(&hci_dev_list_lock);
3556 	list_add(&hdev->list, &hci_dev_list);
3557 	write_unlock(&hci_dev_list_lock);
3558 
3559 	/* Devices that are marked for raw-only usage are unconfigured
3560 	 * and should not be included in normal operation.
3561 	 */
3562 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3563 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3564 
3565 	hci_sock_dev_event(hdev, HCI_DEV_REG);
3566 	hci_dev_hold(hdev);
3567 
3568 	hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3569 	error = register_pm_notifier(&hdev->suspend_notifier);
3570 	if (error)
3571 		goto err_wqueue;
3572 
3573 	queue_work(hdev->req_workqueue, &hdev->power_on);
3574 
3575 	return id;
3576 
3577 err_wqueue:
3578 	destroy_workqueue(hdev->workqueue);
3579 	destroy_workqueue(hdev->req_workqueue);
3580 err:
3581 	ida_simple_remove(&hci_index_ida, hdev->id);
3582 
3583 	return error;
3584 }
3585 EXPORT_SYMBOL(hci_register_dev);
3586 
3587 /* Unregister HCI device */
3588 void hci_unregister_dev(struct hci_dev *hdev)
3589 {
3590 	int id;
3591 
3592 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3593 
3594 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3595 
3596 	id = hdev->id;
3597 
3598 	write_lock(&hci_dev_list_lock);
3599 	list_del(&hdev->list);
3600 	write_unlock(&hci_dev_list_lock);
3601 
3602 	cancel_work_sync(&hdev->power_on);
3603 
3604 	hci_dev_do_close(hdev);
3605 
3606 	unregister_pm_notifier(&hdev->suspend_notifier);
3607 
3608 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3609 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3610 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3611 		hci_dev_lock(hdev);
3612 		mgmt_index_removed(hdev);
3613 		hci_dev_unlock(hdev);
3614 	}
3615 
3616 	/* mgmt_index_removed should take care of emptying the
3617 	 * pending list */
3618 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3619 
3620 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3621 
3622 	if (hdev->rfkill) {
3623 		rfkill_unregister(hdev->rfkill);
3624 		rfkill_destroy(hdev->rfkill);
3625 	}
3626 
3627 	device_del(&hdev->dev);
3628 
3629 	debugfs_remove_recursive(hdev->debugfs);
3630 	kfree_const(hdev->hw_info);
3631 	kfree_const(hdev->fw_info);
3632 
3633 	destroy_workqueue(hdev->workqueue);
3634 	destroy_workqueue(hdev->req_workqueue);
3635 
3636 	hci_dev_lock(hdev);
3637 	hci_bdaddr_list_clear(&hdev->blacklist);
3638 	hci_bdaddr_list_clear(&hdev->whitelist);
3639 	hci_uuids_clear(hdev);
3640 	hci_link_keys_clear(hdev);
3641 	hci_smp_ltks_clear(hdev);
3642 	hci_smp_irks_clear(hdev);
3643 	hci_remote_oob_data_clear(hdev);
3644 	hci_adv_instances_clear(hdev);
3645 	hci_bdaddr_list_clear(&hdev->le_white_list);
3646 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
3647 	hci_conn_params_clear_all(hdev);
3648 	hci_discovery_filter_clear(hdev);
3649 	hci_blocked_keys_clear(hdev);
3650 	hci_dev_unlock(hdev);
3651 
3652 	hci_dev_put(hdev);
3653 
3654 	ida_simple_remove(&hci_index_ida, id);
3655 }
3656 EXPORT_SYMBOL(hci_unregister_dev);
3657 
3658 /* Suspend HCI device */
3659 int hci_suspend_dev(struct hci_dev *hdev)
3660 {
3661 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3662 	return 0;
3663 }
3664 EXPORT_SYMBOL(hci_suspend_dev);
3665 
3666 /* Resume HCI device */
3667 int hci_resume_dev(struct hci_dev *hdev)
3668 {
3669 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3670 	return 0;
3671 }
3672 EXPORT_SYMBOL(hci_resume_dev);
3673 
3674 /* Reset HCI device */
3675 int hci_reset_dev(struct hci_dev *hdev)
3676 {
3677 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3678 	struct sk_buff *skb;
3679 
3680 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3681 	if (!skb)
3682 		return -ENOMEM;
3683 
3684 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3685 	skb_put_data(skb, hw_err, 3);
3686 
3687 	/* Send Hardware Error to upper stack */
3688 	return hci_recv_frame(hdev, skb);
3689 }
3690 EXPORT_SYMBOL(hci_reset_dev);
3691 
3692 /* Receive frame from HCI drivers */
3693 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3694 {
3695 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3696 		      !test_bit(HCI_INIT, &hdev->flags))) {
3697 		kfree_skb(skb);
3698 		return -ENXIO;
3699 	}
3700 
3701 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3702 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3703 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3704 	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3705 		kfree_skb(skb);
3706 		return -EINVAL;
3707 	}
3708 
3709 	/* Incoming skb */
3710 	bt_cb(skb)->incoming = 1;
3711 
3712 	/* Time stamp */
3713 	__net_timestamp(skb);
3714 
3715 	skb_queue_tail(&hdev->rx_q, skb);
3716 	queue_work(hdev->workqueue, &hdev->rx_work);
3717 
3718 	return 0;
3719 }
3720 EXPORT_SYMBOL(hci_recv_frame);
3721 
3722 /* Receive diagnostic message from HCI drivers */
3723 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3724 {
3725 	/* Mark as diagnostic packet */
3726 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3727 
3728 	/* Time stamp */
3729 	__net_timestamp(skb);
3730 
3731 	skb_queue_tail(&hdev->rx_q, skb);
3732 	queue_work(hdev->workqueue, &hdev->rx_work);
3733 
3734 	return 0;
3735 }
3736 EXPORT_SYMBOL(hci_recv_diag);
3737 
3738 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3739 {
3740 	va_list vargs;
3741 
3742 	va_start(vargs, fmt);
3743 	kfree_const(hdev->hw_info);
3744 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3745 	va_end(vargs);
3746 }
3747 EXPORT_SYMBOL(hci_set_hw_info);
3748 
3749 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3750 {
3751 	va_list vargs;
3752 
3753 	va_start(vargs, fmt);
3754 	kfree_const(hdev->fw_info);
3755 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3756 	va_end(vargs);
3757 }
3758 EXPORT_SYMBOL(hci_set_fw_info);
3759 
3760 /* ---- Interface to upper protocols ---- */
3761 
3762 int hci_register_cb(struct hci_cb *cb)
3763 {
3764 	BT_DBG("%p name %s", cb, cb->name);
3765 
3766 	mutex_lock(&hci_cb_list_lock);
3767 	list_add_tail(&cb->list, &hci_cb_list);
3768 	mutex_unlock(&hci_cb_list_lock);
3769 
3770 	return 0;
3771 }
3772 EXPORT_SYMBOL(hci_register_cb);
3773 
3774 int hci_unregister_cb(struct hci_cb *cb)
3775 {
3776 	BT_DBG("%p name %s", cb, cb->name);
3777 
3778 	mutex_lock(&hci_cb_list_lock);
3779 	list_del(&cb->list);
3780 	mutex_unlock(&hci_cb_list_lock);
3781 
3782 	return 0;
3783 }
3784 EXPORT_SYMBOL(hci_unregister_cb);
3785 
3786 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3787 {
3788 	int err;
3789 
3790 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3791 	       skb->len);
3792 
3793 	/* Time stamp */
3794 	__net_timestamp(skb);
3795 
3796 	/* Send copy to monitor */
3797 	hci_send_to_monitor(hdev, skb);
3798 
3799 	if (atomic_read(&hdev->promisc)) {
3800 		/* Send copy to the sockets */
3801 		hci_send_to_sock(hdev, skb);
3802 	}
3803 
3804 	/* Get rid of skb owner, prior to sending to the driver. */
3805 	skb_orphan(skb);
3806 
3807 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3808 		kfree_skb(skb);
3809 		return;
3810 	}
3811 
3812 	err = hdev->send(hdev, skb);
3813 	if (err < 0) {
3814 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3815 		kfree_skb(skb);
3816 	}
3817 }
3818 
3819 /* Send HCI command */
3820 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3821 		 const void *param)
3822 {
3823 	struct sk_buff *skb;
3824 
3825 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3826 
3827 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3828 	if (!skb) {
3829 		bt_dev_err(hdev, "no memory for command");
3830 		return -ENOMEM;
3831 	}
3832 
3833 	/* Stand-alone HCI commands must be flagged as
3834 	 * single-command requests.
3835 	 */
3836 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3837 
3838 	skb_queue_tail(&hdev->cmd_q, skb);
3839 	queue_work(hdev->workqueue, &hdev->cmd_work);
3840 
3841 	return 0;
3842 }
3843 
3844 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3845 		   const void *param)
3846 {
3847 	struct sk_buff *skb;
3848 
3849 	if (hci_opcode_ogf(opcode) != 0x3f) {
3850 		/* A controller receiving a command shall respond with either
3851 		 * a Command Status Event or a Command Complete Event.
3852 		 * Therefore, all standard HCI commands must be sent via the
3853 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3854 		 * Some vendors do not comply with this rule for vendor-specific
3855 		 * commands and do not return any event. We want to support
3856 		 * unresponded commands for such cases only.
3857 		 */
3858 		bt_dev_err(hdev, "unresponded command not supported");
3859 		return -EINVAL;
3860 	}
3861 
3862 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3863 	if (!skb) {
3864 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3865 			   opcode);
3866 		return -ENOMEM;
3867 	}
3868 
3869 	hci_send_frame(hdev, skb);
3870 
3871 	return 0;
3872 }
3873 EXPORT_SYMBOL(__hci_cmd_send);
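/* Hedged example: a driver issuing a vendor command that is known not to
 * generate any completion event might do (0xfc01 is a made-up opcode in
 * the vendor OGF 0x3f):
 *
 *	u8 param = 0x01;
 *
 *	err = __hci_cmd_send(hdev, 0xfc01, sizeof(param), &param);
 *
 * Opcodes outside OGF 0x3f are rejected with -EINVAL above, since standard
 * commands must go through hci_send_cmd() or the sync helpers.
 */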
3874 
3875 /* Get data from the previously sent command */
3876 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3877 {
3878 	struct hci_command_hdr *hdr;
3879 
3880 	if (!hdev->sent_cmd)
3881 		return NULL;
3882 
3883 	hdr = (void *) hdev->sent_cmd->data;
3884 
3885 	if (hdr->opcode != cpu_to_le16(opcode))
3886 		return NULL;
3887 
3888 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3889 
3890 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3891 }
3892 
3893 /* Send HCI command and wait for Command Complete event */
3894 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3895 			     const void *param, u32 timeout)
3896 {
3897 	struct sk_buff *skb;
3898 
3899 	if (!test_bit(HCI_UP, &hdev->flags))
3900 		return ERR_PTR(-ENETDOWN);
3901 
3902 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3903 
3904 	hci_req_sync_lock(hdev);
3905 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3906 	hci_req_sync_unlock(hdev);
3907 
3908 	return skb;
3909 }
3910 EXPORT_SYMBOL(hci_cmd_sync);
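/* Usage sketch: the caller receives the command's return parameters as an
 * skb (or an ERR_PTR on failure) and owns it afterwards, e.g.:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */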
3911 
3912 /* Send ACL data */
3913 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3914 {
3915 	struct hci_acl_hdr *hdr;
3916 	int len = skb->len;
3917 
3918 	skb_push(skb, HCI_ACL_HDR_SIZE);
3919 	skb_reset_transport_header(skb);
3920 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3921 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3922 	hdr->dlen   = cpu_to_le16(len);
3923 }
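/* hci_handle_pack() places the 12-bit connection handle in the low bits
 * and the packet boundary/broadcast flags in bits 12-15, mirroring the
 * hci_handle()/hci_flags() accessors used on the receive path below.
 * For example, handle 0x002a with ACL_START (0x02) yields 0x202a.
 */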
3924 
3925 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3926 			  struct sk_buff *skb, __u16 flags)
3927 {
3928 	struct hci_conn *conn = chan->conn;
3929 	struct hci_dev *hdev = conn->hdev;
3930 	struct sk_buff *list;
3931 
3932 	skb->len = skb_headlen(skb);
3933 	skb->data_len = 0;
3934 
3935 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3936 
3937 	switch (hdev->dev_type) {
3938 	case HCI_PRIMARY:
3939 		hci_add_acl_hdr(skb, conn->handle, flags);
3940 		break;
3941 	case HCI_AMP:
3942 		hci_add_acl_hdr(skb, chan->handle, flags);
3943 		break;
3944 	default:
3945 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3946 		return;
3947 	}
3948 
3949 	list = skb_shinfo(skb)->frag_list;
3950 	if (!list) {
3951 		/* Non-fragmented */
3952 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3953 
3954 		skb_queue_tail(queue, skb);
3955 	} else {
3956 		/* Fragmented */
3957 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3958 
3959 		skb_shinfo(skb)->frag_list = NULL;
3960 
3961 		/* Queue all fragments atomically. We need to use spin_lock_bh
3962 		 * here because of 6LoWPAN links, as there this function is
3963 		 * called from softirq and using normal spin lock could cause
3964 		 * deadlocks.
3965 		 */
3966 		spin_lock_bh(&queue->lock);
3967 
3968 		__skb_queue_tail(queue, skb);
3969 
3970 		flags &= ~ACL_START;
3971 		flags |= ACL_CONT;
3972 		do {
3973 			skb = list; list = list->next;
3974 
3975 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3976 			hci_add_acl_hdr(skb, conn->handle, flags);
3977 
3978 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3979 
3980 			__skb_queue_tail(queue, skb);
3981 		} while (list);
3982 
3983 		spin_unlock_bh(&queue->lock);
3984 	}
3985 }
3986 
3987 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3988 {
3989 	struct hci_dev *hdev = chan->conn->hdev;
3990 
3991 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3992 
3993 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3994 
3995 	queue_work(hdev->workqueue, &hdev->tx_work);
3996 }
3997 
3998 /* Send SCO data */
3999 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4000 {
4001 	struct hci_dev *hdev = conn->hdev;
4002 	struct hci_sco_hdr hdr;
4003 
4004 	BT_DBG("%s len %d", hdev->name, skb->len);
4005 
4006 	hdr.handle = cpu_to_le16(conn->handle);
4007 	hdr.dlen   = skb->len;
4008 
4009 	skb_push(skb, HCI_SCO_HDR_SIZE);
4010 	skb_reset_transport_header(skb);
4011 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4012 
4013 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4014 
4015 	skb_queue_tail(&conn->data_q, skb);
4016 	queue_work(hdev->workqueue, &hdev->tx_work);
4017 }
4018 
4019 /* ---- HCI TX task (outgoing data) ---- */
4020 
4021 /* HCI Connection scheduler */
4022 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4023 				     int *quote)
4024 {
4025 	struct hci_conn_hash *h = &hdev->conn_hash;
4026 	struct hci_conn *conn = NULL, *c;
4027 	unsigned int num = 0, min = ~0;
4028 
4029 	/* We don't have to lock device here. Connections are always
4030 	 * added and removed with TX task disabled. */
4031 
4032 	rcu_read_lock();
4033 
4034 	list_for_each_entry_rcu(c, &h->list, list) {
4035 		if (c->type != type || skb_queue_empty(&c->data_q))
4036 			continue;
4037 
4038 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4039 			continue;
4040 
4041 		num++;
4042 
4043 		if (c->sent < min) {
4044 			min  = c->sent;
4045 			conn = c;
4046 		}
4047 
4048 		if (hci_conn_num(hdev, type) == num)
4049 			break;
4050 	}
4051 
4052 	rcu_read_unlock();
4053 
4054 	if (conn) {
4055 		int cnt, q;
4056 
4057 		switch (conn->type) {
4058 		case ACL_LINK:
4059 			cnt = hdev->acl_cnt;
4060 			break;
4061 		case SCO_LINK:
4062 		case ESCO_LINK:
4063 			cnt = hdev->sco_cnt;
4064 			break;
4065 		case LE_LINK:
4066 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4067 			break;
4068 		default:
4069 			cnt = 0;
4070 			bt_dev_err(hdev, "unknown link type %d", conn->type);
4071 		}
4072 
4073 		q = cnt / num;
4074 		*quote = q ? q : 1;
4075 	} else
4076 		*quote = 0;
4077 
4078 	BT_DBG("conn %p quote %d", conn, *quote);
4079 	return conn;
4080 }
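/* Illustrative quota calculation: with hdev->sco_cnt == 8 free buffers and
 * three SCO connections holding queued data, the connection with the
 * fewest packets in flight is picked and *quote becomes 8 / 3 = 2 for
 * this scheduling pass.
 */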
4081 
4082 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4083 {
4084 	struct hci_conn_hash *h = &hdev->conn_hash;
4085 	struct hci_conn *c;
4086 
4087 	bt_dev_err(hdev, "link tx timeout");
4088 
4089 	rcu_read_lock();
4090 
4091 	/* Kill stalled connections */
4092 	list_for_each_entry_rcu(c, &h->list, list) {
4093 		if (c->type == type && c->sent) {
4094 			bt_dev_err(hdev, "killing stalled connection %pMR",
4095 				   &c->dst);
4096 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4097 		}
4098 	}
4099 
4100 	rcu_read_unlock();
4101 }
4102 
4103 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4104 				      int *quote)
4105 {
4106 	struct hci_conn_hash *h = &hdev->conn_hash;
4107 	struct hci_chan *chan = NULL;
4108 	unsigned int num = 0, min = ~0, cur_prio = 0;
4109 	struct hci_conn *conn;
4110 	int cnt, q, conn_num = 0;
4111 
4112 	BT_DBG("%s", hdev->name);
4113 
4114 	rcu_read_lock();
4115 
4116 	list_for_each_entry_rcu(conn, &h->list, list) {
4117 		struct hci_chan *tmp;
4118 
4119 		if (conn->type != type)
4120 			continue;
4121 
4122 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4123 			continue;
4124 
4125 		conn_num++;
4126 
4127 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4128 			struct sk_buff *skb;
4129 
4130 			if (skb_queue_empty(&tmp->data_q))
4131 				continue;
4132 
4133 			skb = skb_peek(&tmp->data_q);
4134 			if (skb->priority < cur_prio)
4135 				continue;
4136 
4137 			if (skb->priority > cur_prio) {
4138 				num = 0;
4139 				min = ~0;
4140 				cur_prio = skb->priority;
4141 			}
4142 
4143 			num++;
4144 
4145 			if (conn->sent < min) {
4146 				min  = conn->sent;
4147 				chan = tmp;
4148 			}
4149 		}
4150 
4151 		if (hci_conn_num(hdev, type) == conn_num)
4152 			break;
4153 	}
4154 
4155 	rcu_read_unlock();
4156 
4157 	if (!chan)
4158 		return NULL;
4159 
4160 	switch (chan->conn->type) {
4161 	case ACL_LINK:
4162 		cnt = hdev->acl_cnt;
4163 		break;
4164 	case AMP_LINK:
4165 		cnt = hdev->block_cnt;
4166 		break;
4167 	case SCO_LINK:
4168 	case ESCO_LINK:
4169 		cnt = hdev->sco_cnt;
4170 		break;
4171 	case LE_LINK:
4172 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4173 		break;
4174 	default:
4175 		cnt = 0;
4176 		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4177 	}
4178 
4179 	q = cnt / num;
4180 	*quote = q ? q : 1;
4181 	BT_DBG("chan %p quote %d", chan, *quote);
4182 	return chan;
4183 }
4184 
4185 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4186 {
4187 	struct hci_conn_hash *h = &hdev->conn_hash;
4188 	struct hci_conn *conn;
4189 	int num = 0;
4190 
4191 	BT_DBG("%s", hdev->name);
4192 
4193 	rcu_read_lock();
4194 
4195 	list_for_each_entry_rcu(conn, &h->list, list) {
4196 		struct hci_chan *chan;
4197 
4198 		if (conn->type != type)
4199 			continue;
4200 
4201 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4202 			continue;
4203 
4204 		num++;
4205 
4206 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4207 			struct sk_buff *skb;
4208 
4209 			if (chan->sent) {
4210 				chan->sent = 0;
4211 				continue;
4212 			}
4213 
4214 			if (skb_queue_empty(&chan->data_q))
4215 				continue;
4216 
4217 			skb = skb_peek(&chan->data_q);
4218 			if (skb->priority >= HCI_PRIO_MAX - 1)
4219 				continue;
4220 
4221 			skb->priority = HCI_PRIO_MAX - 1;
4222 
4223 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4224 			       skb->priority);
4225 		}
4226 
4227 		if (hci_conn_num(hdev, type) == num)
4228 			break;
4229 	}
4230 
4231 	rcu_read_unlock();
4233 }
4234 
4235 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4236 {
4237 	/* Calculate count of blocks used by this packet */
4238 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4239 }
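/* Worked example (numbers are illustrative): if the controller reported a
 * data block length of 339 octets, an skb carrying a 1021-octet ACL
 * payload (skb->len minus the 4-byte ACL header) consumes
 * DIV_ROUND_UP(1021, 339) = 4 blocks of hdev->block_cnt.
 */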
4240 
4241 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4242 {
4243 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4244 		/* ACL tx timeout must be longer than maximum
4245 		 * link supervision timeout (40.9 seconds) */
4246 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4247 				       HCI_ACL_TX_TIMEOUT))
4248 			hci_link_tx_to(hdev, ACL_LINK);
4249 	}
4250 }
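/* HCI_ACL_TX_TIMEOUT (45 seconds, expressed in jiffies, at the time of
 * writing) leaves headroom above the 40.9 second maximum link supervision
 * timeout noted above.
 */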
4251 
4252 /* Schedule SCO */
4253 static void hci_sched_sco(struct hci_dev *hdev)
4254 {
4255 	struct hci_conn *conn;
4256 	struct sk_buff *skb;
4257 	int quote;
4258 
4259 	BT_DBG("%s", hdev->name);
4260 
4261 	if (!hci_conn_num(hdev, SCO_LINK))
4262 		return;
4263 
4264 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4265 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4266 			BT_DBG("skb %p len %d", skb, skb->len);
4267 			hci_send_frame(hdev, skb);
4268 
4269 			conn->sent++;
4270 			if (conn->sent == ~0)
4271 				conn->sent = 0;
4272 		}
4273 	}
4274 }
4275 
4276 static void hci_sched_esco(struct hci_dev *hdev)
4277 {
4278 	struct hci_conn *conn;
4279 	struct sk_buff *skb;
4280 	int quote;
4281 
4282 	BT_DBG("%s", hdev->name);
4283 
4284 	if (!hci_conn_num(hdev, ESCO_LINK))
4285 		return;
4286 
4287 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4288 						     &quote))) {
4289 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4290 			BT_DBG("skb %p len %d", skb, skb->len);
4291 			hci_send_frame(hdev, skb);
4292 
4293 			conn->sent++;
4294 			if (conn->sent == ~0)
4295 				conn->sent = 0;
4296 		}
4297 	}
4298 }
4299 
4300 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4301 {
4302 	unsigned int cnt = hdev->acl_cnt;
4303 	struct hci_chan *chan;
4304 	struct sk_buff *skb;
4305 	int quote;
4306 
4307 	__check_timeout(hdev, cnt);
4308 
4309 	while (hdev->acl_cnt &&
4310 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4311 		u32 priority = (skb_peek(&chan->data_q))->priority;
4312 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4313 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4314 			       skb->len, skb->priority);
4315 
4316 			/* Stop if priority has changed */
4317 			if (skb->priority < priority)
4318 				break;
4319 
4320 			skb = skb_dequeue(&chan->data_q);
4321 
4322 			hci_conn_enter_active_mode(chan->conn,
4323 						   bt_cb(skb)->force_active);
4324 
4325 			hci_send_frame(hdev, skb);
4326 			hdev->acl_last_tx = jiffies;
4327 
4328 			hdev->acl_cnt--;
4329 			chan->sent++;
4330 			chan->conn->sent++;
4331 
4332 			/* Send pending SCO packets right away */
4333 			hci_sched_sco(hdev);
4334 			hci_sched_esco(hdev);
4335 		}
4336 	}
4337 
4338 	if (cnt != hdev->acl_cnt)
4339 		hci_prio_recalculate(hdev, ACL_LINK);
4340 }
4341 
4342 static void hci_sched_acl_blk(struct hci_dev *hdev)
4343 {
4344 	unsigned int cnt = hdev->block_cnt;
4345 	struct hci_chan *chan;
4346 	struct sk_buff *skb;
4347 	int quote;
4348 	u8 type;
4349 
4350 	__check_timeout(hdev, cnt);
4351 
4352 	BT_DBG("%s", hdev->name);
4353 
4354 	if (hdev->dev_type == HCI_AMP)
4355 		type = AMP_LINK;
4356 	else
4357 		type = ACL_LINK;
4358 
4359 	while (hdev->block_cnt > 0 &&
4360 	       (chan = hci_chan_sent(hdev, type, &quote))) {
4361 		u32 priority = (skb_peek(&chan->data_q))->priority;
4362 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4363 			int blocks;
4364 
4365 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4366 			       skb->len, skb->priority);
4367 
4368 			/* Stop if priority has changed */
4369 			if (skb->priority < priority)
4370 				break;
4371 
4372 			skb = skb_dequeue(&chan->data_q);
4373 
4374 			blocks = __get_blocks(hdev, skb);
4375 			if (blocks > hdev->block_cnt)
4376 				return;
4377 
4378 			hci_conn_enter_active_mode(chan->conn,
4379 						   bt_cb(skb)->force_active);
4380 
4381 			hci_send_frame(hdev, skb);
4382 			hdev->acl_last_tx = jiffies;
4383 
4384 			hdev->block_cnt -= blocks;
4385 			quote -= blocks;
4386 
4387 			chan->sent += blocks;
4388 			chan->conn->sent += blocks;
4389 		}
4390 	}
4391 
4392 	if (cnt != hdev->block_cnt)
4393 		hci_prio_recalculate(hdev, type);
4394 }
4395 
4396 static void hci_sched_acl(struct hci_dev *hdev)
4397 {
4398 	BT_DBG("%s", hdev->name);
4399 
4400 	/* No ACL link over BR/EDR controller */
4401 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4402 		return;
4403 
4404 	/* No AMP link over AMP controller */
4405 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4406 		return;
4407 
4408 	switch (hdev->flow_ctl_mode) {
4409 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
4410 		hci_sched_acl_pkt(hdev);
4411 		break;
4412 
4413 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4414 		hci_sched_acl_blk(hdev);
4415 		break;
4416 	}
4417 }
4418 
4419 static void hci_sched_le(struct hci_dev *hdev)
4420 {
4421 	struct hci_chan *chan;
4422 	struct sk_buff *skb;
4423 	int quote, cnt, tmp;
4424 
4425 	BT_DBG("%s", hdev->name);
4426 
4427 	if (!hci_conn_num(hdev, LE_LINK))
4428 		return;
4429 
4430 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4431 
4432 	__check_timeout(hdev, cnt);
4433 
4434 	tmp = cnt;
4435 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4436 		u32 priority = (skb_peek(&chan->data_q))->priority;
4437 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4438 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4439 			       skb->len, skb->priority);
4440 
4441 			/* Stop if priority has changed */
4442 			if (skb->priority < priority)
4443 				break;
4444 
4445 			skb = skb_dequeue(&chan->data_q);
4446 
4447 			hci_send_frame(hdev, skb);
4448 			hdev->le_last_tx = jiffies;
4449 
4450 			cnt--;
4451 			chan->sent++;
4452 			chan->conn->sent++;
4453 
4454 			/* Send pending SCO packets right away */
4455 			hci_sched_sco(hdev);
4456 			hci_sched_esco(hdev);
4457 		}
4458 	}
4459 
4460 	if (hdev->le_pkts)
4461 		hdev->le_cnt = cnt;
4462 	else
4463 		hdev->acl_cnt = cnt;
4464 
4465 	if (cnt != tmp)
4466 		hci_prio_recalculate(hdev, LE_LINK);
4467 }
4468 
4469 static void hci_tx_work(struct work_struct *work)
4470 {
4471 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4472 	struct sk_buff *skb;
4473 
4474 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4475 	       hdev->sco_cnt, hdev->le_cnt);
4476 
4477 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4478 		/* Schedule queues and send stuff to HCI driver */
4479 		hci_sched_sco(hdev);
4480 		hci_sched_esco(hdev);
4481 		hci_sched_acl(hdev);
4482 		hci_sched_le(hdev);
4483 	}
4484 
4485 	/* Send next queued raw (unknown type) packet */
4486 	while ((skb = skb_dequeue(&hdev->raw_q)))
4487 		hci_send_frame(hdev, skb);
4488 }
4489 
4490 /* ----- HCI RX task (incoming data processing) ----- */
4491 
4492 /* ACL data packet */
4493 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4494 {
4495 	struct hci_acl_hdr *hdr = (void *) skb->data;
4496 	struct hci_conn *conn;
4497 	__u16 handle, flags;
4498 
4499 	skb_pull(skb, HCI_ACL_HDR_SIZE);
4500 
4501 	handle = __le16_to_cpu(hdr->handle);
4502 	flags  = hci_flags(handle);
4503 	handle = hci_handle(handle);
4504 
4505 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4506 	       handle, flags);
4507 
4508 	hdev->stat.acl_rx++;
4509 
4510 	hci_dev_lock(hdev);
4511 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4512 	hci_dev_unlock(hdev);
4513 
4514 	if (conn) {
4515 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4516 
4517 		/* Send to upper protocol */
4518 		l2cap_recv_acldata(conn, skb, flags);
4519 		return;
4520 	} else {
4521 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4522 			   handle);
4523 	}
4524 
4525 	kfree_skb(skb);
4526 }
4527 
4528 /* SCO data packet */
4529 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4530 {
4531 	struct hci_sco_hdr *hdr = (void *) skb->data;
4532 	struct hci_conn *conn;
4533 	__u16 handle, flags;
4534 
4535 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4536 
4537 	handle = __le16_to_cpu(hdr->handle);
4538 	flags  = hci_flags(handle);
4539 	handle = hci_handle(handle);
4540 
4541 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4542 	       handle, flags);
4543 
4544 	hdev->stat.sco_rx++;
4545 
4546 	hci_dev_lock(hdev);
4547 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4548 	hci_dev_unlock(hdev);
4549 
4550 	if (conn) {
4551 		/* Send to upper protocol */
4552 		sco_recv_scodata(conn, skb);
4553 		return;
4554 	} else {
4555 		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4556 			   handle);
4557 	}
4558 
4559 	kfree_skb(skb);
4560 }
4561 
4562 static bool hci_req_is_complete(struct hci_dev *hdev)
4563 {
4564 	struct sk_buff *skb;
4565 
4566 	skb = skb_peek(&hdev->cmd_q);
4567 	if (!skb)
4568 		return true;
4569 
4570 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4571 }
4572 
4573 static void hci_resend_last(struct hci_dev *hdev)
4574 {
4575 	struct hci_command_hdr *sent;
4576 	struct sk_buff *skb;
4577 	u16 opcode;
4578 
4579 	if (!hdev->sent_cmd)
4580 		return;
4581 
4582 	sent = (void *) hdev->sent_cmd->data;
4583 	opcode = __le16_to_cpu(sent->opcode);
4584 	if (opcode == HCI_OP_RESET)
4585 		return;
4586 
4587 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4588 	if (!skb)
4589 		return;
4590 
4591 	skb_queue_head(&hdev->cmd_q, skb);
4592 	queue_work(hdev->workqueue, &hdev->cmd_work);
4593 }
4594 
4595 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4596 			  hci_req_complete_t *req_complete,
4597 			  hci_req_complete_skb_t *req_complete_skb)
4598 {
4599 	struct sk_buff *skb;
4600 	unsigned long flags;
4601 
4602 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4603 
4604 	/* If the completed command doesn't match the last one that was
4605 	 * sent, we need to do special handling of it.
4606 	 */
4607 	if (!hci_sent_cmd_data(hdev, opcode)) {
4608 		/* Some CSR based controllers generate a spontaneous
4609 		 * reset complete event during init and any pending
4610 		 * command will never be completed. In such a case we
4611 		 * need to resend whatever was the last sent
4612 		 * command.
4613 		 */
4614 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4615 			hci_resend_last(hdev);
4616 
4617 		return;
4618 	}
4619 
4620 	/* If we reach this point this event matches the last command sent */
4621 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4622 
4623 	/* If the command succeeded and there's still more commands in
4624 	 * this request the request is not yet complete.
4625 	 */
4626 	if (!status && !hci_req_is_complete(hdev))
4627 		return;
4628 
4629 	/* If this was the last command in a request the complete
4630 	 * callback would be found in hdev->sent_cmd instead of the
4631 	 * command queue (hdev->cmd_q).
4632 	 */
4633 	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4634 		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4635 		return;
4636 	}
4637 
4638 	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4639 		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4640 		return;
4641 	}
4642 
4643 	/* Remove all pending commands belonging to this request */
4644 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4645 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4646 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4647 			__skb_queue_head(&hdev->cmd_q, skb);
4648 			break;
4649 		}
4650 
4651 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4652 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4653 		else
4654 			*req_complete = bt_cb(skb)->hci.req_complete;
4655 		kfree_skb(skb);
4656 	}
4657 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4658 }
4659 
4660 static void hci_rx_work(struct work_struct *work)
4661 {
4662 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4663 	struct sk_buff *skb;
4664 
4665 	BT_DBG("%s", hdev->name);
4666 
4667 	while ((skb = skb_dequeue(&hdev->rx_q))) {
4668 		/* Send copy to monitor */
4669 		hci_send_to_monitor(hdev, skb);
4670 
4671 		if (atomic_read(&hdev->promisc)) {
4672 			/* Send copy to the sockets */
4673 			hci_send_to_sock(hdev, skb);
4674 		}
4675 
4676 		/* If the device has been opened in HCI_USER_CHANNEL,
4677 		 * userspace has exclusive access to the device.
4678 		 * While the device is still in HCI_INIT, however, the
4679 		 * packets must keep flowing so that the driver can
4680 		 * complete its setup().
4681 		 */
4682 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4683 		    !test_bit(HCI_INIT, &hdev->flags)) {
4684 			kfree_skb(skb);
4685 			continue;
4686 		}
4687 
4688 		if (test_bit(HCI_INIT, &hdev->flags)) {
4689 			/* Don't process data packets in these states. */
4690 			switch (hci_skb_pkt_type(skb)) {
4691 			case HCI_ACLDATA_PKT:
4692 			case HCI_SCODATA_PKT:
4693 			case HCI_ISODATA_PKT:
4694 				kfree_skb(skb);
4695 				continue;
4696 			}
4697 		}
4698 
4699 		/* Process frame */
4700 		switch (hci_skb_pkt_type(skb)) {
4701 		case HCI_EVENT_PKT:
4702 			BT_DBG("%s Event packet", hdev->name);
4703 			hci_event_packet(hdev, skb);
4704 			break;
4705 
4706 		case HCI_ACLDATA_PKT:
4707 			BT_DBG("%s ACL data packet", hdev->name);
4708 			hci_acldata_packet(hdev, skb);
4709 			break;
4710 
4711 		case HCI_SCODATA_PKT:
4712 			BT_DBG("%s SCO data packet", hdev->name);
4713 			hci_scodata_packet(hdev, skb);
4714 			break;
4715 
4716 		default:
4717 			kfree_skb(skb);
4718 			break;
4719 		}
4720 	}
4721 }
4722 
4723 static void hci_cmd_work(struct work_struct *work)
4724 {
4725 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4726 	struct sk_buff *skb;
4727 
4728 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4729 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4730 
4731 	/* Send queued commands */
4732 	if (atomic_read(&hdev->cmd_cnt)) {
4733 		skb = skb_dequeue(&hdev->cmd_q);
4734 		if (!skb)
4735 			return;
4736 
4737 		kfree_skb(hdev->sent_cmd);
4738 
4739 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4740 		if (hdev->sent_cmd) {
4741 			if (hci_req_status_pend(hdev))
4742 				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4743 			atomic_dec(&hdev->cmd_cnt);
4744 			hci_send_frame(hdev, skb);
4745 			if (test_bit(HCI_RESET, &hdev->flags))
4746 				cancel_delayed_work(&hdev->cmd_timer);
4747 			else
4748 				schedule_delayed_work(&hdev->cmd_timer,
4749 						      HCI_CMD_TIMEOUT);
4750 		} else {
4751 			skb_queue_head(&hdev->cmd_q, skb);
4752 			queue_work(hdev->workqueue, &hdev->cmd_work);
4753 		}
4754 	}
4755 }
4756