xref: /linux/net/bluetooth/hci_event.c (revision 40e79150c1686263e6a031d7702aec63aff31332)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 
43 /* Handle HCI Event packets */
44 
45 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 {
47 	__u8 status = *((__u8 *) skb->data);
48 
49 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
50 
51 	if (status)
52 		return;
53 
54 	clear_bit(HCI_INQUIRY, &hdev->flags);
55 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
56 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
57 
58 	hci_dev_lock(hdev);
59 	/* Set discovery state to stopped if we're not doing LE active
60 	 * scanning.
61 	 */
62 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
63 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
64 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
65 	hci_dev_unlock(hdev);
66 
67 	hci_conn_check_pending(hdev);
68 }
69 
70 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 {
72 	__u8 status = *((__u8 *) skb->data);
73 
74 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
75 
76 	if (status)
77 		return;
78 
79 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
80 }
81 
82 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
83 {
84 	__u8 status = *((__u8 *) skb->data);
85 
86 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
87 
88 	if (status)
89 		return;
90 
91 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
92 
93 	hci_conn_check_pending(hdev);
94 }
95 
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	/* Command Complete for HCI_Remote_Name_Request_Cancel. No state is
	 * updated here; presumably the outcome is handled via the Remote
	 * Name Request Complete event — confirm before adding logic.
	 */
	BT_DBG("%s", hdev->name);
}
101 
102 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
103 {
104 	struct hci_rp_role_discovery *rp = (void *) skb->data;
105 	struct hci_conn *conn;
106 
107 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
108 
109 	if (rp->status)
110 		return;
111 
112 	hci_dev_lock(hdev);
113 
114 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
115 	if (conn)
116 		conn->role = rp->role;
117 
118 	hci_dev_unlock(hdev);
119 }
120 
121 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
122 {
123 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
124 	struct hci_conn *conn;
125 
126 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
127 
128 	if (rp->status)
129 		return;
130 
131 	hci_dev_lock(hdev);
132 
133 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
134 	if (conn)
135 		conn->link_policy = __le16_to_cpu(rp->policy);
136 
137 	hci_dev_unlock(hdev);
138 }
139 
140 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 {
142 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
143 	struct hci_conn *conn;
144 	void *sent;
145 
146 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
147 
148 	if (rp->status)
149 		return;
150 
151 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
152 	if (!sent)
153 		return;
154 
155 	hci_dev_lock(hdev);
156 
157 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
158 	if (conn)
159 		conn->link_policy = get_unaligned_le16(sent + 2);
160 
161 	hci_dev_unlock(hdev);
162 }
163 
164 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
165 					struct sk_buff *skb)
166 {
167 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
168 
169 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
170 
171 	if (rp->status)
172 		return;
173 
174 	hdev->link_policy = __le16_to_cpu(rp->policy);
175 }
176 
177 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
178 					 struct sk_buff *skb)
179 {
180 	__u8 status = *((__u8 *) skb->data);
181 	void *sent;
182 
183 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
184 
185 	if (status)
186 		return;
187 
188 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
189 	if (!sent)
190 		return;
191 
192 	hdev->link_policy = get_unaligned_le16(sent);
193 }
194 
/* Handle Command Complete for HCI_Reset.
 *
 * Drops the in-progress HCI_RESET flag unconditionally (the command has
 * finished either way), then on success discards all volatile state so
 * the stack's view matches the freshly reset controller.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* TX power values are unknown until read again */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* The reset wiped any advertising and scan response data */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* Controller-side LE white and resolving lists are now empty */
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
227 
228 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
229 					struct sk_buff *skb)
230 {
231 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
232 	struct hci_cp_read_stored_link_key *sent;
233 
234 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
235 
236 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
237 	if (!sent)
238 		return;
239 
240 	if (!rp->status && sent->read_all == 0x01) {
241 		hdev->stored_max_keys = rp->max_keys;
242 		hdev->stored_num_keys = rp->num_keys;
243 	}
244 }
245 
246 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
247 					  struct sk_buff *skb)
248 {
249 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
250 
251 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
252 
253 	if (rp->status)
254 		return;
255 
256 	if (rp->num_keys <= hdev->stored_num_keys)
257 		hdev->stored_num_keys -= rp->num_keys;
258 	else
259 		hdev->stored_num_keys = 0;
260 }
261 
/* Handle Command Complete for HCI_Write_Local_Name.
 *
 * The event only carries the status, so the name is taken from the
 * command we sent. With mgmt active the result is forwarded there;
 * otherwise the name is cached locally on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Bail out if this is not the command we have pending */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
282 
283 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
284 {
285 	struct hci_rp_read_local_name *rp = (void *) skb->data;
286 
287 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
288 
289 	if (rp->status)
290 		return;
291 
292 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
293 	    hci_dev_test_flag(hdev, HCI_CONFIG))
294 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
295 }
296 
/* Handle Command Complete for HCI_Write_Authentication_Enable.
 *
 * On success mirrors the requested setting into the HCI_AUTH device
 * flag, and always reports the outcome to mgmt when it is active.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The written value comes from the pending command parameters */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
324 
325 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
326 {
327 	__u8 status = *((__u8 *) skb->data);
328 	__u8 param;
329 	void *sent;
330 
331 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
332 
333 	if (status)
334 		return;
335 
336 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
337 	if (!sent)
338 		return;
339 
340 	param = *((__u8 *) sent);
341 
342 	if (param)
343 		set_bit(HCI_ENCRYPT, &hdev->flags);
344 	else
345 		clear_bit(HCI_ENCRYPT, &hdev->flags);
346 }
347 
/* Handle Command Complete for HCI_Write_Scan_Enable.
 *
 * On success mirrors the requested scan mode into the ISCAN/PSCAN
 * device flags. On failure the discoverable timeout is zeroed.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The requested mode comes from the pending command parameters */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
382 
/* Handle Command Complete for HCI_Read_Class_of_Device: cache the
 * 3-byte class of device reported by the controller.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* Stored little-endian; print most significant byte first */
	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
397 
/* Handle Command Complete for HCI_Write_Class_of_Device.
 *
 * On success caches the class we wrote (taken from the pending command,
 * as the event only carries status), and always informs mgmt of the
 * result when it is active.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
419 
420 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
421 {
422 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
423 	__u16 setting;
424 
425 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
426 
427 	if (rp->status)
428 		return;
429 
430 	setting = __le16_to_cpu(rp->voice_setting);
431 
432 	if (hdev->voice_setting == setting)
433 		return;
434 
435 	hdev->voice_setting = setting;
436 
437 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
438 
439 	if (hdev->notify)
440 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
441 }
442 
443 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
444 				       struct sk_buff *skb)
445 {
446 	__u8 status = *((__u8 *) skb->data);
447 	__u16 setting;
448 	void *sent;
449 
450 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
451 
452 	if (status)
453 		return;
454 
455 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
456 	if (!sent)
457 		return;
458 
459 	setting = get_unaligned_le16(sent);
460 
461 	if (hdev->voice_setting == setting)
462 		return;
463 
464 	hdev->voice_setting = setting;
465 
466 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
467 
468 	if (hdev->notify)
469 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
470 }
471 
472 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
473 					  struct sk_buff *skb)
474 {
475 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
476 
477 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
478 
479 	if (rp->status)
480 		return;
481 
482 	hdev->num_iac = rp->num_iac;
483 
484 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
485 }
486 
/* Handle Command Complete for HCI_Write_Simple_Pairing_Mode.
 *
 * On success keeps the cached host feature bit (LMP_HOST_SSP) in sync
 * with the mode that was written. With mgmt active the result is
 * forwarded there; otherwise the HCI_SSP_ENABLED flag is updated
 * directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The written mode comes from the pending command parameters */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
518 
/* Handle Command Complete for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success keeps the cached host feature bit (LMP_HOST_SC) in sync
 * with the value that was written. The HCI_SC_ENABLED flag is only
 * updated here when mgmt is NOT active — with mgmt active that flag is
 * presumably managed by the mgmt code; confirm there before changing.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The written value comes from the pending command parameters */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
548 
549 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
550 {
551 	struct hci_rp_read_local_version *rp = (void *) skb->data;
552 
553 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
554 
555 	if (rp->status)
556 		return;
557 
558 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
559 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
560 		hdev->hci_ver = rp->hci_ver;
561 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
562 		hdev->lmp_ver = rp->lmp_ver;
563 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
564 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
565 	}
566 }
567 
568 static void hci_cc_read_local_commands(struct hci_dev *hdev,
569 				       struct sk_buff *skb)
570 {
571 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
572 
573 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
574 
575 	if (rp->status)
576 		return;
577 
578 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
579 	    hci_dev_test_flag(hdev, HCI_CONFIG))
580 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
581 }
582 
583 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
584 					     struct sk_buff *skb)
585 {
586 	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
587 	struct hci_conn *conn;
588 
589 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
590 
591 	if (rp->status)
592 		return;
593 
594 	hci_dev_lock(hdev);
595 
596 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
597 	if (conn)
598 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
599 
600 	hci_dev_unlock(hdev);
601 }
602 
603 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
604 					      struct sk_buff *skb)
605 {
606 	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
607 	struct hci_conn *conn;
608 	void *sent;
609 
610 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
611 
612 	if (rp->status)
613 		return;
614 
615 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
616 	if (!sent)
617 		return;
618 
619 	hci_dev_lock(hdev);
620 
621 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
622 	if (conn)
623 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
624 
625 	hci_dev_unlock(hdev);
626 }
627 
/* Handle Command Complete for HCI_Read_Local_Supported_Features.
 *
 * Caches page 0 of the LMP feature mask and derives from it the ACL
 * packet types plus the SCO/eSCO packet types the controller can use.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
677 
678 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
679 					   struct sk_buff *skb)
680 {
681 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
682 
683 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
684 
685 	if (rp->status)
686 		return;
687 
688 	if (hdev->max_page < rp->max_page)
689 		hdev->max_page = rp->max_page;
690 
691 	if (rp->page < HCI_MAX_PAGES)
692 		memcpy(hdev->features[rp->page], rp->features, 8);
693 }
694 
695 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
696 					  struct sk_buff *skb)
697 {
698 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
699 
700 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
701 
702 	if (rp->status)
703 		return;
704 
705 	hdev->flow_ctl_mode = rp->mode;
706 }
707 
/* Handle Command Complete for HCI_Read_Buffer_Size.
 *
 * Records the controller's ACL/SCO buffer geometry and initializes the
 * free-buffer counters used for packet-based flow control.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Quirky controllers report unusable SCO buffer values;
	 * substitute fixed known-good defaults.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All controller buffers are free at this point */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
733 
734 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
735 {
736 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
737 
738 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
739 
740 	if (rp->status)
741 		return;
742 
743 	if (test_bit(HCI_INIT, &hdev->flags))
744 		bacpy(&hdev->bdaddr, &rp->bdaddr);
745 
746 	if (hci_dev_test_flag(hdev, HCI_SETUP))
747 		bacpy(&hdev->setup_addr, &rp->bdaddr);
748 }
749 
750 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
751 					   struct sk_buff *skb)
752 {
753 	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
754 
755 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
756 
757 	if (rp->status)
758 		return;
759 
760 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
761 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
762 		hdev->pairing_opts = rp->pairing_opts;
763 		hdev->max_enc_key_size = rp->max_key_size;
764 	}
765 }
766 
767 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
768 					   struct sk_buff *skb)
769 {
770 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
771 
772 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
773 
774 	if (rp->status)
775 		return;
776 
777 	if (test_bit(HCI_INIT, &hdev->flags)) {
778 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
779 		hdev->page_scan_window = __le16_to_cpu(rp->window);
780 	}
781 }
782 
783 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
784 					    struct sk_buff *skb)
785 {
786 	u8 status = *((u8 *) skb->data);
787 	struct hci_cp_write_page_scan_activity *sent;
788 
789 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
790 
791 	if (status)
792 		return;
793 
794 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
795 	if (!sent)
796 		return;
797 
798 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
799 	hdev->page_scan_window = __le16_to_cpu(sent->window);
800 }
801 
802 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
803 					   struct sk_buff *skb)
804 {
805 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
806 
807 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
808 
809 	if (rp->status)
810 		return;
811 
812 	if (test_bit(HCI_INIT, &hdev->flags))
813 		hdev->page_scan_type = rp->type;
814 }
815 
816 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
817 					struct sk_buff *skb)
818 {
819 	u8 status = *((u8 *) skb->data);
820 	u8 *type;
821 
822 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
823 
824 	if (status)
825 		return;
826 
827 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
828 	if (type)
829 		hdev->page_scan_type = *type;
830 }
831 
/* Handle Command Complete for HCI_Read_Data_Block_Size: record the
 * parameters used for block-based flow control and mark all blocks
 * as free.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All data blocks are free at this point */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
851 
/* Handle Command Complete for HCI_Read_Clock.
 *
 * Depending on the "which" parameter of the command we sent, the reply
 * carries either the local Bluetooth clock (stored on hdev) or a
 * piconet clock plus accuracy (stored on the matching connection).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Don't trust any field of a truncated reply */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* Bail out if this is not the command we have pending */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requested the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
886 
/* Handle Command Complete for HCI_Read_Local_AMP_Info: cache the AMP
 * controller's capabilities (bandwidth, latency, PDU size, flush
 * timeouts, etc.) on the hdev.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
908 
909 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
910 					 struct sk_buff *skb)
911 {
912 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
913 
914 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
915 
916 	if (rp->status)
917 		return;
918 
919 	hdev->inq_tx_power = rp->tx_power;
920 }
921 
922 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
923 					       struct sk_buff *skb)
924 {
925 	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
926 
927 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
928 
929 	if (rp->status)
930 		return;
931 
932 	hdev->err_data_reporting = rp->err_data_reporting;
933 }
934 
935 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
936 						struct sk_buff *skb)
937 {
938 	__u8 status = *((__u8 *)skb->data);
939 	struct hci_cp_write_def_err_data_reporting *cp;
940 
941 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
942 
943 	if (status)
944 		return;
945 
946 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
947 	if (!cp)
948 		return;
949 
950 	hdev->err_data_reporting = cp->err_data_reporting;
951 }
952 
/* Handle Command Complete for HCI_PIN_Code_Request_Reply.
 *
 * Reports the result to mgmt when active, then on success records the
 * PIN length on the connection. The PIN length comes from the command
 * we sent, since the reply only carries status and bdaddr.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* Bail out if this is not the command we have pending */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
980 
981 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
982 {
983 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
984 
985 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
986 
987 	hci_dev_lock(hdev);
988 
989 	if (hci_dev_test_flag(hdev, HCI_MGMT))
990 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
991 						 rp->status);
992 
993 	hci_dev_unlock(hdev);
994 }
995 
996 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
997 				       struct sk_buff *skb)
998 {
999 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1000 
1001 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1002 
1003 	if (rp->status)
1004 		return;
1005 
1006 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1007 	hdev->le_pkts = rp->le_max_pkt;
1008 
1009 	hdev->le_cnt = hdev->le_pkts;
1010 
1011 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1012 }
1013 
1014 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1015 					  struct sk_buff *skb)
1016 {
1017 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1018 
1019 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1020 
1021 	if (rp->status)
1022 		return;
1023 
1024 	memcpy(hdev->le_features, rp->features, 8);
1025 }
1026 
1027 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1028 					struct sk_buff *skb)
1029 {
1030 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1031 
1032 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1033 
1034 	if (rp->status)
1035 		return;
1036 
1037 	hdev->adv_tx_power = rp->tx_power;
1038 }
1039 
1040 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1041 {
1042 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1043 
1044 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1045 
1046 	hci_dev_lock(hdev);
1047 
1048 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1049 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1050 						 rp->status);
1051 
1052 	hci_dev_unlock(hdev);
1053 }
1054 
1055 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1056 					  struct sk_buff *skb)
1057 {
1058 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1059 
1060 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 
1062 	hci_dev_lock(hdev);
1063 
1064 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1065 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1066 						     ACL_LINK, 0, rp->status);
1067 
1068 	hci_dev_unlock(hdev);
1069 }
1070 
1071 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1072 {
1073 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1074 
1075 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1076 
1077 	hci_dev_lock(hdev);
1078 
1079 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1080 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1081 						 0, rp->status);
1082 
1083 	hci_dev_unlock(hdev);
1084 }
1085 
1086 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1087 					  struct sk_buff *skb)
1088 {
1089 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1090 
1091 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1092 
1093 	hci_dev_lock(hdev);
1094 
1095 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1096 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1097 						     ACL_LINK, 0, rp->status);
1098 
1099 	hci_dev_unlock(hdev);
1100 }
1101 
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	/* Command Complete for HCI_Read_Local_OOB_Data. Only the status
	 * is logged here; the OOB hash/randomizer in the reply is not
	 * consumed by this handler.
	 */
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1109 
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	/* Command Complete for HCI_Read_Local_OOB_Extended_Data. Only the
	 * status is logged; the reply parameters are not consumed here.
	 */
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1117 
1118 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1119 {
1120 	__u8 status = *((__u8 *) skb->data);
1121 	bdaddr_t *sent;
1122 
1123 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1124 
1125 	if (status)
1126 		return;
1127 
1128 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1129 	if (!sent)
1130 		return;
1131 
1132 	hci_dev_lock(hdev);
1133 
1134 	bacpy(&hdev->random_addr, sent);
1135 
1136 	hci_dev_unlock(hdev);
1137 }
1138 
1139 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1140 {
1141 	__u8 status = *((__u8 *) skb->data);
1142 	struct hci_cp_le_set_default_phy *cp;
1143 
1144 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1145 
1146 	if (status)
1147 		return;
1148 
1149 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1150 	if (!cp)
1151 		return;
1152 
1153 	hci_dev_lock(hdev);
1154 
1155 	hdev->le_tx_def_phys = cp->tx_phys;
1156 	hdev->le_rx_def_phys = cp->rx_phys;
1157 
1158 	hci_dev_unlock(hdev);
1159 }
1160 
/* Handle Command Complete for HCI_LE_Set_Advertising_Set_Random_Address.
 *
 * Stores the random address that was programmed: on the hdev itself
 * when the current advertising instance is 0, otherwise on the matching
 * advertising instance.
 */
static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
                                              struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv_instance;

	if (status)
		return;

	/* The address comes from the pending command parameters */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 (Set adv and Directed advs) */
		bacpy(&hdev->random_addr, &cp->bdaddr);
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&adv_instance->random_addr, &cp->bdaddr);
	}

	hci_dev_unlock(hdev);
}
1189 
/* Command Complete for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success, track the advertising state in the HCI_LE_ADV flag. When
 * advertising was enabled as part of a peripheral connection attempt,
 * arm the connection timeout so a peer that never connects does not
 * leave the attempt pending forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The enable/disable value comes from the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1224 
/* Command Complete for HCI_OP_LE_SET_EXT_ADV_ENABLE.
 *
 * Extended-advertising counterpart of hci_cc_le_set_adv_enable(): track
 * HCI_LE_ADV and arm the LE connection timeout when advertising was
 * enabled for a pending peripheral connection.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The enable flag comes from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		/* Bound a peripheral connection attempt with a timeout */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1258 
/* Command Complete for HCI_OP_LE_SET_SCAN_PARAM.
 *
 * On success, cache the scan type (active/passive) from the command we
 * sent; it is later needed e.g. to decide whether pending advertising
 * reports must be cleared when scanning is enabled.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1279 
/* Command Complete for HCI_OP_LE_SET_EXT_SCAN_PARAMS.
 *
 * Extended-scanning counterpart of hci_cc_le_set_scan_param(): cache the
 * scan type from the first per-PHY parameter block of the command we
 * sent. Only the first block is consulted -- presumably all PHYs are
 * configured with the same type; NOTE(review): confirm against the
 * request builder.
 */
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	/* Variable-length tail: one parameter block per scanned PHY */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
1304 
1305 static bool has_pending_adv_report(struct hci_dev *hdev)
1306 {
1307 	struct discovery_state *d = &hdev->discovery;
1308 
1309 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1310 }
1311 
1312 static void clear_pending_adv_report(struct hci_dev *hdev)
1313 {
1314 	struct discovery_state *d = &hdev->discovery;
1315 
1316 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1317 	d->last_adv_data_len = 0;
1318 }
1319 
1320 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1321 				     u8 bdaddr_type, s8 rssi, u32 flags,
1322 				     u8 *data, u8 len)
1323 {
1324 	struct discovery_state *d = &hdev->discovery;
1325 
1326 	bacpy(&d->last_adv_addr, bdaddr);
1327 	d->last_adv_addr_type = bdaddr_type;
1328 	d->last_adv_rssi = rssi;
1329 	d->last_adv_flags = flags;
1330 	memcpy(d->last_adv_data, data, len);
1331 	d->last_adv_data_len = len;
1332 }
1333 
/* Common completion handling for both the legacy and extended LE scan
 * enable commands.
 *
 * Enabling scanning sets HCI_LE_SCAN and, for active scans, drops any
 * stale deferred advertising report. Disabling scanning flushes a
 * pending report to mgmt, cancels the scan-disable timer, clears
 * HCI_LE_SCAN and then either finishes discovery or re-enables
 * advertising, depending on why scanning stopped.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			/* Deliver the last report that was still waiting
			 * for a scan response that never arrived.
			 */
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1390 
/* Command Complete for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * On success, hand the enable value from the command we sent to the
 * shared le_set_scan_enable_complete() state handling.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1408 
/* Command Complete for HCI_OP_LE_SET_EXT_SCAN_ENABLE.
 *
 * Extended-scanning counterpart of hci_cc_le_set_scan_enable(); both
 * funnel into the shared le_set_scan_enable_complete() handling.
 */
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1426 
/* Command Complete for HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS.
 *
 * On success, cache how many advertising sets the controller supports.
 */
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}
1440 
/* Command Complete for HCI_OP_LE_READ_WHITE_LIST_SIZE.
 *
 * On success, cache the controller's white list capacity.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}
1453 
/* Command Complete for HCI_OP_LE_CLEAR_WHITE_LIST.
 *
 * On success, drop the host-side mirror of the controller's white list.
 */
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
1466 
/* Command Complete for HCI_OP_LE_ADD_TO_WHITE_LIST.
 *
 * On success, add the entry from the command we sent to the host-side
 * mirror of the controller's white list. The return value of
 * hci_bdaddr_list_add() is deliberately ignored: the mirror is
 * best-effort bookkeeping.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			   sent->bdaddr_type);
}
1485 
/* Command Complete for HCI_OP_LE_DEL_FROM_WHITE_LIST.
 *
 * On success, remove the entry from the command we sent from the
 * host-side mirror of the controller's white list.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1504 
/* Command Complete for HCI_OP_LE_READ_SUPPORTED_STATES.
 *
 * On success, cache the 8-byte LE supported-states bitmask reported by
 * the controller.
 */
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}
1517 
/* Command Complete for HCI_OP_LE_READ_DEF_DATA_LEN.
 *
 * On success, cache the controller's suggested default data length
 * (max TX octets and max TX time), converting from little endian.
 */
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}
1531 
/* Command Complete for HCI_OP_LE_WRITE_DEF_DATA_LEN.
 *
 * On success, update the cached default data length from the values of
 * the command we sent (the event carries only the status).
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
1550 
/* Command Complete for HCI_OP_LE_ADD_TO_RESOLV_LIST.
 *
 * On success, mirror the new resolving-list entry (address plus peer
 * and local IRKs) from the command we sent into the host-side list.
 */
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				sent->bdaddr_type, sent->peer_irk,
				sent->local_irk);
}
1570 
/* Command Complete for HCI_OP_LE_DEL_FROM_RESOLV_LIST.
 *
 * On success, remove the entry from the command we sent from the
 * host-side mirror of the controller's resolving list.
 */
static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1589 
/* Command Complete for HCI_OP_LE_CLEAR_RESOLV_LIST.
 *
 * On success, drop the host-side mirror of the resolving list.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
1602 
/* Command Complete for HCI_OP_LE_READ_RESOLV_LIST_SIZE.
 *
 * On success, cache the controller's resolving list capacity.
 */
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}
1615 
/* Command Complete for HCI_OP_LE_SET_ADDR_RESOLV_ENABLE.
 *
 * On success, track whether link-layer (controller-side) RPA resolution
 * is active via the HCI_LL_RPA_RESOLUTION flag.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The enable/disable value comes from the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
1639 
/* Command Complete for HCI_OP_LE_READ_MAX_DATA_LEN.
 *
 * On success, cache the controller's maximum supported data length
 * parameters (TX/RX octets and times), converting from little endian.
 */
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}
1655 
/* Command Complete for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, sync the cached host feature bits (LMP_HOST_LE and
 * LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED flag with the values from
 * the command we sent. Disabling LE also clears HCI_ADVERTISING since
 * advertising cannot continue without LE support.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	/* Simultaneous LE and BR/EDR host support bit */
	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1689 
/* Command Complete for HCI_OP_LE_SET_ADV_PARAM.
 *
 * On success, cache the own-address type that advertising was
 * configured with (public vs random), taken from the command we sent.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1708 
/* Command Complete for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * On success, cache the own-address type from the command we sent and
 * the selected TX power from the reply -- in hdev for advertising
 * instance 0, otherwise in the instance's adv_info. The advertising
 * data is then refreshed since it may embed the now-known TX power.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}
1739 
/* Command Complete for HCI_OP_READ_RSSI.
 *
 * On success, store the reported RSSI on the connection identified by
 * the handle in the reply, if it still exists.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1758 
/* Command Complete for HCI_OP_READ_TX_POWER.
 *
 * On success, store the reported transmit power on the connection.
 * Which field is updated depends on the Type field of the command we
 * sent: 0x00 is the current power level, 0x01 the maximum (per the
 * HCI Read_Transmit_Power_Level definition).
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1792 
/* Command Complete for HCI_OP_WRITE_SSP_DEBUG_MODE.
 *
 * On success, cache the debug-mode value from the command we sent.
 */
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}
1807 
/* Command Status for HCI_OP_INQUIRY.
 *
 * On failure, let any pending connection attempts proceed (inquiry and
 * paging are mutually exclusive on the controller); on success, mark
 * the device as actively inquiring.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}
1819 
/* Command Status for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down the matching outgoing ACL connection object --
 * except for status 0x0c (Command Disallowed) on the first attempts,
 * where the connection is parked in BT_CONNECT2 (presumably for a
 * retry once the controller is free -- NOTE(review): confirm retry
 * path). On success, make sure a connection object exists so the
 * Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1857 
/* Command Status for HCI_OP_ADD_SCO.
 *
 * Only failures need handling: look up the ACL link the SCO was being
 * added on, and tear down its attached SCO connection object with the
 * failure status.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The handle in the command is the ACL link's handle */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1892 
/* Command Status for HCI_OP_AUTH_REQUESTED.
 *
 * Only failures need handling: if the connection is still in the
 * configuration phase, report the failure to the upper layers and drop
 * the reference held for the authentication attempt.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1919 
1920 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1921 {
1922 	struct hci_cp_set_conn_encrypt *cp;
1923 	struct hci_conn *conn;
1924 
1925 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1926 
1927 	if (!status)
1928 		return;
1929 
1930 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1931 	if (!cp)
1932 		return;
1933 
1934 	hci_dev_lock(hdev);
1935 
1936 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1937 	if (conn) {
1938 		if (conn->state == BT_CONFIG) {
1939 			hci_connect_cfm(conn, status);
1940 			hci_conn_drop(conn);
1941 		}
1942 	}
1943 
1944 	hci_dev_unlock(hdev);
1945 }
1946 
1947 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1948 				    struct hci_conn *conn)
1949 {
1950 	if (conn->state != BT_CONFIG || !conn->out)
1951 		return 0;
1952 
1953 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1954 		return 0;
1955 
1956 	/* Only request authentication for SSP connections or non-SSP
1957 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1958 	 * is requested.
1959 	 */
1960 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1961 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1962 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1963 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1964 		return 0;
1965 
1966 	return 1;
1967 }
1968 
1969 static int hci_resolve_name(struct hci_dev *hdev,
1970 				   struct inquiry_entry *e)
1971 {
1972 	struct hci_cp_remote_name_req cp;
1973 
1974 	memset(&cp, 0, sizeof(cp));
1975 
1976 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1977 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1978 	cp.pscan_mode = e->data.pscan_mode;
1979 	cp.clock_offset = e->data.clock_offset;
1980 
1981 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1982 }
1983 
1984 static bool hci_resolve_next_name(struct hci_dev *hdev)
1985 {
1986 	struct discovery_state *discov = &hdev->discovery;
1987 	struct inquiry_entry *e;
1988 
1989 	if (list_empty(&discov->resolve))
1990 		return false;
1991 
1992 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1993 	if (!e)
1994 		return false;
1995 
1996 	if (hci_resolve_name(hdev, e) == 0) {
1997 		e->name_state = NAME_PENDING;
1998 		return true;
1999 	}
2000 
2001 	return false;
2002 }
2003 
/* Process the outcome of a remote name lookup during discovery.
 *
 * @name is the resolved name or NULL on failure; @conn may be NULL.
 * Reports the device as connected to mgmt when appropriate, updates the
 * inquiry cache entry's name state, forwards the resolved name, and
 * either continues with the next pending resolution or finishes
 * discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending -- keep discovery in the resolving state */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2052 
/* Command Status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here (success is followed by a Remote Name
 * Request Complete event): advance the discovery name-resolution state
 * machine with a NULL name, and if an outgoing connection to the peer
 * still needs authentication, issue the Authentication Request now.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name = lookup failed; updates the inquiry cache state */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2095 
/* Command Status for HCI_OP_READ_REMOTE_FEATURES.
 *
 * Only failures need handling: if the connection is still being
 * configured, report the failure upward and drop the reference held
 * for the feature read.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2122 
2123 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2124 {
2125 	struct hci_cp_read_remote_ext_features *cp;
2126 	struct hci_conn *conn;
2127 
2128 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2129 
2130 	if (!status)
2131 		return;
2132 
2133 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2134 	if (!cp)
2135 		return;
2136 
2137 	hci_dev_lock(hdev);
2138 
2139 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2140 	if (conn) {
2141 		if (conn->state == BT_CONFIG) {
2142 			hci_connect_cfm(conn, status);
2143 			hci_conn_drop(conn);
2144 		}
2145 	}
2146 
2147 	hci_dev_unlock(hdev);
2148 }
2149 
/* Command Status for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures need handling: look up the ACL link the synchronous
 * connection was being set up on, and tear down its attached SCO/eSCO
 * connection object with the failure status.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* The handle in the command is the ACL link's handle */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2184 
/* Command Status for HCI_OP_SNIFF_MODE.
 *
 * Only failures need handling: the mode change will not happen, so
 * clear the pending-mode-change flag and, if a SCO setup was waiting
 * for the mode change, run it now with the failure status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2211 
/* Command Status for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Mirror image of hci_cs_sniff_mode(): on failure, clear the pending
 * mode change and run any SCO setup that was waiting on it with the
 * failure status.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2238 
/* Command Status for HCI_OP_DISCONNECT.
 *
 * Only failures need handling: report the failed disconnect to mgmt,
 * delete the connection object (the upper layer will not retry), and
 * re-enable advertising for LE links whose existence may have kept it
 * disabled.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		/* Remember the link type; conn is gone after hci_conn_del() */
		u8 type = conn->type;

		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* If the disconnection failed for any reason, the upper layer
		 * does not retry to disconnect in current implementation.
		 * Hence, we need to do some basic cleanup here and re-enable
		 * advertising if necessary.
		 */
		hci_conn_del(conn);
		if (type == LE_LINK)
			hci_req_reenable_advertising(hdev);
	}

	hci_dev_unlock(hdev);
}
2272 
/* Common Command Status handling for the legacy and extended LE create
 * connection commands: record the initiator/responder addresses needed
 * by SMP on the matching connection object and, unless the white list
 * drives the connection, arm the LE connection timeout.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2307 
/* Command Status for HCI_OP_LE_CREATE_CONN.
 *
 * On success, forward the address/filter parameters from the command we
 * sent to the shared cs_le_create_conn() bookkeeping. Failures are
 * handled elsewhere (see comment below).
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}
2332 
2333 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2334 {
2335 	struct hci_cp_le_ext_create_conn *cp;
2336 
2337 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2338 
2339 	/* All connection failure handling is taken care of by the
2340 	 * hci_le_conn_failed function which is triggered by the HCI
2341 	 * request completion callbacks used for connecting.
2342 	 */
2343 	if (status)
2344 		return;
2345 
2346 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2347 	if (!cp)
2348 		return;
2349 
2350 	hci_dev_lock(hdev);
2351 
2352 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2353 			  cp->own_addr_type, cp->filter_policy);
2354 
2355 	hci_dev_unlock(hdev);
2356 }
2357 
/* Command Status for LE Read Remote Features. On success the result
 * arrives later in its own completion event, so only a failed status
 * needs handling here.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Note the inverted check: bail out on SUCCESS, act on failure. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		/* If the features were requested while the connection was
		 * still being configured, report the failure upward and
		 * release the reference held for the request.
		 */
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2384 
/* Command Status for LE Start Encryption. On success the Encryption
 * Change event follows; only a failed command status requires cleanup.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Note the inverted check: bail out on SUCCESS, act on failure. */
	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	/* Encryption could not be started, so tear down the link with an
	 * authentication-failure reason and drop our reference.
	 */
	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2414 
2415 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2416 {
2417 	struct hci_cp_switch_role *cp;
2418 	struct hci_conn *conn;
2419 
2420 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2421 
2422 	if (!status)
2423 		return;
2424 
2425 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2426 	if (!cp)
2427 		return;
2428 
2429 	hci_dev_lock(hdev);
2430 
2431 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2432 	if (conn)
2433 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2434 
2435 	hci_dev_unlock(hdev);
2436 }
2437 
2438 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2439 {
2440 	__u8 status = *((__u8 *) skb->data);
2441 	struct discovery_state *discov = &hdev->discovery;
2442 	struct inquiry_entry *e;
2443 
2444 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2445 
2446 	hci_conn_check_pending(hdev);
2447 
2448 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2449 		return;
2450 
2451 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2452 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2453 
2454 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2455 		return;
2456 
2457 	hci_dev_lock(hdev);
2458 
2459 	if (discov->state != DISCOVERY_FINDING)
2460 		goto unlock;
2461 
2462 	if (list_empty(&discov->resolve)) {
2463 		/* When BR/EDR inquiry is active and no LE scanning is in
2464 		 * progress, then change discovery state to indicate completion.
2465 		 *
2466 		 * When running LE scanning and BR/EDR inquiry simultaneously
2467 		 * and the LE scan already finished, then change the discovery
2468 		 * state to indicate completion.
2469 		 */
2470 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2471 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2472 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2473 		goto unlock;
2474 	}
2475 
2476 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2477 	if (e && hci_resolve_name(hdev, e) == 0) {
2478 		e->name_state = NAME_PENDING;
2479 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2480 	} else {
2481 		/* When BR/EDR inquiry is active and no LE scanning is in
2482 		 * progress, then change discovery state to indicate completion.
2483 		 *
2484 		 * When running LE scanning and BR/EDR inquiry simultaneously
2485 		 * and the LE scan already finished, then change the discovery
2486 		 * state to indicate completion.
2487 		 */
2488 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2489 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2490 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2491 	}
2492 
2493 unlock:
2494 	hci_dev_unlock(hdev);
2495 }
2496 
2497 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2498 {
2499 	struct inquiry_data data;
2500 	struct inquiry_info *info = (void *) (skb->data + 1);
2501 	int num_rsp = *((__u8 *) skb->data);
2502 
2503 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2504 
2505 	if (!num_rsp)
2506 		return;
2507 
2508 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2509 		return;
2510 
2511 	hci_dev_lock(hdev);
2512 
2513 	for (; num_rsp; num_rsp--, info++) {
2514 		u32 flags;
2515 
2516 		bacpy(&data.bdaddr, &info->bdaddr);
2517 		data.pscan_rep_mode	= info->pscan_rep_mode;
2518 		data.pscan_period_mode	= info->pscan_period_mode;
2519 		data.pscan_mode		= info->pscan_mode;
2520 		memcpy(data.dev_class, info->dev_class, 3);
2521 		data.clock_offset	= info->clock_offset;
2522 		data.rssi		= HCI_RSSI_INVALID;
2523 		data.ssp_mode		= 0x00;
2524 
2525 		flags = hci_inquiry_cache_update(hdev, &data, false);
2526 
2527 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2528 				  info->dev_class, HCI_RSSI_INVALID,
2529 				  flags, NULL, 0, NULL, 0);
2530 	}
2531 
2532 	hci_dev_unlock(hdev);
2533 }
2534 
/* Connection Complete event: a BR/EDR connection attempt (ACL or SCO)
 * has finished, successfully or not. Sets up the hci_conn state,
 * requests remote features for ACL links and notifies upper layers.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the inquiry
		 * cache to see if we've already discovered this bdaddr before.
		 * If found and link is an ACL type, create a connection class
		 * automatically.
		 */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie && ev->link_type == ACL_LINK) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* The controller may complete a link requested as
			 * eSCO with link_type SCO; look up the pending eSCO
			 * connection and downgrade its type to match.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			/* ACL links stay in BT_CONFIG until remote feature
			 * discovery (and any needed authentication) is done.
			 */
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a stored
			 * link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		/* Mirror the adapter-wide auth/encrypt settings onto the
		 * new connection's flags.
		 */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		/* Notify upper layers of the failure, then tear the
		 * connection object down.
		 */
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2644 
2645 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2646 {
2647 	struct hci_cp_reject_conn_req cp;
2648 
2649 	bacpy(&cp.bdaddr, bdaddr);
2650 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2651 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2652 }
2653 
/* Connection Request event: a remote device wants to connect. Decide
 * whether to reject, accept immediately, or defer the decision to the
 * L2CAP/SCO layers via the HCI_PROTO_DEFER flag.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Blacklisted devices are always rejected. */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the request. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		/* Accept an ACL link, or a sync link on a controller
		 * without eSCO support, with the plain accept command.
		 */
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Accept a synchronous link with default eSCO parameters. */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* The protocol layer asked to defer; let it decide later
		 * whether to accept via hci_connect_cfm.
		 */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2748 
2749 static u8 hci_to_mgmt_reason(u8 err)
2750 {
2751 	switch (err) {
2752 	case HCI_ERROR_CONNECTION_TIMEOUT:
2753 		return MGMT_DEV_DISCONN_TIMEOUT;
2754 	case HCI_ERROR_REMOTE_USER_TERM:
2755 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2756 	case HCI_ERROR_REMOTE_POWER_OFF:
2757 		return MGMT_DEV_DISCONN_REMOTE;
2758 	case HCI_ERROR_LOCAL_HOST_TERM:
2759 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2760 	default:
2761 		return MGMT_DEV_DISCONN_UNKNOWN;
2762 	}
2763 }
2764 
/* Disconnection Complete event: a connection was torn down. Notify
 * mgmt and the protocol layers, delete the hci_conn, re-arm background
 * scanning for auto-connect devices and re-enable LE advertising.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A prior authentication failure overrides the HCI reason code
	 * reported in the event.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-queue devices configured for auto-connection so background
	 * scanning picks them up again.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Cache the link type before hci_conn_del frees the object. */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2856 
/* Authentication Complete event: record the auth result on the
 * connection, notify upper layers and, where needed, follow up with a
 * Set Connection Encryption command.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Re-authentication of a legacy (non-SSP) link does not
		 * upgrade the security level, so don't update it.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During connection setup on an SSP link, successful auth
		 * is followed by turning on encryption before the link is
		 * reported connected.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was requested alongside authentication, either
	 * issue it now or report the failure to waiters.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2926 
/* Remote Name Request Complete event: deliver the resolved name to the
 * discovery machinery and, for outgoing connections that still need
 * it, start authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name delivery to mgmt only matters when mgmt is in use. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2968 
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE, issued from
 * hci_encrypt_change_evt. Records the negotiated encryption key size
 * on the connection and then performs the deferred connect/encrypt
 * notification that was skipped while the read was in flight.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		/* Already-connected links get an encrypt-change
		 * notification; derive the encrypt value from the
		 * connection flags.
		 */
		u8 encrypt;

		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
3024 
/* Encryption Change event: update the connection's security flags,
 * enforce Secure Connections Only policy, read the encryption key size
 * for ACL links, optionally set the authenticated payload timeout, and
 * notify upper layers. Notification may be deferred to
 * read_enc_key_size_complete when a key size read is issued.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* An encryption failure on an established link means the link
	 * key is no longer trustworthy; disconnect.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete. */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
3149 
3150 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3151 					     struct sk_buff *skb)
3152 {
3153 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3154 	struct hci_conn *conn;
3155 
3156 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3157 
3158 	hci_dev_lock(hdev);
3159 
3160 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3161 	if (conn) {
3162 		if (!ev->status)
3163 			set_bit(HCI_CONN_SECURE, &conn->flags);
3164 
3165 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3166 
3167 		hci_key_change_cfm(conn, ev->status);
3168 	}
3169 
3170 	hci_dev_unlock(hdev);
3171 }
3172 
/* Read Remote Supported Features Complete event: store the remote
 * feature page and continue connection setup — either by fetching
 * extended features, requesting the remote name, or completing the
 * connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply during connection setup. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* When both sides support extended features, fetch page 1 before
	 * continuing; setup resumes in the ext features handler.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the device to mgmt;
	 * otherwise announce it as connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3221 
3222 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3223 				 u16 *opcode, u8 *status,
3224 				 hci_req_complete_t *req_complete,
3225 				 hci_req_complete_skb_t *req_complete_skb)
3226 {
3227 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
3228 
3229 	*opcode = __le16_to_cpu(ev->opcode);
3230 	*status = skb->data[sizeof(*ev)];
3231 
3232 	skb_pull(skb, sizeof(*ev));
3233 
3234 	switch (*opcode) {
3235 	case HCI_OP_INQUIRY_CANCEL:
3236 		hci_cc_inquiry_cancel(hdev, skb);
3237 		break;
3238 
3239 	case HCI_OP_PERIODIC_INQ:
3240 		hci_cc_periodic_inq(hdev, skb);
3241 		break;
3242 
3243 	case HCI_OP_EXIT_PERIODIC_INQ:
3244 		hci_cc_exit_periodic_inq(hdev, skb);
3245 		break;
3246 
3247 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3248 		hci_cc_remote_name_req_cancel(hdev, skb);
3249 		break;
3250 
3251 	case HCI_OP_ROLE_DISCOVERY:
3252 		hci_cc_role_discovery(hdev, skb);
3253 		break;
3254 
3255 	case HCI_OP_READ_LINK_POLICY:
3256 		hci_cc_read_link_policy(hdev, skb);
3257 		break;
3258 
3259 	case HCI_OP_WRITE_LINK_POLICY:
3260 		hci_cc_write_link_policy(hdev, skb);
3261 		break;
3262 
3263 	case HCI_OP_READ_DEF_LINK_POLICY:
3264 		hci_cc_read_def_link_policy(hdev, skb);
3265 		break;
3266 
3267 	case HCI_OP_WRITE_DEF_LINK_POLICY:
3268 		hci_cc_write_def_link_policy(hdev, skb);
3269 		break;
3270 
3271 	case HCI_OP_RESET:
3272 		hci_cc_reset(hdev, skb);
3273 		break;
3274 
3275 	case HCI_OP_READ_STORED_LINK_KEY:
3276 		hci_cc_read_stored_link_key(hdev, skb);
3277 		break;
3278 
3279 	case HCI_OP_DELETE_STORED_LINK_KEY:
3280 		hci_cc_delete_stored_link_key(hdev, skb);
3281 		break;
3282 
3283 	case HCI_OP_WRITE_LOCAL_NAME:
3284 		hci_cc_write_local_name(hdev, skb);
3285 		break;
3286 
3287 	case HCI_OP_READ_LOCAL_NAME:
3288 		hci_cc_read_local_name(hdev, skb);
3289 		break;
3290 
3291 	case HCI_OP_WRITE_AUTH_ENABLE:
3292 		hci_cc_write_auth_enable(hdev, skb);
3293 		break;
3294 
3295 	case HCI_OP_WRITE_ENCRYPT_MODE:
3296 		hci_cc_write_encrypt_mode(hdev, skb);
3297 		break;
3298 
3299 	case HCI_OP_WRITE_SCAN_ENABLE:
3300 		hci_cc_write_scan_enable(hdev, skb);
3301 		break;
3302 
3303 	case HCI_OP_READ_CLASS_OF_DEV:
3304 		hci_cc_read_class_of_dev(hdev, skb);
3305 		break;
3306 
3307 	case HCI_OP_WRITE_CLASS_OF_DEV:
3308 		hci_cc_write_class_of_dev(hdev, skb);
3309 		break;
3310 
3311 	case HCI_OP_READ_VOICE_SETTING:
3312 		hci_cc_read_voice_setting(hdev, skb);
3313 		break;
3314 
3315 	case HCI_OP_WRITE_VOICE_SETTING:
3316 		hci_cc_write_voice_setting(hdev, skb);
3317 		break;
3318 
3319 	case HCI_OP_READ_NUM_SUPPORTED_IAC:
3320 		hci_cc_read_num_supported_iac(hdev, skb);
3321 		break;
3322 
3323 	case HCI_OP_WRITE_SSP_MODE:
3324 		hci_cc_write_ssp_mode(hdev, skb);
3325 		break;
3326 
3327 	case HCI_OP_WRITE_SC_SUPPORT:
3328 		hci_cc_write_sc_support(hdev, skb);
3329 		break;
3330 
3331 	case HCI_OP_READ_AUTH_PAYLOAD_TO:
3332 		hci_cc_read_auth_payload_timeout(hdev, skb);
3333 		break;
3334 
3335 	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3336 		hci_cc_write_auth_payload_timeout(hdev, skb);
3337 		break;
3338 
3339 	case HCI_OP_READ_LOCAL_VERSION:
3340 		hci_cc_read_local_version(hdev, skb);
3341 		break;
3342 
3343 	case HCI_OP_READ_LOCAL_COMMANDS:
3344 		hci_cc_read_local_commands(hdev, skb);
3345 		break;
3346 
3347 	case HCI_OP_READ_LOCAL_FEATURES:
3348 		hci_cc_read_local_features(hdev, skb);
3349 		break;
3350 
3351 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
3352 		hci_cc_read_local_ext_features(hdev, skb);
3353 		break;
3354 
3355 	case HCI_OP_READ_BUFFER_SIZE:
3356 		hci_cc_read_buffer_size(hdev, skb);
3357 		break;
3358 
3359 	case HCI_OP_READ_BD_ADDR:
3360 		hci_cc_read_bd_addr(hdev, skb);
3361 		break;
3362 
3363 	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3364 		hci_cc_read_local_pairing_opts(hdev, skb);
3365 		break;
3366 
3367 	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3368 		hci_cc_read_page_scan_activity(hdev, skb);
3369 		break;
3370 
3371 	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3372 		hci_cc_write_page_scan_activity(hdev, skb);
3373 		break;
3374 
3375 	case HCI_OP_READ_PAGE_SCAN_TYPE:
3376 		hci_cc_read_page_scan_type(hdev, skb);
3377 		break;
3378 
3379 	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3380 		hci_cc_write_page_scan_type(hdev, skb);
3381 		break;
3382 
3383 	case HCI_OP_READ_DATA_BLOCK_SIZE:
3384 		hci_cc_read_data_block_size(hdev, skb);
3385 		break;
3386 
3387 	case HCI_OP_READ_FLOW_CONTROL_MODE:
3388 		hci_cc_read_flow_control_mode(hdev, skb);
3389 		break;
3390 
3391 	case HCI_OP_READ_LOCAL_AMP_INFO:
3392 		hci_cc_read_local_amp_info(hdev, skb);
3393 		break;
3394 
3395 	case HCI_OP_READ_CLOCK:
3396 		hci_cc_read_clock(hdev, skb);
3397 		break;
3398 
3399 	case HCI_OP_READ_INQ_RSP_TX_POWER:
3400 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
3401 		break;
3402 
3403 	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3404 		hci_cc_read_def_err_data_reporting(hdev, skb);
3405 		break;
3406 
3407 	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3408 		hci_cc_write_def_err_data_reporting(hdev, skb);
3409 		break;
3410 
3411 	case HCI_OP_PIN_CODE_REPLY:
3412 		hci_cc_pin_code_reply(hdev, skb);
3413 		break;
3414 
3415 	case HCI_OP_PIN_CODE_NEG_REPLY:
3416 		hci_cc_pin_code_neg_reply(hdev, skb);
3417 		break;
3418 
3419 	case HCI_OP_READ_LOCAL_OOB_DATA:
3420 		hci_cc_read_local_oob_data(hdev, skb);
3421 		break;
3422 
3423 	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3424 		hci_cc_read_local_oob_ext_data(hdev, skb);
3425 		break;
3426 
3427 	case HCI_OP_LE_READ_BUFFER_SIZE:
3428 		hci_cc_le_read_buffer_size(hdev, skb);
3429 		break;
3430 
3431 	case HCI_OP_LE_READ_LOCAL_FEATURES:
3432 		hci_cc_le_read_local_features(hdev, skb);
3433 		break;
3434 
3435 	case HCI_OP_LE_READ_ADV_TX_POWER:
3436 		hci_cc_le_read_adv_tx_power(hdev, skb);
3437 		break;
3438 
3439 	case HCI_OP_USER_CONFIRM_REPLY:
3440 		hci_cc_user_confirm_reply(hdev, skb);
3441 		break;
3442 
3443 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
3444 		hci_cc_user_confirm_neg_reply(hdev, skb);
3445 		break;
3446 
3447 	case HCI_OP_USER_PASSKEY_REPLY:
3448 		hci_cc_user_passkey_reply(hdev, skb);
3449 		break;
3450 
3451 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
3452 		hci_cc_user_passkey_neg_reply(hdev, skb);
3453 		break;
3454 
3455 	case HCI_OP_LE_SET_RANDOM_ADDR:
3456 		hci_cc_le_set_random_addr(hdev, skb);
3457 		break;
3458 
3459 	case HCI_OP_LE_SET_ADV_ENABLE:
3460 		hci_cc_le_set_adv_enable(hdev, skb);
3461 		break;
3462 
3463 	case HCI_OP_LE_SET_SCAN_PARAM:
3464 		hci_cc_le_set_scan_param(hdev, skb);
3465 		break;
3466 
3467 	case HCI_OP_LE_SET_SCAN_ENABLE:
3468 		hci_cc_le_set_scan_enable(hdev, skb);
3469 		break;
3470 
3471 	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3472 		hci_cc_le_read_white_list_size(hdev, skb);
3473 		break;
3474 
3475 	case HCI_OP_LE_CLEAR_WHITE_LIST:
3476 		hci_cc_le_clear_white_list(hdev, skb);
3477 		break;
3478 
3479 	case HCI_OP_LE_ADD_TO_WHITE_LIST:
3480 		hci_cc_le_add_to_white_list(hdev, skb);
3481 		break;
3482 
3483 	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3484 		hci_cc_le_del_from_white_list(hdev, skb);
3485 		break;
3486 
3487 	case HCI_OP_LE_READ_SUPPORTED_STATES:
3488 		hci_cc_le_read_supported_states(hdev, skb);
3489 		break;
3490 
3491 	case HCI_OP_LE_READ_DEF_DATA_LEN:
3492 		hci_cc_le_read_def_data_len(hdev, skb);
3493 		break;
3494 
3495 	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3496 		hci_cc_le_write_def_data_len(hdev, skb);
3497 		break;
3498 
3499 	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3500 		hci_cc_le_add_to_resolv_list(hdev, skb);
3501 		break;
3502 
3503 	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3504 		hci_cc_le_del_from_resolv_list(hdev, skb);
3505 		break;
3506 
3507 	case HCI_OP_LE_CLEAR_RESOLV_LIST:
3508 		hci_cc_le_clear_resolv_list(hdev, skb);
3509 		break;
3510 
3511 	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3512 		hci_cc_le_read_resolv_list_size(hdev, skb);
3513 		break;
3514 
3515 	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3516 		hci_cc_le_set_addr_resolution_enable(hdev, skb);
3517 		break;
3518 
3519 	case HCI_OP_LE_READ_MAX_DATA_LEN:
3520 		hci_cc_le_read_max_data_len(hdev, skb);
3521 		break;
3522 
3523 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3524 		hci_cc_write_le_host_supported(hdev, skb);
3525 		break;
3526 
3527 	case HCI_OP_LE_SET_ADV_PARAM:
3528 		hci_cc_set_adv_param(hdev, skb);
3529 		break;
3530 
3531 	case HCI_OP_READ_RSSI:
3532 		hci_cc_read_rssi(hdev, skb);
3533 		break;
3534 
3535 	case HCI_OP_READ_TX_POWER:
3536 		hci_cc_read_tx_power(hdev, skb);
3537 		break;
3538 
3539 	case HCI_OP_WRITE_SSP_DEBUG_MODE:
3540 		hci_cc_write_ssp_debug_mode(hdev, skb);
3541 		break;
3542 
3543 	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3544 		hci_cc_le_set_ext_scan_param(hdev, skb);
3545 		break;
3546 
3547 	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3548 		hci_cc_le_set_ext_scan_enable(hdev, skb);
3549 		break;
3550 
3551 	case HCI_OP_LE_SET_DEFAULT_PHY:
3552 		hci_cc_le_set_default_phy(hdev, skb);
3553 		break;
3554 
3555 	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3556 		hci_cc_le_read_num_adv_sets(hdev, skb);
3557 		break;
3558 
3559 	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3560 		hci_cc_set_ext_adv_param(hdev, skb);
3561 		break;
3562 
3563 	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3564 		hci_cc_le_set_ext_adv_enable(hdev, skb);
3565 		break;
3566 
3567 	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3568 		hci_cc_le_set_adv_set_random_addr(hdev, skb);
3569 		break;
3570 
3571 	default:
3572 		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3573 		break;
3574 	}
3575 
3576 	if (*opcode != HCI_OP_NOP)
3577 		cancel_delayed_work(&hdev->cmd_timer);
3578 
3579 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3580 		atomic_set(&hdev->cmd_cnt, 1);
3581 
3582 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3583 			     req_complete_skb);
3584 
3585 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3586 		bt_dev_err(hdev,
3587 			   "unexpected event for opcode 0x%4.4x", *opcode);
3588 		return;
3589 	}
3590 
3591 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3592 		queue_work(hdev->workqueue, &hdev->cmd_work);
3593 }
3594 
/* Handle the HCI Command Status event: dispatch to the handler for the
 * command that generated the status, re-arm command flow control, and
 * kick the command queue if the controller can accept more commands.
 *
 * @opcode/@status are out-parameters reported back to the caller so the
 * event dispatcher can match the status against pending requests via
 * @req_complete/@req_complete_skb.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	/* Strip the event header so handlers see only the payload */
	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		/* No dedicated handler; the generic completion below still
		 * applies.
		 */
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command generated status: stop the command timeout timer */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Controller advertised a free command slot; don't re-enable
	 * sending while a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* While HCI_CMD_PENDING is set further events for this opcode are
	 * unexpected; leave the command queue untouched.
	 */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Push the next queued command if we have credit for it */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3707 
3708 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3709 {
3710 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3711 
3712 	hdev->hw_error_code = ev->code;
3713 
3714 	queue_work(hdev->req_workqueue, &hdev->error_reset);
3715 }
3716 
3717 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3718 {
3719 	struct hci_ev_role_change *ev = (void *) skb->data;
3720 	struct hci_conn *conn;
3721 
3722 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3723 
3724 	hci_dev_lock(hdev);
3725 
3726 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3727 	if (conn) {
3728 		if (!ev->status)
3729 			conn->role = ev->role;
3730 
3731 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3732 
3733 		hci_role_switch_cfm(conn, ev->status, ev->role);
3734 	}
3735 
3736 	hci_dev_unlock(hdev);
3737 }
3738 
/* Handle the HCI Number of Completed Packets event: return transmit
 * credits to the per-link-type counters for every reported connection
 * handle, then schedule the TX work to flush queued data.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in packet-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Verify the skb really holds num_hndl handle/count pairs */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		/* Unknown handle: the connection may already be gone */
		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the matching pool, clamping at the
		 * controller-advertised maximum to guard against bogus
		 * counts from the controller.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits instead.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3805 
3806 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3807 						 __u16 handle)
3808 {
3809 	struct hci_chan *chan;
3810 
3811 	switch (hdev->dev_type) {
3812 	case HCI_PRIMARY:
3813 		return hci_conn_hash_lookup_handle(hdev, handle);
3814 	case HCI_AMP:
3815 		chan = hci_chan_lookup_handle(hdev, handle);
3816 		if (chan)
3817 			return chan->conn;
3818 		break;
3819 	default:
3820 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3821 		break;
3822 	}
3823 
3824 	return NULL;
3825 }
3826 
/* Handle the HCI Number of Completed Data Blocks event (block-based
 * flow control, used with AMP controllers): return data-block credits
 * for each reported handle and schedule the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Verify the skb really holds num_hndl handle/block entries */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may map to an hci_conn or, via an AMP logical
		 * channel, to its owning connection.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Credit back blocks, clamped to the advertised maximum
		 * to guard against bogus counts from the controller.
		 */
		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3877 
3878 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3879 {
3880 	struct hci_ev_mode_change *ev = (void *) skb->data;
3881 	struct hci_conn *conn;
3882 
3883 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3884 
3885 	hci_dev_lock(hdev);
3886 
3887 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3888 	if (conn) {
3889 		conn->mode = ev->mode;
3890 
3891 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3892 					&conn->flags)) {
3893 			if (conn->mode == HCI_CM_ACTIVE)
3894 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3895 			else
3896 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3897 		}
3898 
3899 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3900 			hci_sco_setup(conn, ev->status);
3901 	}
3902 
3903 	hci_dev_unlock(hdev);
3904 }
3905 
3906 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3907 {
3908 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3909 	struct hci_conn *conn;
3910 
3911 	BT_DBG("%s", hdev->name);
3912 
3913 	hci_dev_lock(hdev);
3914 
3915 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3916 	if (!conn)
3917 		goto unlock;
3918 
3919 	if (conn->state == BT_CONNECTED) {
3920 		hci_conn_hold(conn);
3921 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3922 		hci_conn_drop(conn);
3923 	}
3924 
3925 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3926 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3927 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3928 			     sizeof(ev->bdaddr), &ev->bdaddr);
3929 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3930 		u8 secure;
3931 
3932 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3933 			secure = 1;
3934 		else
3935 			secure = 0;
3936 
3937 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3938 	}
3939 
3940 unlock:
3941 	hci_dev_unlock(hdev);
3942 }
3943 
3944 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3945 {
3946 	if (key_type == HCI_LK_CHANGED_COMBINATION)
3947 		return;
3948 
3949 	conn->pin_length = pin_len;
3950 	conn->key_type = key_type;
3951 
3952 	switch (key_type) {
3953 	case HCI_LK_LOCAL_UNIT:
3954 	case HCI_LK_REMOTE_UNIT:
3955 	case HCI_LK_DEBUG_COMBINATION:
3956 		return;
3957 	case HCI_LK_COMBINATION:
3958 		if (pin_len == 16)
3959 			conn->pending_sec_level = BT_SECURITY_HIGH;
3960 		else
3961 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3962 		break;
3963 	case HCI_LK_UNAUTH_COMBINATION_P192:
3964 	case HCI_LK_UNAUTH_COMBINATION_P256:
3965 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3966 		break;
3967 	case HCI_LK_AUTH_COMBINATION_P192:
3968 		conn->pending_sec_level = BT_SECURITY_HIGH;
3969 		break;
3970 	case HCI_LK_AUTH_COMBINATION_P256:
3971 		conn->pending_sec_level = BT_SECURITY_FIPS;
3972 		break;
3973 	}
3974 }
3975 
/* Handle the HCI Link Key Request event: look up a stored link key for
 * the peer and reply with it, or send a negative reply when no key is
 * stored or the stored key is too weak for the pending security level.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Link key storage is only handled through the mgmt interface */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when the connection's
		 * auth requirements include MITM protection (bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A legacy combination key from a short PIN is too weak
		 * for high/FIPS pending security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4035 
/* Handle the HCI Link Key Notification event: store the newly created
 * link key, notify user space via mgmt, and decide whether the key
 * should persist beyond the current connection.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len is always 0 when passed to
	 * hci_add_link_key() below, while conn_set_key() above uses
	 * conn->pin_length — confirm this asymmetry is intentional.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing state settles */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Key storage is only handled through the mgmt interface */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection drops */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4095 
4096 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4097 {
4098 	struct hci_ev_clock_offset *ev = (void *) skb->data;
4099 	struct hci_conn *conn;
4100 
4101 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4102 
4103 	hci_dev_lock(hdev);
4104 
4105 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4106 	if (conn && !ev->status) {
4107 		struct inquiry_entry *ie;
4108 
4109 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4110 		if (ie) {
4111 			ie->data.clock_offset = ev->clock_offset;
4112 			ie->timestamp = jiffies;
4113 		}
4114 	}
4115 
4116 	hci_dev_unlock(hdev);
4117 }
4118 
4119 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4120 {
4121 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4122 	struct hci_conn *conn;
4123 
4124 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4125 
4126 	hci_dev_lock(hdev);
4127 
4128 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4129 	if (conn && !ev->status)
4130 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4131 
4132 	hci_dev_unlock(hdev);
4133 }
4134 
4135 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4136 {
4137 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4138 	struct inquiry_entry *ie;
4139 
4140 	BT_DBG("%s", hdev->name);
4141 
4142 	hci_dev_lock(hdev);
4143 
4144 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4145 	if (ie) {
4146 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4147 		ie->timestamp = jiffies;
4148 	}
4149 
4150 	hci_dev_unlock(hdev);
4151 }
4152 
4153 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4154 					     struct sk_buff *skb)
4155 {
4156 	struct inquiry_data data;
4157 	int num_rsp = *((__u8 *) skb->data);
4158 
4159 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4160 
4161 	if (!num_rsp)
4162 		return;
4163 
4164 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4165 		return;
4166 
4167 	hci_dev_lock(hdev);
4168 
4169 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4170 		struct inquiry_info_with_rssi_and_pscan_mode *info;
4171 		info = (void *) (skb->data + 1);
4172 
4173 		for (; num_rsp; num_rsp--, info++) {
4174 			u32 flags;
4175 
4176 			bacpy(&data.bdaddr, &info->bdaddr);
4177 			data.pscan_rep_mode	= info->pscan_rep_mode;
4178 			data.pscan_period_mode	= info->pscan_period_mode;
4179 			data.pscan_mode		= info->pscan_mode;
4180 			memcpy(data.dev_class, info->dev_class, 3);
4181 			data.clock_offset	= info->clock_offset;
4182 			data.rssi		= info->rssi;
4183 			data.ssp_mode		= 0x00;
4184 
4185 			flags = hci_inquiry_cache_update(hdev, &data, false);
4186 
4187 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4188 					  info->dev_class, info->rssi,
4189 					  flags, NULL, 0, NULL, 0);
4190 		}
4191 	} else {
4192 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4193 
4194 		for (; num_rsp; num_rsp--, info++) {
4195 			u32 flags;
4196 
4197 			bacpy(&data.bdaddr, &info->bdaddr);
4198 			data.pscan_rep_mode	= info->pscan_rep_mode;
4199 			data.pscan_period_mode	= info->pscan_period_mode;
4200 			data.pscan_mode		= 0x00;
4201 			memcpy(data.dev_class, info->dev_class, 3);
4202 			data.clock_offset	= info->clock_offset;
4203 			data.rssi		= info->rssi;
4204 			data.ssp_mode		= 0x00;
4205 
4206 			flags = hci_inquiry_cache_update(hdev, &data, false);
4207 
4208 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4209 					  info->dev_class, info->rssi,
4210 					  flags, NULL, 0, NULL, 0);
4211 		}
4212 	}
4213 
4214 	hci_dev_unlock(hdev);
4215 }
4216 
/* Handle the HCI Read Remote Extended Features Complete event: cache
 * the remote feature page, track SSP/SC host support from page 1, and
 * continue connection setup (name request, authentication, confirm).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the feature page, bounds-checked against HCI_MAX_PAGES */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host features (SSP/SC support) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remainder only applies while setup is still in progress */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If mgmt has not been told about the connection yet, fetch the
	 * remote name first; otherwise report the connection now.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* No outgoing authentication needed: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4280 
/* Handle the HCI Synchronous Connection Complete event for SCO/eSCO
 * links: finalize the connection on success, retry with a downgraded
 * packet type on known negotiation failures, and notify the driver of
 * the selected air mode.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		/* Success: record handle/type and expose the connection */
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Known negotiation failures: retry an outgoing attempt
		 * with a less demanding packet type selection.
		 *
		 * NOTE(review): conn->link is dereferenced without a NULL
		 * check — confirm every outgoing SCO/eSCO conn has its
		 * parent ACL link set when this event arrives.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);

	/* Tell the driver which SCO routing/codec mode is now in use.
	 * NOTE(review): this also runs on failure paths — confirm whether
	 * the notify calls should be limited to ev->status == 0.
	 */
	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_CVSD:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
		break;
	case SCO_AIRMODE_TRANSP:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4360 
4361 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4362 {
4363 	size_t parsed = 0;
4364 
4365 	while (parsed < eir_len) {
4366 		u8 field_len = eir[0];
4367 
4368 		if (field_len == 0)
4369 			return parsed;
4370 
4371 		parsed += field_len + 1;
4372 		eir += field_len + 1;
4373 	}
4374 
4375 	return eir_len;
4376 }
4377 
4378 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4379 					    struct sk_buff *skb)
4380 {
4381 	struct inquiry_data data;
4382 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
4383 	int num_rsp = *((__u8 *) skb->data);
4384 	size_t eir_len;
4385 
4386 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4387 
4388 	if (!num_rsp)
4389 		return;
4390 
4391 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4392 		return;
4393 
4394 	hci_dev_lock(hdev);
4395 
4396 	for (; num_rsp; num_rsp--, info++) {
4397 		u32 flags;
4398 		bool name_known;
4399 
4400 		bacpy(&data.bdaddr, &info->bdaddr);
4401 		data.pscan_rep_mode	= info->pscan_rep_mode;
4402 		data.pscan_period_mode	= info->pscan_period_mode;
4403 		data.pscan_mode		= 0x00;
4404 		memcpy(data.dev_class, info->dev_class, 3);
4405 		data.clock_offset	= info->clock_offset;
4406 		data.rssi		= info->rssi;
4407 		data.ssp_mode		= 0x01;
4408 
4409 		if (hci_dev_test_flag(hdev, HCI_MGMT))
4410 			name_known = eir_get_data(info->data,
4411 						  sizeof(info->data),
4412 						  EIR_NAME_COMPLETE, NULL);
4413 		else
4414 			name_known = true;
4415 
4416 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
4417 
4418 		eir_len = eir_get_length(info->data, sizeof(info->data));
4419 
4420 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4421 				  info->dev_class, info->rssi,
4422 				  flags, info->data, eir_len, NULL, 0);
4423 	}
4424 
4425 	hci_dev_unlock(hdev);
4426 }
4427 
/* Handle the HCI Encryption Key Refresh Complete event: update the LE
 * connection's security level and drive the connect/auth confirmation
 * state machine. BR/EDR is handled via the auth complete event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* Promote the security level only on successful refresh */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link forces a disconnect */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup path: confirm the connection */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		/* Re-encryption path: confirm authentication and extend
		 * the disconnect timeout.
		 */
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4477 
4478 static u8 hci_get_auth_req(struct hci_conn *conn)
4479 {
4480 	/* If remote requests no-bonding follow that lead */
4481 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
4482 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4483 		return conn->remote_auth | (conn->auth_type & 0x01);
4484 
4485 	/* If both remote and local have enough IO capabilities, require
4486 	 * MITM protection
4487 	 */
4488 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4489 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4490 		return conn->remote_auth | 0x01;
4491 
4492 	/* No MITM protection possible so ignore remote requirement */
4493 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4494 }
4495 
4496 static u8 bredr_oob_data_present(struct hci_conn *conn)
4497 {
4498 	struct hci_dev *hdev = conn->hdev;
4499 	struct oob_data *data;
4500 
4501 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4502 	if (!data)
4503 		return 0x00;
4504 
4505 	if (bredr_sc_enabled(hdev)) {
4506 		/* When Secure Connections is enabled, then just
4507 		 * return the present value stored with the OOB
4508 		 * data. The stored value contains the right present
4509 		 * information. However it can only be trusted when
4510 		 * not in Secure Connection Only mode.
4511 		 */
4512 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4513 			return data->present;
4514 
4515 		/* When Secure Connections Only mode is enabled, then
4516 		 * the P-256 values are required. If they are not
4517 		 * available, then do not declare that OOB data is
4518 		 * present.
4519 		 */
4520 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4521 		    !memcmp(data->hash256, ZERO_KEY, 16))
4522 			return 0x00;
4523 
4524 		return 0x02;
4525 	}
4526 
4527 	/* When Secure Connections is not enabled or actually
4528 	 * not supported by the hardware, then check that if
4529 	 * P-192 data values are present.
4530 	 */
4531 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4532 	    !memcmp(data->hash192, ZERO_KEY, 16))
4533 		return 0x00;
4534 
4535 	return 0x01;
4536 }
4537 
/* Handle HCI IO Capability Request event.
 *
 * Decides whether to proceed with SSP pairing. If pairing is allowed, an
 * IO Capability Reply is sent carrying our IO capability, authentication
 * requirements and OOB data presence; otherwise a negative reply with
 * "pairing not allowed" is sent.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive while pairing is in progress */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing not permitted: reject the request */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4606 
4607 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4608 {
4609 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4610 	struct hci_conn *conn;
4611 
4612 	BT_DBG("%s", hdev->name);
4613 
4614 	hci_dev_lock(hdev);
4615 
4616 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4617 	if (!conn)
4618 		goto unlock;
4619 
4620 	conn->remote_cap = ev->capability;
4621 	conn->remote_auth = ev->authentication;
4622 
4623 unlock:
4624 	hci_dev_unlock(hdev);
4625 }
4626 
/* Handle HCI User Confirmation Request event.
 *
 * Implements the numeric-comparison decision logic: reject when required
 * MITM protection is impossible, auto-accept (possibly after a configured
 * delay) when neither side needs MITM, and otherwise forward the request
 * to user space via mgmt, optionally with a hint that auto-accept is the
 * expected answer.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM protection flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Defer the accept so user space gets a chance to see it */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Hand the decision to user space, with confirm_hint indicating
	 * whether auto-accept is the suggested response.
	 */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4711 
4712 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4713 					 struct sk_buff *skb)
4714 {
4715 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4716 
4717 	BT_DBG("%s", hdev->name);
4718 
4719 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4720 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4721 }
4722 
4723 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4724 					struct sk_buff *skb)
4725 {
4726 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4727 	struct hci_conn *conn;
4728 
4729 	BT_DBG("%s", hdev->name);
4730 
4731 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4732 	if (!conn)
4733 		return;
4734 
4735 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4736 	conn->passkey_entered = 0;
4737 
4738 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4739 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4740 					 conn->dst_type, conn->passkey_notify,
4741 					 conn->passkey_entered);
4742 }
4743 
4744 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4745 {
4746 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4747 	struct hci_conn *conn;
4748 
4749 	BT_DBG("%s", hdev->name);
4750 
4751 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4752 	if (!conn)
4753 		return;
4754 
4755 	switch (ev->type) {
4756 	case HCI_KEYPRESS_STARTED:
4757 		conn->passkey_entered = 0;
4758 		return;
4759 
4760 	case HCI_KEYPRESS_ENTERED:
4761 		conn->passkey_entered++;
4762 		break;
4763 
4764 	case HCI_KEYPRESS_ERASED:
4765 		conn->passkey_entered--;
4766 		break;
4767 
4768 	case HCI_KEYPRESS_CLEARED:
4769 		conn->passkey_entered = 0;
4770 		break;
4771 
4772 	case HCI_KEYPRESS_COMPLETED:
4773 		return;
4774 	}
4775 
4776 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4777 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4778 					 conn->dst_type, conn->passkey_notify,
4779 					 conn->passkey_entered);
4780 }
4781 
4782 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4783 					 struct sk_buff *skb)
4784 {
4785 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4786 	struct hci_conn *conn;
4787 
4788 	BT_DBG("%s", hdev->name);
4789 
4790 	hci_dev_lock(hdev);
4791 
4792 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4793 	if (!conn)
4794 		goto unlock;
4795 
4796 	/* Reset the authentication requirement to unknown */
4797 	conn->remote_auth = 0xff;
4798 
4799 	/* To avoid duplicate auth_failed events to user space we check
4800 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4801 	 * initiated the authentication. A traditional auth_complete
4802 	 * event gets always produced as initiator and is also mapped to
4803 	 * the mgmt_auth_failed event */
4804 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4805 		mgmt_auth_failed(conn, ev->status);
4806 
4807 	hci_conn_drop(conn);
4808 
4809 unlock:
4810 	hci_dev_unlock(hdev);
4811 }
4812 
4813 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4814 					 struct sk_buff *skb)
4815 {
4816 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4817 	struct inquiry_entry *ie;
4818 	struct hci_conn *conn;
4819 
4820 	BT_DBG("%s", hdev->name);
4821 
4822 	hci_dev_lock(hdev);
4823 
4824 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4825 	if (conn)
4826 		memcpy(conn->features[1], ev->features, 8);
4827 
4828 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4829 	if (ie)
4830 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4831 
4832 	hci_dev_unlock(hdev);
4833 }
4834 
/* Handle HCI Remote OOB Data Request event.
 *
 * Replies with the stored OOB data for the peer: a negative reply if no
 * data is stored, an extended reply (P-192 + P-256, with P-192 zeroed in
 * Secure Connections Only mode) when Secure Connections is enabled, or a
 * legacy P-192-only reply otherwise.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		/* No stored OOB data for this peer: send a negative reply */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode only the P-256 values
		 * may be used, so the P-192 fields are zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Legacy pairing: only the P-192 hash and randomizer */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4888 
4889 #if IS_ENABLED(CONFIG_BT_HS)
4890 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4891 {
4892 	struct hci_ev_channel_selected *ev = (void *)skb->data;
4893 	struct hci_conn *hcon;
4894 
4895 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4896 
4897 	skb_pull(skb, sizeof(*ev));
4898 
4899 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4900 	if (!hcon)
4901 		return;
4902 
4903 	amp_read_loc_assoc_final_data(hdev, hcon);
4904 }
4905 
4906 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4907 				      struct sk_buff *skb)
4908 {
4909 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4910 	struct hci_conn *hcon, *bredr_hcon;
4911 
4912 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4913 	       ev->status);
4914 
4915 	hci_dev_lock(hdev);
4916 
4917 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4918 	if (!hcon) {
4919 		hci_dev_unlock(hdev);
4920 		return;
4921 	}
4922 
4923 	if (ev->status) {
4924 		hci_conn_del(hcon);
4925 		hci_dev_unlock(hdev);
4926 		return;
4927 	}
4928 
4929 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4930 
4931 	hcon->state = BT_CONNECTED;
4932 	bacpy(&hcon->dst, &bredr_hcon->dst);
4933 
4934 	hci_conn_hold(hcon);
4935 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4936 	hci_conn_drop(hcon);
4937 
4938 	hci_debugfs_create_conn(hcon);
4939 	hci_conn_add_sysfs(hcon);
4940 
4941 	amp_physical_cfm(bredr_hcon, hcon);
4942 
4943 	hci_dev_unlock(hdev);
4944 }
4945 
/* Handle AMP Logical Link Complete event.
 *
 * Creates an hci_chan for the new logical link and, when a bridged
 * BR/EDR L2CAP channel is pending on the AMP manager, confirms the
 * logical link to L2CAP so the channel move can proceed.
 *
 * NOTE(review): unlike the sibling handlers, the connection hash lookup
 * and hchan creation here run without taking hci_dev_lock — confirm this
 * is safe in this call context.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the channel's MTU to the AMP controller's block
		 * MTU and confirm the logical link to L2CAP.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4983 
4984 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4985 					     struct sk_buff *skb)
4986 {
4987 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4988 	struct hci_chan *hchan;
4989 
4990 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4991 	       le16_to_cpu(ev->handle), ev->status);
4992 
4993 	if (ev->status)
4994 		return;
4995 
4996 	hci_dev_lock(hdev);
4997 
4998 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4999 	if (!hchan)
5000 		goto unlock;
5001 
5002 	amp_destroy_logical_link(hchan, ev->reason);
5003 
5004 unlock:
5005 	hci_dev_unlock(hdev);
5006 }
5007 
5008 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5009 					     struct sk_buff *skb)
5010 {
5011 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5012 	struct hci_conn *hcon;
5013 
5014 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5015 
5016 	if (ev->status)
5017 		return;
5018 
5019 	hci_dev_lock(hdev);
5020 
5021 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5022 	if (hcon) {
5023 		hcon->state = BT_CLOSED;
5024 		hci_conn_del(hcon);
5025 	}
5026 
5027 	hci_dev_unlock(hdev);
5028 }
5029 #endif
5030 
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events.
 *
 * Finds or creates the hci_conn, fixes up initiator/responder address
 * information, resolves RPAs to identity addresses, reports the new
 * connection to mgmt and either starts the remote features exchange or
 * transitions straight to BT_CONNECTED. Finally clears any pending
 * connection parameters that triggered this connection.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
			bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
			u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The pending connect attempt succeeded or failed; either
		 * way its timeout no longer applies.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	/* Map the HCI address type onto the bdaddr list address type */
	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Only notify mgmt once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Keep the connection alive until the features arrive */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* This connection satisfies a pending auto/explicit connect
	 * action; remove it and release the reference it held.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
5192 
5193 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5194 {
5195 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5196 
5197 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5198 
5199 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5200 			     ev->role, le16_to_cpu(ev->handle),
5201 			     le16_to_cpu(ev->interval),
5202 			     le16_to_cpu(ev->latency),
5203 			     le16_to_cpu(ev->supervision_timeout));
5204 }
5205 
5206 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5207 					 struct sk_buff *skb)
5208 {
5209 	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5210 
5211 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5212 
5213 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5214 			     ev->role, le16_to_cpu(ev->handle),
5215 			     le16_to_cpu(ev->interval),
5216 			     le16_to_cpu(ev->latency),
5217 			     le16_to_cpu(ev->supervision_timeout));
5218 }
5219 
/* Handle LE Advertising Set Terminated event.
 *
 * When the set terminated because a connection was created, fill in the
 * connection's responder address (our own random address, or the random
 * address of the advertising instance that was running). This completes
 * the deferred resp_addr assignment from le_conn_complete_evt() in the
 * extended-advertising case.
 *
 * NOTE(review): conn hash lookup and adv instance lookup are performed
 * without taking hci_dev_lock here — confirm this is safe in this call
 * context.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		struct adv_info *adv_instance;

		/* Only random advertising addresses need fixing up */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
			return;

		/* No instance: the controller used hdev->random_addr */
		if (!hdev->cur_adv_instance) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&conn->resp_addr, &adv_instance->random_addr);
	}
}
5247 
5248 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5249 					    struct sk_buff *skb)
5250 {
5251 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5252 	struct hci_conn *conn;
5253 
5254 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5255 
5256 	if (ev->status)
5257 		return;
5258 
5259 	hci_dev_lock(hdev);
5260 
5261 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5262 	if (conn) {
5263 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5264 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5265 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5266 	}
5267 
5268 	hci_dev_unlock(hdev);
5269 }
5270 
/* This function requires the caller holds hdev->lock
 *
 * Decides, based on a received advertising report, whether to initiate an
 * LE connection to the advertiser. Returns the new connection on success,
 * or NULL when no connection should be (or could be) attempted.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* An explicit connect request accepts any connectable adv type;
	 * otherwise the auto_connect policy decides.
	 */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5359 
5360 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5361 			       u8 bdaddr_type, bdaddr_t *direct_addr,
5362 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
5363 {
5364 	struct discovery_state *d = &hdev->discovery;
5365 	struct smp_irk *irk;
5366 	struct hci_conn *conn;
5367 	bool match;
5368 	u32 flags;
5369 	u8 *ptr, real_len;
5370 
5371 	switch (type) {
5372 	case LE_ADV_IND:
5373 	case LE_ADV_DIRECT_IND:
5374 	case LE_ADV_SCAN_IND:
5375 	case LE_ADV_NONCONN_IND:
5376 	case LE_ADV_SCAN_RSP:
5377 		break;
5378 	default:
5379 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5380 				       "type: 0x%02x", type);
5381 		return;
5382 	}
5383 
5384 	/* Find the end of the data in case the report contains padded zero
5385 	 * bytes at the end causing an invalid length value.
5386 	 *
5387 	 * When data is NULL, len is 0 so there is no need for extra ptr
5388 	 * check as 'ptr < data + 0' is already false in such case.
5389 	 */
5390 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5391 		if (ptr + 1 + *ptr > data + len)
5392 			break;
5393 	}
5394 
5395 	real_len = ptr - data;
5396 
5397 	/* Adjust for actual length */
5398 	if (len != real_len) {
5399 		bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u",
5400 				       len, real_len);
5401 		len = real_len;
5402 	}
5403 
5404 	/* If the direct address is present, then this report is from
5405 	 * a LE Direct Advertising Report event. In that case it is
5406 	 * important to see if the address is matching the local
5407 	 * controller address.
5408 	 */
5409 	if (direct_addr) {
5410 		/* Only resolvable random addresses are valid for these
5411 		 * kind of reports and others can be ignored.
5412 		 */
5413 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5414 			return;
5415 
5416 		/* If the controller is not using resolvable random
5417 		 * addresses, then this report can be ignored.
5418 		 */
5419 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5420 			return;
5421 
5422 		/* If the local IRK of the controller does not match
5423 		 * with the resolvable random address provided, then
5424 		 * this report can be ignored.
5425 		 */
5426 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5427 			return;
5428 	}
5429 
5430 	/* Check if we need to convert to identity address */
5431 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5432 	if (irk) {
5433 		bdaddr = &irk->bdaddr;
5434 		bdaddr_type = irk->addr_type;
5435 	}
5436 
5437 	/* Check if we have been requested to connect to this device.
5438 	 *
5439 	 * direct_addr is set only for directed advertising reports (it is NULL
5440 	 * for advertising reports) and is already verified to be RPA above.
5441 	 */
5442 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5443 								direct_addr);
5444 	if (conn && type == LE_ADV_IND) {
5445 		/* Store report for later inclusion by
5446 		 * mgmt_device_connected
5447 		 */
5448 		memcpy(conn->le_adv_data, data, len);
5449 		conn->le_adv_data_len = len;
5450 	}
5451 
5452 	/* Passive scanning shouldn't trigger any device found events,
5453 	 * except for devices marked as CONN_REPORT for which we do send
5454 	 * device found events.
5455 	 */
5456 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5457 		if (type == LE_ADV_DIRECT_IND)
5458 			return;
5459 
5460 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5461 					       bdaddr, bdaddr_type))
5462 			return;
5463 
5464 		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5465 			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5466 		else
5467 			flags = 0;
5468 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5469 				  rssi, flags, data, len, NULL, 0);
5470 		return;
5471 	}
5472 
5473 	/* When receiving non-connectable or scannable undirected
5474 	 * advertising reports, this means that the remote device is
5475 	 * not connectable and then clearly indicate this in the
5476 	 * device found event.
5477 	 *
5478 	 * When receiving a scan response, then there is no way to
5479 	 * know if the remote device is connectable or not. However
5480 	 * since scan responses are merged with a previously seen
5481 	 * advertising report, the flags field from that report
5482 	 * will be used.
5483 	 *
5484 	 * In the really unlikely case that a controller get confused
5485 	 * and just sends a scan response event, then it is marked as
5486 	 * not connectable as well.
5487 	 */
5488 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5489 	    type == LE_ADV_SCAN_RSP)
5490 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5491 	else
5492 		flags = 0;
5493 
5494 	/* If there's nothing pending either store the data from this
5495 	 * event or send an immediate device found event if the data
5496 	 * should not be stored for later.
5497 	 */
5498 	if (!has_pending_adv_report(hdev)) {
5499 		/* If the report will trigger a SCAN_REQ store it for
5500 		 * later merging.
5501 		 */
5502 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5503 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5504 						 rssi, flags, data, len);
5505 			return;
5506 		}
5507 
5508 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5509 				  rssi, flags, data, len, NULL, 0);
5510 		return;
5511 	}
5512 
5513 	/* Check if the pending report is for the same device as the new one */
5514 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5515 		 bdaddr_type == d->last_adv_addr_type);
5516 
5517 	/* If the pending data doesn't match this report or this isn't a
5518 	 * scan response (e.g. we got a duplicate ADV_IND) then force
5519 	 * sending of the pending data.
5520 	 */
5521 	if (type != LE_ADV_SCAN_RSP || !match) {
5522 		/* Send out whatever is in the cache, but skip duplicates */
5523 		if (!match)
5524 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5525 					  d->last_adv_addr_type, NULL,
5526 					  d->last_adv_rssi, d->last_adv_flags,
5527 					  d->last_adv_data,
5528 					  d->last_adv_data_len, NULL, 0);
5529 
5530 		/* If the new report will trigger a SCAN_REQ store it for
5531 		 * later merging.
5532 		 */
5533 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5534 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5535 						 rssi, flags, data, len);
5536 			return;
5537 		}
5538 
5539 		/* The advertising reports cannot be merged, so clear
5540 		 * the pending report and send out a device found event.
5541 		 */
5542 		clear_pending_adv_report(hdev);
5543 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5544 				  rssi, flags, data, len, NULL, 0);
5545 		return;
5546 	}
5547 
5548 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5549 	 * the new event is a SCAN_RSP. We can therefore proceed with
5550 	 * sending a merged device found event.
5551 	 */
5552 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5553 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5554 			  d->last_adv_data, d->last_adv_data_len, data, len);
5555 	clear_pending_adv_report(hdev);
5556 }
5557 
5558 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5559 {
5560 	u8 num_reports = skb->data[0];
5561 	void *ptr = &skb->data[1];
5562 
5563 	hci_dev_lock(hdev);
5564 
5565 	while (num_reports--) {
5566 		struct hci_ev_le_advertising_info *ev = ptr;
5567 		s8 rssi;
5568 
5569 		if (ev->length <= HCI_MAX_AD_LENGTH) {
5570 			rssi = ev->data[ev->length];
5571 			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5572 					   ev->bdaddr_type, NULL, 0, rssi,
5573 					   ev->data, ev->length);
5574 		} else {
5575 			bt_dev_err(hdev, "Dropping invalid advertising data");
5576 		}
5577 
5578 		ptr += sizeof(*ev) + ev->length + 1;
5579 	}
5580 
5581 	hci_dev_unlock(hdev);
5582 }
5583 
5584 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5585 {
5586 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5587 		switch (evt_type) {
5588 		case LE_LEGACY_ADV_IND:
5589 			return LE_ADV_IND;
5590 		case LE_LEGACY_ADV_DIRECT_IND:
5591 			return LE_ADV_DIRECT_IND;
5592 		case LE_LEGACY_ADV_SCAN_IND:
5593 			return LE_ADV_SCAN_IND;
5594 		case LE_LEGACY_NONCONN_IND:
5595 			return LE_ADV_NONCONN_IND;
5596 		case LE_LEGACY_SCAN_RSP_ADV:
5597 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5598 			return LE_ADV_SCAN_RSP;
5599 		}
5600 
5601 		goto invalid;
5602 	}
5603 
5604 	if (evt_type & LE_EXT_ADV_CONN_IND) {
5605 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
5606 			return LE_ADV_DIRECT_IND;
5607 
5608 		return LE_ADV_IND;
5609 	}
5610 
5611 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
5612 		return LE_ADV_SCAN_RSP;
5613 
5614 	if (evt_type & LE_EXT_ADV_SCAN_IND)
5615 		return LE_ADV_SCAN_IND;
5616 
5617 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5618 	    evt_type & LE_EXT_ADV_DIRECT_IND)
5619 		return LE_ADV_NONCONN_IND;
5620 
5621 invalid:
5622 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5623 			       evt_type);
5624 
5625 	return LE_ADV_INVALID;
5626 }
5627 
5628 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5629 {
5630 	u8 num_reports = skb->data[0];
5631 	void *ptr = &skb->data[1];
5632 
5633 	hci_dev_lock(hdev);
5634 
5635 	while (num_reports--) {
5636 		struct hci_ev_le_ext_adv_report *ev = ptr;
5637 		u8 legacy_evt_type;
5638 		u16 evt_type;
5639 
5640 		evt_type = __le16_to_cpu(ev->evt_type);
5641 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5642 		if (legacy_evt_type != LE_ADV_INVALID) {
5643 			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5644 					   ev->bdaddr_type, NULL, 0, ev->rssi,
5645 					   ev->data, ev->length);
5646 		}
5647 
5648 		ptr += sizeof(*ev) + ev->length;
5649 	}
5650 
5651 	hci_dev_unlock(hdev);
5652 }
5653 
5654 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5655 					    struct sk_buff *skb)
5656 {
5657 	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5658 	struct hci_conn *conn;
5659 
5660 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5661 
5662 	hci_dev_lock(hdev);
5663 
5664 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5665 	if (conn) {
5666 		if (!ev->status)
5667 			memcpy(conn->features[0], ev->features, 8);
5668 
5669 		if (conn->state == BT_CONFIG) {
5670 			__u8 status;
5671 
5672 			/* If the local controller supports slave-initiated
5673 			 * features exchange, but the remote controller does
5674 			 * not, then it is possible that the error code 0x1a
5675 			 * for unsupported remote feature gets returned.
5676 			 *
5677 			 * In this specific case, allow the connection to
5678 			 * transition into connected state and mark it as
5679 			 * successful.
5680 			 */
5681 			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5682 			    !conn->out && ev->status == 0x1a)
5683 				status = 0x00;
5684 			else
5685 				status = ev->status;
5686 
5687 			conn->state = BT_CONNECTED;
5688 			hci_connect_cfm(conn, status);
5689 			hci_conn_drop(conn);
5690 		}
5691 	}
5692 
5693 	hci_dev_unlock(hdev);
5694 }
5695 
/* Handle the HCI LE Long Term Key Request event.
 *
 * When the remote side starts encryption on an LE link the controller
 * asks the host for the LTK matching this connection. Look the key up
 * in the SMP key store and reply with it, or send a negative reply
 * when no usable key exists (unknown handle, no stored key, or
 * EDiv/Rand mismatch).
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad the remainder: the controller
	 * always expects a full 16 byte value even when the negotiated
	 * key size is shorter.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single-use: drop the key now that it has
		 * been handed to the controller.
		 */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5759 
5760 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5761 				      u8 reason)
5762 {
5763 	struct hci_cp_le_conn_param_req_neg_reply cp;
5764 
5765 	cp.handle = cpu_to_le16(handle);
5766 	cp.reason = reason;
5767 
5768 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5769 		     &cp);
5770 }
5771 
5772 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5773 					     struct sk_buff *skb)
5774 {
5775 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5776 	struct hci_cp_le_conn_param_req_reply cp;
5777 	struct hci_conn *hcon;
5778 	u16 handle, min, max, latency, timeout;
5779 
5780 	handle = le16_to_cpu(ev->handle);
5781 	min = le16_to_cpu(ev->interval_min);
5782 	max = le16_to_cpu(ev->interval_max);
5783 	latency = le16_to_cpu(ev->latency);
5784 	timeout = le16_to_cpu(ev->timeout);
5785 
5786 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5787 	if (!hcon || hcon->state != BT_CONNECTED)
5788 		return send_conn_param_neg_reply(hdev, handle,
5789 						 HCI_ERROR_UNKNOWN_CONN_ID);
5790 
5791 	if (hci_check_conn_params(min, max, latency, timeout))
5792 		return send_conn_param_neg_reply(hdev, handle,
5793 						 HCI_ERROR_INVALID_LL_PARAMS);
5794 
5795 	if (hcon->role == HCI_ROLE_MASTER) {
5796 		struct hci_conn_params *params;
5797 		u8 store_hint;
5798 
5799 		hci_dev_lock(hdev);
5800 
5801 		params = hci_conn_params_lookup(hdev, &hcon->dst,
5802 						hcon->dst_type);
5803 		if (params) {
5804 			params->conn_min_interval = min;
5805 			params->conn_max_interval = max;
5806 			params->conn_latency = latency;
5807 			params->supervision_timeout = timeout;
5808 			store_hint = 0x01;
5809 		} else{
5810 			store_hint = 0x00;
5811 		}
5812 
5813 		hci_dev_unlock(hdev);
5814 
5815 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5816 				    store_hint, min, max, latency, timeout);
5817 	}
5818 
5819 	cp.handle = ev->handle;
5820 	cp.interval_min = ev->interval_min;
5821 	cp.interval_max = ev->interval_max;
5822 	cp.latency = ev->latency;
5823 	cp.timeout = ev->timeout;
5824 	cp.min_ce_len = 0;
5825 	cp.max_ce_len = 0;
5826 
5827 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5828 }
5829 
5830 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5831 					 struct sk_buff *skb)
5832 {
5833 	u8 num_reports = skb->data[0];
5834 	void *ptr = &skb->data[1];
5835 
5836 	hci_dev_lock(hdev);
5837 
5838 	while (num_reports--) {
5839 		struct hci_ev_le_direct_adv_info *ev = ptr;
5840 
5841 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5842 				   ev->bdaddr_type, &ev->direct_addr,
5843 				   ev->direct_addr_type, ev->rssi, NULL, 0);
5844 
5845 		ptr += sizeof(*ev);
5846 	}
5847 
5848 	hci_dev_unlock(hdev);
5849 }
5850 
5851 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
5852 {
5853 	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
5854 	struct hci_conn *conn;
5855 
5856 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5857 
5858 	if (!ev->status)
5859 		return;
5860 
5861 	hci_dev_lock(hdev);
5862 
5863 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5864 	if (!conn)
5865 		goto unlock;
5866 
5867 	conn->le_tx_phy = ev->tx_phy;
5868 	conn->le_rx_phy = ev->rx_phy;
5869 
5870 unlock:
5871 	hci_dev_unlock(hdev);
5872 }
5873 
5874 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5875 {
5876 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
5877 
5878 	skb_pull(skb, sizeof(*le_ev));
5879 
5880 	switch (le_ev->subevent) {
5881 	case HCI_EV_LE_CONN_COMPLETE:
5882 		hci_le_conn_complete_evt(hdev, skb);
5883 		break;
5884 
5885 	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5886 		hci_le_conn_update_complete_evt(hdev, skb);
5887 		break;
5888 
5889 	case HCI_EV_LE_ADVERTISING_REPORT:
5890 		hci_le_adv_report_evt(hdev, skb);
5891 		break;
5892 
5893 	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5894 		hci_le_remote_feat_complete_evt(hdev, skb);
5895 		break;
5896 
5897 	case HCI_EV_LE_LTK_REQ:
5898 		hci_le_ltk_request_evt(hdev, skb);
5899 		break;
5900 
5901 	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5902 		hci_le_remote_conn_param_req_evt(hdev, skb);
5903 		break;
5904 
5905 	case HCI_EV_LE_DIRECT_ADV_REPORT:
5906 		hci_le_direct_adv_report_evt(hdev, skb);
5907 		break;
5908 
5909 	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
5910 		hci_le_phy_update_evt(hdev, skb);
5911 		break;
5912 
5913 	case HCI_EV_LE_EXT_ADV_REPORT:
5914 		hci_le_ext_adv_report_evt(hdev, skb);
5915 		break;
5916 
5917 	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5918 		hci_le_enh_conn_complete_evt(hdev, skb);
5919 		break;
5920 
5921 	case HCI_EV_LE_EXT_ADV_SET_TERM:
5922 		hci_le_ext_adv_term_evt(hdev, skb);
5923 		break;
5924 
5925 	default:
5926 		break;
5927 	}
5928 }
5929 
5930 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5931 				 u8 event, struct sk_buff *skb)
5932 {
5933 	struct hci_ev_cmd_complete *ev;
5934 	struct hci_event_hdr *hdr;
5935 
5936 	if (!skb)
5937 		return false;
5938 
5939 	if (skb->len < sizeof(*hdr)) {
5940 		bt_dev_err(hdev, "too short HCI event");
5941 		return false;
5942 	}
5943 
5944 	hdr = (void *) skb->data;
5945 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
5946 
5947 	if (event) {
5948 		if (hdr->evt != event)
5949 			return false;
5950 		return true;
5951 	}
5952 
5953 	/* Check if request ended in Command Status - no way to retreive
5954 	 * any extra parameters in this case.
5955 	 */
5956 	if (hdr->evt == HCI_EV_CMD_STATUS)
5957 		return false;
5958 
5959 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5960 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5961 			   hdr->evt);
5962 		return false;
5963 	}
5964 
5965 	if (skb->len < sizeof(*ev)) {
5966 		bt_dev_err(hdev, "too short cmd_complete event");
5967 		return false;
5968 	}
5969 
5970 	ev = (void *) skb->data;
5971 	skb_pull(skb, sizeof(*ev));
5972 
5973 	if (opcode != __le16_to_cpu(ev->opcode)) {
5974 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5975 		       __le16_to_cpu(ev->opcode));
5976 		return false;
5977 	}
5978 
5979 	return true;
5980 }
5981 
/* Entry point for every incoming HCI event packet.
 *
 * Reads the event header, lets the request framework match the event
 * against the currently outstanding command (so synchronous requests
 * can complete), then dispatches to the per-event handler. Consumes
 * @skb in all cases.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	/* If the pending command was waiting for this specific event,
	 * resolve its opcode now so the completion callbacks can be
	 * looked up.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Dispatch to the handler matching the event code. */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status carry completion data themselves and
	 * may update opcode/status and the completion callbacks.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP/High Speed events are only handled when BT_HS is built. */
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Hand the callback NULL when the cloned skb turned out
		 * not to be the matching command complete event.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
6216