xref: /linux/net/bluetooth/hci_event.c (revision 06ce23ad57c8e378b86ef3f439b2e08bcb5d05eb)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 #include "eir.h"
40 
/* 16 octets of zeroes, used to reject all-zero (invalid/debug) link keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Convert a value in whole seconds to jiffies via the msecs helper */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45 
46 /* Handle HCI Event packets */
47 
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
49 			     u8 ev, size_t len)
50 {
51 	void *data;
52 
53 	data = skb_pull_data(skb, len);
54 	if (!data)
55 		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
56 
57 	return data;
58 }
59 
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
61 			     u16 op, size_t len)
62 {
63 	void *data;
64 
65 	data = skb_pull_data(skb, len);
66 	if (!data)
67 		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
68 
69 	return data;
70 }
71 
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
73 				u8 ev, size_t len)
74 {
75 	void *data;
76 
77 	data = skb_pull_data(skb, len);
78 	if (!data)
79 		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
80 
81 	return data;
82 }
83 
/* Handle Command Complete for HCI_Inquiry_Cancel.
 *
 * On success clears the HCI_INQUIRY flag, wakes any waiters on it and,
 * unless an LE active scan is still running, moves discovery to
 * DISCOVERY_STOPPED. Returns the (possibly rewritten) command status.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);

	return rp->status;
}
123 
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125 			      struct sk_buff *skb)
126 {
127 	struct hci_ev_status *rp = data;
128 
129 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130 
131 	if (rp->status)
132 		return rp->status;
133 
134 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135 
136 	return rp->status;
137 }
138 
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140 				   struct sk_buff *skb)
141 {
142 	struct hci_ev_status *rp = data;
143 
144 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145 
146 	if (rp->status)
147 		return rp->status;
148 
149 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150 
151 	hci_conn_check_pending(hdev);
152 
153 	return rp->status;
154 }
155 
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
157 					struct sk_buff *skb)
158 {
159 	struct hci_ev_status *rp = data;
160 
161 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
162 
163 	return rp->status;
164 }
165 
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
167 				struct sk_buff *skb)
168 {
169 	struct hci_rp_role_discovery *rp = data;
170 	struct hci_conn *conn;
171 
172 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
173 
174 	if (rp->status)
175 		return rp->status;
176 
177 	hci_dev_lock(hdev);
178 
179 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180 	if (conn)
181 		conn->role = rp->role;
182 
183 	hci_dev_unlock(hdev);
184 
185 	return rp->status;
186 }
187 
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
189 				  struct sk_buff *skb)
190 {
191 	struct hci_rp_read_link_policy *rp = data;
192 	struct hci_conn *conn;
193 
194 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
195 
196 	if (rp->status)
197 		return rp->status;
198 
199 	hci_dev_lock(hdev);
200 
201 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
202 	if (conn)
203 		conn->link_policy = __le16_to_cpu(rp->policy);
204 
205 	hci_dev_unlock(hdev);
206 
207 	return rp->status;
208 }
209 
/* Handle Command Complete for HCI_Write_Link_Policy_Settings.
 *
 * On success, mirrors the policy value from the *sent* command into the
 * matching connection, since the reply carries only handle and status.
 */
static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 16-bit connection handle leading the
		 * command parameters to reach the policy field.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
236 
237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
238 				      struct sk_buff *skb)
239 {
240 	struct hci_rp_read_def_link_policy *rp = data;
241 
242 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
243 
244 	if (rp->status)
245 		return rp->status;
246 
247 	hdev->link_policy = __le16_to_cpu(rp->policy);
248 
249 	return rp->status;
250 }
251 
252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
253 				       struct sk_buff *skb)
254 {
255 	struct hci_ev_status *rp = data;
256 	void *sent;
257 
258 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
259 
260 	if (rp->status)
261 		return rp->status;
262 
263 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
264 	if (!sent)
265 		return rp->status;
266 
267 	hdev->link_policy = get_unaligned_le16(sent);
268 
269 	return rp->status;
270 }
271 
/* Handle Command Complete for HCI_Reset.
 *
 * Clears HCI_RESET unconditionally (the command is no longer pending
 * either way), then on success drops all volatile state: discovery,
 * advertising/scan-response data, cached TX powers, LE scan type and
 * the LE accept/resolving lists.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Must happen before the status check: the command completed */
	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}
306 
307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
308 				      struct sk_buff *skb)
309 {
310 	struct hci_rp_read_stored_link_key *rp = data;
311 	struct hci_cp_read_stored_link_key *sent;
312 
313 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
314 
315 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
316 	if (!sent)
317 		return rp->status;
318 
319 	if (!rp->status && sent->read_all == 0x01) {
320 		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
321 		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
322 	}
323 
324 	return rp->status;
325 }
326 
327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
328 					struct sk_buff *skb)
329 {
330 	struct hci_rp_delete_stored_link_key *rp = data;
331 	u16 num_keys;
332 
333 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
334 
335 	if (rp->status)
336 		return rp->status;
337 
338 	num_keys = le16_to_cpu(rp->num_keys);
339 
340 	if (num_keys <= hdev->stored_num_keys)
341 		hdev->stored_num_keys -= num_keys;
342 	else
343 		hdev->stored_num_keys = 0;
344 
345 	return rp->status;
346 }
347 
/* Handle Command Complete for HCI_Write_Local_Name.
 *
 * With mgmt active, the outcome is forwarded to the management
 * interface; otherwise, on success, the name from the sent command is
 * cached in hdev->dev_name.
 */
static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}
371 
372 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
373 				 struct sk_buff *skb)
374 {
375 	struct hci_rp_read_local_name *rp = data;
376 
377 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
378 
379 	if (rp->status)
380 		return rp->status;
381 
382 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
383 	    hci_dev_test_flag(hdev, HCI_CONFIG))
384 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
385 
386 	return rp->status;
387 }
388 
/* Handle Command Complete for HCI_Write_Authentication_Enable.
 *
 * On success mirrors the requested mode into the HCI_AUTH flag; when
 * mgmt is active it is always informed of the outcome, success or not.
 */
static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		/* The single command parameter is the requested auth mode */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
419 
420 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
421 				    struct sk_buff *skb)
422 {
423 	struct hci_ev_status *rp = data;
424 	__u8 param;
425 	void *sent;
426 
427 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
428 
429 	if (rp->status)
430 		return rp->status;
431 
432 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
433 	if (!sent)
434 		return rp->status;
435 
436 	param = *((__u8 *) sent);
437 
438 	if (param)
439 		set_bit(HCI_ENCRYPT, &hdev->flags);
440 	else
441 		clear_bit(HCI_ENCRYPT, &hdev->flags);
442 
443 	return rp->status;
444 }
445 
/* Handle Command Complete for HCI_Write_Scan_Enable.
 *
 * On success mirrors the requested scan bits into the HCI_ISCAN and
 * HCI_PSCAN flags; on failure the discoverable timeout is cleared.
 */
static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	/* The single command parameter is the requested scan bitmask */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		/* Scan change failed, so a pending discoverable timeout
		 * no longer applies.
		 */
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}
483 
484 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
485 				  struct sk_buff *skb)
486 {
487 	struct hci_ev_status *rp = data;
488 	struct hci_cp_set_event_filter *cp;
489 	void *sent;
490 
491 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
492 
493 	if (rp->status)
494 		return rp->status;
495 
496 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
497 	if (!sent)
498 		return rp->status;
499 
500 	cp = (struct hci_cp_set_event_filter *)sent;
501 
502 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
503 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
504 	else
505 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
506 
507 	return rp->status;
508 }
509 
510 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
511 				   struct sk_buff *skb)
512 {
513 	struct hci_rp_read_class_of_dev *rp = data;
514 
515 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
516 
517 	if (rp->status)
518 		return rp->status;
519 
520 	memcpy(hdev->dev_class, rp->dev_class, 3);
521 
522 	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
523 		   hdev->dev_class[1], hdev->dev_class[0]);
524 
525 	return rp->status;
526 }
527 
/* Handle Command Complete for HCI_Write_Class_of_Device.
 *
 * Caches the class from the sent command on success and, when mgmt is
 * active, always informs the management interface of the outcome.
 */
static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		/* Class of Device is a 3-byte field */
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
552 
553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
554 				    struct sk_buff *skb)
555 {
556 	struct hci_rp_read_voice_setting *rp = data;
557 	__u16 setting;
558 
559 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
560 
561 	if (rp->status)
562 		return rp->status;
563 
564 	setting = __le16_to_cpu(rp->voice_setting);
565 
566 	if (hdev->voice_setting == setting)
567 		return rp->status;
568 
569 	hdev->voice_setting = setting;
570 
571 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
572 
573 	if (hdev->notify)
574 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
575 
576 	return rp->status;
577 }
578 
579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
580 				     struct sk_buff *skb)
581 {
582 	struct hci_ev_status *rp = data;
583 	__u16 setting;
584 	void *sent;
585 
586 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
587 
588 	if (rp->status)
589 		return rp->status;
590 
591 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
592 	if (!sent)
593 		return rp->status;
594 
595 	setting = get_unaligned_le16(sent);
596 
597 	if (hdev->voice_setting == setting)
598 		return rp->status;
599 
600 	hdev->voice_setting = setting;
601 
602 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
603 
604 	if (hdev->notify)
605 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
606 
607 	return rp->status;
608 }
609 
610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
611 					struct sk_buff *skb)
612 {
613 	struct hci_rp_read_num_supported_iac *rp = data;
614 
615 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
616 
617 	if (rp->status)
618 		return rp->status;
619 
620 	hdev->num_iac = rp->num_iac;
621 
622 	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
623 
624 	return rp->status;
625 }
626 
627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
628 				struct sk_buff *skb)
629 {
630 	struct hci_ev_status *rp = data;
631 	struct hci_cp_write_ssp_mode *sent;
632 
633 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
634 
635 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
636 	if (!sent)
637 		return rp->status;
638 
639 	hci_dev_lock(hdev);
640 
641 	if (!rp->status) {
642 		if (sent->mode)
643 			hdev->features[1][0] |= LMP_HOST_SSP;
644 		else
645 			hdev->features[1][0] &= ~LMP_HOST_SSP;
646 	}
647 
648 	if (!rp->status) {
649 		if (sent->mode)
650 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
651 		else
652 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
653 	}
654 
655 	hci_dev_unlock(hdev);
656 
657 	return rp->status;
658 }
659 
/* Handle Command Complete for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success mirrors the requested support into the LMP_HOST_SC host
 * feature bit. The HCI_SC_ENABLED flag is only touched here when mgmt
 * is not active — NOTE(review): presumably mgmt maintains that flag
 * itself; confirm against mgmt.c.
 */
static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
692 
693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
694 				    struct sk_buff *skb)
695 {
696 	struct hci_rp_read_local_version *rp = data;
697 
698 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
699 
700 	if (rp->status)
701 		return rp->status;
702 
703 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
704 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
705 		hdev->hci_ver = rp->hci_ver;
706 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
707 		hdev->lmp_ver = rp->lmp_ver;
708 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
709 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
710 	}
711 
712 	return rp->status;
713 }
714 
/* Handle Command Complete for HCI_Read_Encryption_Key_Size.
 *
 * Stores the reported key size on the matching connection and then
 * runs the encryption-change callbacks. On command failure, the key
 * size is forced to 0 so that hci_encrypt_cfm() tears the link down.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		/* No matching connection; report a local failure code */
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;
	}

	hci_encrypt_cfm(conn, 0);

done:
	hci_dev_unlock(hdev);

	return status;
}
755 
756 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
757 				     struct sk_buff *skb)
758 {
759 	struct hci_rp_read_local_commands *rp = data;
760 
761 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
762 
763 	if (rp->status)
764 		return rp->status;
765 
766 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
767 	    hci_dev_test_flag(hdev, HCI_CONFIG))
768 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
769 
770 	return rp->status;
771 }
772 
773 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
774 					   struct sk_buff *skb)
775 {
776 	struct hci_rp_read_auth_payload_to *rp = data;
777 	struct hci_conn *conn;
778 
779 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
780 
781 	if (rp->status)
782 		return rp->status;
783 
784 	hci_dev_lock(hdev);
785 
786 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
787 	if (conn)
788 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
789 
790 	hci_dev_unlock(hdev);
791 
792 	return rp->status;
793 }
794 
/* Handle Command Complete for HCI_Write_Authenticated_Payload_Timeout.
 *
 * On success copies the timeout from the sent command into the matching
 * connection, then runs the encryption-change callbacks. If the handle
 * no longer maps to a connection, the status is rewritten to 0xff.
 */
static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		/* sent + 2 skips the 16-bit handle leading the command
		 * parameters to reach the timeout field.
		 */
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
826 
/* Handle Command Complete for HCI_Read_Local_Supported_Features.
 *
 * Caches feature page 0 and widens the default ACL packet types and
 * (e)SCO link types according to what the controller claims to
 * support.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}
878 
879 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
880 					 struct sk_buff *skb)
881 {
882 	struct hci_rp_read_local_ext_features *rp = data;
883 
884 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
885 
886 	if (rp->status)
887 		return rp->status;
888 
889 	if (hdev->max_page < rp->max_page)
890 		hdev->max_page = rp->max_page;
891 
892 	if (rp->page < HCI_MAX_PAGES)
893 		memcpy(hdev->features[rp->page], rp->features, 8);
894 
895 	return rp->status;
896 }
897 
898 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
899 					struct sk_buff *skb)
900 {
901 	struct hci_rp_read_flow_control_mode *rp = data;
902 
903 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
904 
905 	if (rp->status)
906 		return rp->status;
907 
908 	hdev->flow_ctl_mode = rp->mode;
909 
910 	return rp->status;
911 }
912 
913 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
914 				  struct sk_buff *skb)
915 {
916 	struct hci_rp_read_buffer_size *rp = data;
917 
918 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
919 
920 	if (rp->status)
921 		return rp->status;
922 
923 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
924 	hdev->sco_mtu  = rp->sco_mtu;
925 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
926 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
927 
928 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
929 		hdev->sco_mtu  = 64;
930 		hdev->sco_pkts = 8;
931 	}
932 
933 	hdev->acl_cnt = hdev->acl_pkts;
934 	hdev->sco_cnt = hdev->sco_pkts;
935 
936 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
937 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
938 
939 	return rp->status;
940 }
941 
942 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
943 			      struct sk_buff *skb)
944 {
945 	struct hci_rp_read_bd_addr *rp = data;
946 
947 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
948 
949 	if (rp->status)
950 		return rp->status;
951 
952 	if (test_bit(HCI_INIT, &hdev->flags))
953 		bacpy(&hdev->bdaddr, &rp->bdaddr);
954 
955 	if (hci_dev_test_flag(hdev, HCI_SETUP))
956 		bacpy(&hdev->setup_addr, &rp->bdaddr);
957 
958 	return rp->status;
959 }
960 
961 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
962 					 struct sk_buff *skb)
963 {
964 	struct hci_rp_read_local_pairing_opts *rp = data;
965 
966 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
967 
968 	if (rp->status)
969 		return rp->status;
970 
971 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
972 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
973 		hdev->pairing_opts = rp->pairing_opts;
974 		hdev->max_enc_key_size = rp->max_key_size;
975 	}
976 
977 	return rp->status;
978 }
979 
980 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
981 					 struct sk_buff *skb)
982 {
983 	struct hci_rp_read_page_scan_activity *rp = data;
984 
985 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
986 
987 	if (rp->status)
988 		return rp->status;
989 
990 	if (test_bit(HCI_INIT, &hdev->flags)) {
991 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
992 		hdev->page_scan_window = __le16_to_cpu(rp->window);
993 	}
994 
995 	return rp->status;
996 }
997 
998 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
999 					  struct sk_buff *skb)
1000 {
1001 	struct hci_ev_status *rp = data;
1002 	struct hci_cp_write_page_scan_activity *sent;
1003 
1004 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1005 
1006 	if (rp->status)
1007 		return rp->status;
1008 
1009 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1010 	if (!sent)
1011 		return rp->status;
1012 
1013 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1014 	hdev->page_scan_window = __le16_to_cpu(sent->window);
1015 
1016 	return rp->status;
1017 }
1018 
1019 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1020 				     struct sk_buff *skb)
1021 {
1022 	struct hci_rp_read_page_scan_type *rp = data;
1023 
1024 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1025 
1026 	if (rp->status)
1027 		return rp->status;
1028 
1029 	if (test_bit(HCI_INIT, &hdev->flags))
1030 		hdev->page_scan_type = rp->type;
1031 
1032 	return rp->status;
1033 }
1034 
1035 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1036 				      struct sk_buff *skb)
1037 {
1038 	struct hci_ev_status *rp = data;
1039 	u8 *type;
1040 
1041 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1042 
1043 	if (rp->status)
1044 		return rp->status;
1045 
1046 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1047 	if (type)
1048 		hdev->page_scan_type = *type;
1049 
1050 	return rp->status;
1051 }
1052 
1053 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1054 				      struct sk_buff *skb)
1055 {
1056 	struct hci_rp_read_data_block_size *rp = data;
1057 
1058 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1059 
1060 	if (rp->status)
1061 		return rp->status;
1062 
1063 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1064 	hdev->block_len = __le16_to_cpu(rp->block_len);
1065 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1066 
1067 	hdev->block_cnt = hdev->num_blocks;
1068 
1069 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1070 	       hdev->block_cnt, hdev->block_len);
1071 
1072 	return rp->status;
1073 }
1074 
/* Handle Command Complete for HCI_Read_Clock.
 *
 * Depending on the "which" parameter of the sent command, caches
 * either the local clock in hdev or the piconet clock and accuracy on
 * the matching connection.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* which == 0x00 requested the local clock */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1108 
1109 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1110 				     struct sk_buff *skb)
1111 {
1112 	struct hci_rp_read_local_amp_info *rp = data;
1113 
1114 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1115 
1116 	if (rp->status)
1117 		return rp->status;
1118 
1119 	hdev->amp_status = rp->amp_status;
1120 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1121 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1122 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1123 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1124 	hdev->amp_type = rp->amp_type;
1125 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1126 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1127 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1128 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1129 
1130 	return rp->status;
1131 }
1132 
1133 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1134 				       struct sk_buff *skb)
1135 {
1136 	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1137 
1138 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1139 
1140 	if (rp->status)
1141 		return rp->status;
1142 
1143 	hdev->inq_tx_power = rp->tx_power;
1144 
1145 	return rp->status;
1146 }
1147 
1148 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1149 					     struct sk_buff *skb)
1150 {
1151 	struct hci_rp_read_def_err_data_reporting *rp = data;
1152 
1153 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1154 
1155 	if (rp->status)
1156 		return rp->status;
1157 
1158 	hdev->err_data_reporting = rp->err_data_reporting;
1159 
1160 	return rp->status;
1161 }
1162 
1163 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1164 					      struct sk_buff *skb)
1165 {
1166 	struct hci_ev_status *rp = data;
1167 	struct hci_cp_write_def_err_data_reporting *cp;
1168 
1169 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1170 
1171 	if (rp->status)
1172 		return rp->status;
1173 
1174 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1175 	if (!cp)
1176 		return rp->status;
1177 
1178 	hdev->err_data_reporting = cp->err_data_reporting;
1179 
1180 	return rp->status;
1181 }
1182 
/* Handle Command Complete for HCI_PIN_Code_Request_Reply.
 *
 * Tells the management interface the outcome and, on success, records
 * the PIN length from the sent command on the matching ACL connection.
 */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1212 
/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: forward the
 * result to mgmt when the management interface is active.
 */
static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1230 
1231 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1232 				     struct sk_buff *skb)
1233 {
1234 	struct hci_rp_le_read_buffer_size *rp = data;
1235 
1236 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1237 
1238 	if (rp->status)
1239 		return rp->status;
1240 
1241 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1242 	hdev->le_pkts = rp->le_max_pkt;
1243 
1244 	hdev->le_cnt = hdev->le_pkts;
1245 
1246 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1247 
1248 	return rp->status;
1249 }
1250 
1251 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1252 					struct sk_buff *skb)
1253 {
1254 	struct hci_rp_le_read_local_features *rp = data;
1255 
1256 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1257 
1258 	if (rp->status)
1259 		return rp->status;
1260 
1261 	memcpy(hdev->le_features, rp->features, 8);
1262 
1263 	return rp->status;
1264 }
1265 
/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power: on
 * success cache the advertising TX power in hdev->adv_tx_power.
 */
static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}
1280 
/* Command Complete for HCI_User_Confirmation_Request_Reply: forward the
 * result to mgmt (link type ACL, address type 0 for BR/EDR).
 */
static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1298 
/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply:
 * forward the result to mgmt (link type ACL, address type 0 for BR/EDR).
 */
static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1316 
/* Command Complete for HCI_User_Passkey_Request_Reply: forward the
 * result to mgmt. The reply layout matches hci_rp_user_confirm_reply
 * (status + bdaddr), so that struct is reused here.
 */
static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1334 
/* Command Complete for HCI_User_Passkey_Request_Negative_Reply: forward
 * the result to mgmt. Reply layout matches hci_rp_user_confirm_reply.
 */
static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1352 
/* Command Complete for HCI_Read_Local_OOB_Data: nothing to cache here;
 * only the status is propagated back to the command-complete dispatcher.
 */
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1362 
/* Command Complete for HCI_Read_Local_OOB_Extended_Data: like the
 * non-extended variant, only the status byte is propagated.
 */
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1372 
/* Command Complete for HCI_LE_Set_Random_Address: record the address we
 * programmed (taken from the sent command, since the reply is status
 * only). If it matches the current RPA, restart the RPA expiry timer.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	/* The programmed address is our resolvable private address:
	 * arm its expiry so a fresh RPA gets generated in time.
	 */
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1402 
/* Command Complete for HCI_LE_Set_Default_PHY: on success cache the TX
 * and RX PHY preferences from the sent command parameters.
 */
static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}
1427 
/* Command Complete for HCI_LE_Set_Advertising_Set_Random_Address: store
 * the address on the matching advertising instance and, if it is the
 * current RPA, restart that instance's RPA expiry timer.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Only update instances with a non-zero handle: handle 0x00 is
	 * expected to use HCI_OP_LE_SET_RANDOM_ADDR instead, since that
	 * works for both extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1465 
/* Command Complete for HCI_LE_Remove_Advertising_Set: drop the matching
 * local advertising instance and notify mgmt that it was removed. The
 * instance number comes from the sent command (reply is status only).
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		/* Skip notifying the socket that issued the command */
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1493 
/* Command Complete for HCI_LE_Clear_Advertising_Sets: remove every local
 * advertising instance and notify mgmt for each one removed. The _safe
 * iterator is required because hci_remove_adv_instance() unlinks entries.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Only proceed if this completion matches our in-flight command */
	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1524 
/* Command Complete for HCI_LE_Read_Transmit_Power: on success cache the
 * controller's minimum and maximum LE TX power levels.
 */
static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}
1540 
/* Command Complete for HCI_LE_Set_Privacy_Mode: on success record the
 * new privacy mode on the matching connection parameters entry (looked
 * up by the peer address/type from the sent command).
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		params->privacy_mode = cp->mode;

	hci_dev_unlock(hdev);

	return rp->status;
}
1567 
/* Command Complete for HCI_LE_Set_Advertising_Enable: toggle the
 * HCI_LE_ADV flag to mirror the enable value we sent, and when enabling
 * as part of a peripheral connection attempt, arm the connection
 * timeout so a stalled attempt is cleaned up.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1606 
/* Command Complete for HCI_LE_Set_Extended_Advertising_Enable: keep the
 * per-instance enabled state and the global HCI_LE_ADV flag in sync
 * with the command we sent. Note only the first advertising set in
 * cp->data is inspected here.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* First (and only inspected) advertising set in the command */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		/* Peripheral connection attempt: arm a timeout in case
		 * the connection never materializes.
		 */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1671 
/* Command Complete for HCI_LE_Set_Scan_Parameters: on success cache the
 * scan type (active/passive) we programmed into hdev->le_scan_type.
 */
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1695 
/* Command Complete for HCI_LE_Set_Extended_Scan_Parameters: cache the
 * scan type from the first per-PHY parameter block of the sent command
 * (only the first PHY entry is inspected).
 */
static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1722 
/* True if a deferred advertising report is buffered in the discovery
 * state (last_adv_addr is non-zero only while a report is pending).
 */
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}
1729 
/* Discard any buffered advertising report by resetting the stored
 * address and data length in the discovery state.
 */
static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}
1737 
/* Buffer an advertising report in the discovery state so it can later
 * be merged with a matching scan response (or flushed on scan disable).
 * Reports longer than HCI_MAX_AD_LENGTH are silently dropped.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	/* Guard the fixed-size last_adv_data buffer */
	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1754 
/* Common completion handling for both legacy and extended LE scan
 * enable commands: update the HCI_LE_SCAN flag and discovery state,
 * and on disable flush any buffered advertising report to mgmt.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans merge adv reports with scan responses, so
		 * start from a clean pending-report state.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1810 
/* Command Complete for HCI_LE_Set_Scan_Enable: delegate the state
 * updates to le_set_scan_enable_complete() using the enable value from
 * the sent command.
 */
static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1830 
/* Command Complete for HCI_LE_Set_Extended_Scan_Enable: same handling
 * as the legacy variant, shared via le_set_scan_enable_complete().
 */
static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1850 
/* Command Complete for HCI_LE_Read_Number_of_Supported_Advertising_Sets:
 * on success cache the advertising-set count in hdev.
 */
static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}
1866 
/* Command Complete for HCI_LE_Read_Filter_Accept_List_Size: on success
 * cache the controller's accept-list capacity in hdev.
 */
static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}
1881 
/* Command Complete for HCI_LE_Clear_Filter_Accept_List: mirror the
 * controller's now-empty accept list by clearing the host-side copy.
 */
static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
1898 
/* Command Complete for HCI_LE_Add_Device_To_Filter_Accept_List: on
 * success mirror the addition in the host-side accept list, using the
 * address/type from the sent command (reply is status only).
 */
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1921 
/* Command Complete for HCI_LE_Remove_Device_From_Filter_Accept_List: on
 * success mirror the removal in the host-side accept list.
 */
static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1944 
/* Command Complete for HCI_LE_Read_Supported_States: on success copy
 * the 8-byte LE states bitmask into hdev->le_states.
 */
static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}
1959 
/* Command Complete for HCI_LE_Read_Suggested_Default_Data_Length: on
 * success cache the suggested TX octets and time in hdev.
 */
static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}
1975 
/* Command Complete for HCI_LE_Write_Suggested_Default_Data_Length: the
 * reply is status only, so the new values are taken from the sent
 * command parameters and cached in hdev.
 */
static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}
1996 
/* Command Complete for HCI_LE_Add_Device_To_Resolving_List: on success
 * mirror the entry (address, type, peer and local IRKs) in the
 * host-side resolving list, using the sent command parameters.
 */
static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				sent->bdaddr_type, sent->peer_irk,
				sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}
2020 
/* Command Complete for HCI_LE_Remove_Device_From_Resolving_List: on
 * success mirror the removal in the host-side resolving list.
 */
static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
2043 
/* Command Complete for HCI_LE_Clear_Resolving_List: mirror the
 * controller's now-empty resolving list by clearing the host-side copy.
 */
static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
2060 
/* Command Complete for HCI_LE_Read_Resolving_List_Size: on success
 * cache the controller's resolving-list capacity in hdev.
 */
static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}
2075 
/* Command Complete for HCI_LE_Set_Address_Resolution_Enable: toggle the
 * HCI_LL_RPA_RESOLUTION flag to match the enable value from the sent
 * command (reply is status only).
 */
static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}
2102 
/* Command Complete for HCI_LE_Read_Maximum_Data_Length: on success
 * cache the controller's maximum TX/RX octets and times in hdev.
 */
static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}
2120 
/* Command Complete for HCI_Write_LE_Host_Supported: update the host
 * feature bits (LMP_HOST_LE, LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED
 * flag to match the values we sent. Disabling LE also clears the
 * HCI_ADVERTISING flag.
 */
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	/* Simultaneous LE and BR/EDR host support bit */
	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}
2156 
/* Command Complete for HCI_LE_Set_Advertising_Parameters: on success
 * cache the own-address type used for advertising in hdev.
 */
static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}
2178 
/* Command Complete for HCI_LE_Set_Extended_Advertising_Parameters:
 * record the own-address type and the TX power selected by the
 * controller (in hdev for handle 0, otherwise on the matching adv
 * instance), then regenerate the advertising data which may embed the
 * TX power value.
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
2212 
/* Command Complete for HCI_Read_RSSI: on success store the RSSI value
 * on the connection identified by the handle in the reply.
 */
static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}
2234 
/* Command Complete for HCI_Read_Transmit_Power_Level: store the
 * reported power on the matching connection. The Type parameter of the
 * sent command selects which field to update: 0x00 = current TX power,
 * 0x01 = maximum TX power.
 */
static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
2270 
/* Command Complete for HCI_Write_Simple_Pairing_Debug_Mode: on success
 * cache the mode taken from the sent command (reply is status only).
 */
static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}
2288 
/* Command Status for HCI_Inquiry: mark inquiry as in progress on
 * success; on failure re-check any pending connection attempts that
 * were waiting for the inquiry to start.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}
2300 
/* Command Status for HCI_Create_Connection. On failure, close and
 * delete a connection still in BT_CONNECT — except for status 0x0c
 * (Command Disallowed) on the first attempts, where the connection is
 * parked in BT_CONNECT2 for a retry. On success, make sure a hci_conn
 * exists for the target address.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
2338 
/* Command Status for HCI_Add_SCO_Connection: only failures need
 * handling here — find the SCO link hanging off the ACL connection
 * named in the sent command, report the failure and delete it.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2373 
/* Command Status for HCI_Authentication_Requested: only failures need
 * handling — if the connection is still in BT_CONFIG, report the
 * failure to upper layers and drop the reference taken for the request.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2400 
2401 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2402 {
2403 	struct hci_cp_set_conn_encrypt *cp;
2404 	struct hci_conn *conn;
2405 
2406 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2407 
2408 	if (!status)
2409 		return;
2410 
2411 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2412 	if (!cp)
2413 		return;
2414 
2415 	hci_dev_lock(hdev);
2416 
2417 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2418 	if (conn) {
2419 		if (conn->state == BT_CONFIG) {
2420 			hci_connect_cfm(conn, status);
2421 			hci_conn_drop(conn);
2422 		}
2423 	}
2424 
2425 	hci_dev_unlock(hdev);
2426 }
2427 
2428 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2429 				    struct hci_conn *conn)
2430 {
2431 	if (conn->state != BT_CONFIG || !conn->out)
2432 		return 0;
2433 
2434 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2435 		return 0;
2436 
2437 	/* Only request authentication for SSP connections or non-SSP
2438 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2439 	 * is requested.
2440 	 */
2441 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2442 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2443 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2444 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2445 		return 0;
2446 
2447 	return 1;
2448 }
2449 
2450 static int hci_resolve_name(struct hci_dev *hdev,
2451 				   struct inquiry_entry *e)
2452 {
2453 	struct hci_cp_remote_name_req cp;
2454 
2455 	memset(&cp, 0, sizeof(cp));
2456 
2457 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2458 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2459 	cp.pscan_mode = e->data.pscan_mode;
2460 	cp.clock_offset = e->data.clock_offset;
2461 
2462 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2463 }
2464 
2465 static bool hci_resolve_next_name(struct hci_dev *hdev)
2466 {
2467 	struct discovery_state *discov = &hdev->discovery;
2468 	struct inquiry_entry *e;
2469 
2470 	if (list_empty(&discov->resolve))
2471 		return false;
2472 
2473 	/* We should stop if we already spent too much time resolving names. */
2474 	if (time_after(jiffies, discov->name_resolve_timeout)) {
2475 		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2476 		return false;
2477 	}
2478 
2479 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2480 	if (!e)
2481 		return false;
2482 
2483 	if (hci_resolve_name(hdev, e) == 0) {
2484 		e->name_state = NAME_PENDING;
2485 		return true;
2486 	}
2487 
2488 	return false;
2489 }
2490 
/* Finish a pending name lookup for @bdaddr: report the (possibly missing)
 * name to mgmt, then either continue resolving the next cached entry or
 * mark discovery as stopped. @name may be NULL when the lookup failed;
 * @conn may be NULL when no connection exists for the address.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop request is in flight; finish it off immediately. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving a next name
	 * as it will be done upon receiving another Remote Name Request
	 * Complete Event.
	 */
	if (!e)
		return;

	list_del(&e->list);

	/* name == NULL means the remote name lookup failed. */
	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	/* If another lookup was started, discovery stays in RESOLVING. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2536 
/* Command Status handler for HCI_Remote_Name_Request. A failed request
 * finishes any pending mgmt name lookup and, for outgoing connections in
 * configuration, falls back to requesting authentication directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed lookup (name == NULL) to the mgmt layer. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only send one authentication request per connection. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2579 
2580 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2581 {
2582 	struct hci_cp_read_remote_features *cp;
2583 	struct hci_conn *conn;
2584 
2585 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2586 
2587 	if (!status)
2588 		return;
2589 
2590 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2591 	if (!cp)
2592 		return;
2593 
2594 	hci_dev_lock(hdev);
2595 
2596 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2597 	if (conn) {
2598 		if (conn->state == BT_CONFIG) {
2599 			hci_connect_cfm(conn, status);
2600 			hci_conn_drop(conn);
2601 		}
2602 	}
2603 
2604 	hci_dev_unlock(hdev);
2605 }
2606 
2607 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2608 {
2609 	struct hci_cp_read_remote_ext_features *cp;
2610 	struct hci_conn *conn;
2611 
2612 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2613 
2614 	if (!status)
2615 		return;
2616 
2617 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2618 	if (!cp)
2619 		return;
2620 
2621 	hci_dev_lock(hdev);
2622 
2623 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2624 	if (conn) {
2625 		if (conn->state == BT_CONFIG) {
2626 			hci_connect_cfm(conn, status);
2627 			hci_conn_drop(conn);
2628 		}
2629 	}
2630 
2631 	hci_dev_unlock(hdev);
2632 }
2633 
2634 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2635 {
2636 	struct hci_cp_setup_sync_conn *cp;
2637 	struct hci_conn *acl, *sco;
2638 	__u16 handle;
2639 
2640 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2641 
2642 	if (!status)
2643 		return;
2644 
2645 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2646 	if (!cp)
2647 		return;
2648 
2649 	handle = __le16_to_cpu(cp->handle);
2650 
2651 	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2652 
2653 	hci_dev_lock(hdev);
2654 
2655 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2656 	if (acl) {
2657 		sco = acl->link;
2658 		if (sco) {
2659 			sco->state = BT_CLOSED;
2660 
2661 			hci_connect_cfm(sco, status);
2662 			hci_conn_del(sco);
2663 		}
2664 	}
2665 
2666 	hci_dev_unlock(hdev);
2667 }
2668 
2669 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2670 {
2671 	struct hci_cp_enhanced_setup_sync_conn *cp;
2672 	struct hci_conn *acl, *sco;
2673 	__u16 handle;
2674 
2675 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2676 
2677 	if (!status)
2678 		return;
2679 
2680 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2681 	if (!cp)
2682 		return;
2683 
2684 	handle = __le16_to_cpu(cp->handle);
2685 
2686 	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2687 
2688 	hci_dev_lock(hdev);
2689 
2690 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2691 	if (acl) {
2692 		sco = acl->link;
2693 		if (sco) {
2694 			sco->state = BT_CLOSED;
2695 
2696 			hci_connect_cfm(sco, status);
2697 			hci_conn_del(sco);
2698 		}
2699 	}
2700 
2701 	hci_dev_unlock(hdev);
2702 }
2703 
2704 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2705 {
2706 	struct hci_cp_sniff_mode *cp;
2707 	struct hci_conn *conn;
2708 
2709 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2710 
2711 	if (!status)
2712 		return;
2713 
2714 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2715 	if (!cp)
2716 		return;
2717 
2718 	hci_dev_lock(hdev);
2719 
2720 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2721 	if (conn) {
2722 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2723 
2724 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2725 			hci_sco_setup(conn, status);
2726 	}
2727 
2728 	hci_dev_unlock(hdev);
2729 }
2730 
2731 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2732 {
2733 	struct hci_cp_exit_sniff_mode *cp;
2734 	struct hci_conn *conn;
2735 
2736 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2737 
2738 	if (!status)
2739 		return;
2740 
2741 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2742 	if (!cp)
2743 		return;
2744 
2745 	hci_dev_lock(hdev);
2746 
2747 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2748 	if (conn) {
2749 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2750 
2751 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2752 			hci_sco_setup(conn, status);
2753 	}
2754 
2755 	hci_dev_unlock(hdev);
2756 }
2757 
/* Command Status handler for HCI_Disconnect. Normally the connection is
 * cleaned up by the Disconnection Complete event; this handler performs
 * the cleanup itself when the command failed or when the device is
 * suspended (where the complete event will not be processed).
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* Re-enable advertising for an LE peripheral link whose
		 * disconnect failed, using its advertising instance.
		 */
		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue auto-connect parameters so the device can reconnect. */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect on actual link loss, not on a
			 * deliberate local disconnect.
			 */
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2836 
2837 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2838 {
2839 	/* When using controller based address resolution, then the new
2840 	 * address types 0x02 and 0x03 are used. These types need to be
2841 	 * converted back into either public address or random address type
2842 	 */
2843 	switch (type) {
2844 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2845 		if (resolved)
2846 			*resolved = true;
2847 		return ADDR_LE_DEV_PUBLIC;
2848 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2849 		if (resolved)
2850 			*resolved = true;
2851 		return ADDR_LE_DEV_RANDOM;
2852 	}
2853 
2854 	if (resolved)
2855 		*resolved = false;
2856 	return type;
2857 }
2858 
/* Common Command Status bookkeeping for LE Create Connection and its
 * extended variant: record the SMP address material on the pending
 * connection and arm the connection timeout for direct connections.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Normalize controller-resolved address types (0x02/0x03). */
	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the accept list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2895 
2896 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2897 {
2898 	struct hci_cp_le_create_conn *cp;
2899 
2900 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2901 
2902 	/* All connection failure handling is taken care of by the
2903 	 * hci_conn_failed function which is triggered by the HCI
2904 	 * request completion callbacks used for connecting.
2905 	 */
2906 	if (status)
2907 		return;
2908 
2909 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2910 	if (!cp)
2911 		return;
2912 
2913 	hci_dev_lock(hdev);
2914 
2915 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2916 			  cp->own_address_type, cp->filter_policy);
2917 
2918 	hci_dev_unlock(hdev);
2919 }
2920 
2921 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2922 {
2923 	struct hci_cp_le_ext_create_conn *cp;
2924 
2925 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2926 
2927 	/* All connection failure handling is taken care of by the
2928 	 * hci_conn_failed function which is triggered by the HCI
2929 	 * request completion callbacks used for connecting.
2930 	 */
2931 	if (status)
2932 		return;
2933 
2934 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2935 	if (!cp)
2936 		return;
2937 
2938 	hci_dev_lock(hdev);
2939 
2940 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2941 			  cp->own_addr_type, cp->filter_policy);
2942 
2943 	hci_dev_unlock(hdev);
2944 }
2945 
2946 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2947 {
2948 	struct hci_cp_le_read_remote_features *cp;
2949 	struct hci_conn *conn;
2950 
2951 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2952 
2953 	if (!status)
2954 		return;
2955 
2956 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2957 	if (!cp)
2958 		return;
2959 
2960 	hci_dev_lock(hdev);
2961 
2962 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2963 	if (conn) {
2964 		if (conn->state == BT_CONFIG) {
2965 			hci_connect_cfm(conn, status);
2966 			hci_conn_drop(conn);
2967 		}
2968 	}
2969 
2970 	hci_dev_unlock(hdev);
2971 }
2972 
/* Command Status handler for HCI_LE_Start_Encryption. A failure means
 * encryption could not even be initiated, so the link is terminated with
 * an authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Success is reported later via the Encryption Change event. */
	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	/* Tear down the link; an unencryptable connection is not kept. */
	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3002 
3003 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3004 {
3005 	struct hci_cp_switch_role *cp;
3006 	struct hci_conn *conn;
3007 
3008 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
3009 
3010 	if (!status)
3011 		return;
3012 
3013 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3014 	if (!cp)
3015 		return;
3016 
3017 	hci_dev_lock(hdev);
3018 
3019 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3020 	if (conn)
3021 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3022 
3023 	hci_dev_unlock(hdev);
3024 }
3025 
3026 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3027 				     struct sk_buff *skb)
3028 {
3029 	struct hci_ev_status *ev = data;
3030 	struct discovery_state *discov = &hdev->discovery;
3031 	struct inquiry_entry *e;
3032 
3033 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3034 
3035 	hci_conn_check_pending(hdev);
3036 
3037 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3038 		return;
3039 
3040 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3041 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
3042 
3043 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3044 		return;
3045 
3046 	hci_dev_lock(hdev);
3047 
3048 	if (discov->state != DISCOVERY_FINDING)
3049 		goto unlock;
3050 
3051 	if (list_empty(&discov->resolve)) {
3052 		/* When BR/EDR inquiry is active and no LE scanning is in
3053 		 * progress, then change discovery state to indicate completion.
3054 		 *
3055 		 * When running LE scanning and BR/EDR inquiry simultaneously
3056 		 * and the LE scan already finished, then change the discovery
3057 		 * state to indicate completion.
3058 		 */
3059 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3060 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3061 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3062 		goto unlock;
3063 	}
3064 
3065 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3066 	if (e && hci_resolve_name(hdev, e) == 0) {
3067 		e->name_state = NAME_PENDING;
3068 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3069 		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3070 	} else {
3071 		/* When BR/EDR inquiry is active and no LE scanning is in
3072 		 * progress, then change discovery state to indicate completion.
3073 		 *
3074 		 * When running LE scanning and BR/EDR inquiry simultaneously
3075 		 * and the LE scan already finished, then change the discovery
3076 		 * state to indicate completion.
3077 		 */
3078 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3079 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3080 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3081 	}
3082 
3083 unlock:
3084 	hci_dev_unlock(hdev);
3085 }
3086 
/* Handle HCI_EV_INQUIRY_RESULT: validate the flexible-length event
 * payload, update the inquiry cache for each reported device and report
 * each one to the mgmt layer.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
				   struct sk_buff *skb)
{
	struct hci_ev_inquiry_result *ev = edata;
	struct inquiry_data data;
	int i;

	/* Ensure the skb really contains ev->num result entries before
	 * touching them; malformed events are dropped here.
	 */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Periodic inquiry results are intentionally not processed. */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct inquiry_info *info = &ev->info[i];
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		/* Basic inquiry results carry no RSSI or SSP data. */
		data.rssi		= HCI_RSSI_INVALID;
		data.ssp_mode		= 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
3130 
/* Handle HCI_EV_CONN_COMPLETE: finish setting up a BR/EDR ACL or SCO
 * connection (or clean up on failure), guarding against duplicate
 * events and invalid handles.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			/* The connection may have been set up as eSCO but
			 * completed as plain SCO; fix up its type.
			 */
			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		conn->handle = __le16_to_cpu(ev->handle);
		/* Reject handles outside the valid range defined by the
		 * specification; treat them as an invalid-parameter failure.
		 */
		if (conn->handle > HCI_CONN_HANDLE_MAX) {
			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
				   conn->handle, HCI_CONN_HANDLE_MAX);
			status = HCI_ERROR_INVALID_PARAMETERS;
			goto done;
		}

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a stored
			 * link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		/* Notify the driver so it can configure audio routing
		 * for CVSD air mode.
		 */
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3265 
3266 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3267 {
3268 	struct hci_cp_reject_conn_req cp;
3269 
3270 	bacpy(&cp.bdaddr, bdaddr);
3271 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3272 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3273 }
3274 
/* Handle HCI_EV_CONN_REQUEST: decide whether to accept or reject an
 * incoming BR/EDR connection request based on protocol hooks, the
 * reject/accept lists and the HCI_CONNECTABLE setting.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Let the protocol layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class from the request. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	/* ACL requests are accepted directly; SCO/eSCO requests either
	 * get a synchronous accept or are deferred to the protocol layer
	 * depending on the HCI_PROTO_DEFER flag and eSCO support.
	 */
	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Defer the accept decision to the protocol layer. */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3372 
3373 static u8 hci_to_mgmt_reason(u8 err)
3374 {
3375 	switch (err) {
3376 	case HCI_ERROR_CONNECTION_TIMEOUT:
3377 		return MGMT_DEV_DISCONN_TIMEOUT;
3378 	case HCI_ERROR_REMOTE_USER_TERM:
3379 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3380 	case HCI_ERROR_REMOTE_POWER_OFF:
3381 		return MGMT_DEV_DISCONN_REMOTE;
3382 	case HCI_ERROR_LOCAL_HOST_TERM:
3383 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3384 	default:
3385 		return MGMT_DEV_DISCONN_UNKNOWN;
3386 	}
3387 }
3388 
/* Handle HCI_EV_DISCONN_COMPLETE: notify mgmt and the protocol layers,
 * re-queue auto-connect parameters, possibly re-enable advertising and
 * finally delete the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* A non-zero status means the disconnect failed and the
	 * connection stays up; only mgmt needs to know.
	 */
	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* Report auth failures with their own reason code; otherwise
	 * translate the HCI reason to a mgmt reason.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-queue auto-connect parameters so the device can reconnect. */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect on actual link loss, not on a
			 * deliberate remote/local termination.
			 */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3473 
/* Handle HCI_EV_AUTH_COMPLETE: record the authentication result, update
 * the connection security flags and, where needed, continue with
 * encryption setup or finish connection configuration.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Legacy (non-SSP) devices cannot be re-authenticated;
		 * the existing auth state is kept in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links still in configuration, continue with
		 * encryption; otherwise configuration is done.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold + drop resets the disconnect timer deadline. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			/* Auth failed, so the pending encryption cannot
			 * proceed; report the failure to waiters.
			 */
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3544 
/* Handle HCI Remote Name Request Complete event.
 *
 * Resolves any pending name lookup (when mgmt is active) and, if an
 * ACL connection to the peer exists and still needs authentication,
 * kicks off the authentication request.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Without the management interface there is no pending name
	 * lookup to resolve; go straight to the auth check.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only issue the request if no authentication is pending yet */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3587 
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's encryption-related flags, enforces the link
 * security requirements, reads the encryption key size on ACL links and
 * configures the authenticated payload timeout where supported, before
 * finally notifying the upper layers via hci_encrypt_cfm().
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		cp.handle = cpu_to_le16(conn->handle);
		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to the key size completion */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "write auth payload timeout failed");
			goto notify;
		}

		/* Notification is deferred to the command completion */
		goto unlock;
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3703 
3704 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3705 					     struct sk_buff *skb)
3706 {
3707 	struct hci_ev_change_link_key_complete *ev = data;
3708 	struct hci_conn *conn;
3709 
3710 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3711 
3712 	hci_dev_lock(hdev);
3713 
3714 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3715 	if (conn) {
3716 		if (!ev->status)
3717 			set_bit(HCI_CONN_SECURE, &conn->flags);
3718 
3719 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3720 
3721 		hci_key_change_cfm(conn, ev->status);
3722 	}
3723 
3724 	hci_dev_unlock(hdev);
3725 }
3726 
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page 0 and, while the link is still being
 * configured, continues setup by reading extended features, requesting
 * the remote name or finalizing the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Setup chaining below only applies while configuring the link */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 next */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Otherwise resolve the remote name, or report the device as
	 * connected to mgmt if that already happened.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* No further authentication needed: the link is fully up */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3775 
3776 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3777 {
3778 	cancel_delayed_work(&hdev->cmd_timer);
3779 
3780 	rcu_read_lock();
3781 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3782 		if (ncmd) {
3783 			cancel_delayed_work(&hdev->ncmd_timer);
3784 			atomic_set(&hdev->cmd_cnt, 1);
3785 		} else {
3786 			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3787 				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3788 						   HCI_NCMD_TIMEOUT);
3789 		}
3790 	}
3791 	rcu_read_unlock();
3792 }
3793 
3794 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3795 					struct sk_buff *skb)
3796 {
3797 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3798 
3799 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3800 
3801 	if (rp->status)
3802 		return rp->status;
3803 
3804 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3805 	hdev->le_pkts  = rp->acl_max_pkt;
3806 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3807 	hdev->iso_pkts = rp->iso_max_pkt;
3808 
3809 	hdev->le_cnt  = hdev->le_pkts;
3810 	hdev->iso_cnt = hdev->iso_pkts;
3811 
3812 	BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3813 	       hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3814 
3815 	return rp->status;
3816 }
3817 
/* Command complete handler for HCI_OP_LE_SET_CIG_PARAMS.
 *
 * On failure, tears down every connection belonging to the CIG. On
 * success, assigns the returned CIS handles to the matching ISO
 * connections and triggers CIS creation where the underlying LE link
 * is already up.
 *
 * Returns the HCI status of the command.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_conn *conn;
	int i = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (rp->status) {
		/* Fail and remove every connection of this CIG */
		while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, rp->status);
			hci_conn_del(conn);
		}
		goto unlock;
	}

	rcu_read_lock();

	/* Handles are returned in the same order the CIS were configured,
	 * so assign them to the not-yet-connected ISO links in turn.
	 */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
		    conn->state == BT_CONNECTED)
			continue;

		conn->handle = __le16_to_cpu(rp->handle[i++]);

		bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
			   conn->handle, conn->link);

		/* Create CIS if LE is already connected */
		if (conn->link && conn->link->state == BT_CONNECTED) {
			/* NOTE(review): the RCU read lock is dropped and
			 * re-taken mid-iteration so hci_le_create_cis() can
			 * sleep; this assumes the list entry stays valid
			 * across the gap (hdev lock is held) — confirm.
			 */
			rcu_read_unlock();
			hci_le_create_cis(conn->link);
			rcu_read_lock();
		}

		if (i == rp->num_handles)
			break;
	}

	rcu_read_unlock();

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
3868 
3869 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3870 				   struct sk_buff *skb)
3871 {
3872 	struct hci_rp_le_setup_iso_path *rp = data;
3873 	struct hci_cp_le_setup_iso_path *cp;
3874 	struct hci_conn *conn;
3875 
3876 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3877 
3878 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3879 	if (!cp)
3880 		return rp->status;
3881 
3882 	hci_dev_lock(hdev);
3883 
3884 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3885 	if (!conn)
3886 		goto unlock;
3887 
3888 	if (rp->status) {
3889 		hci_connect_cfm(conn, rp->status);
3890 		hci_conn_del(conn);
3891 		goto unlock;
3892 	}
3893 
3894 	switch (cp->direction) {
3895 	/* Input (Host to Controller) */
3896 	case 0x00:
3897 		/* Only confirm connection if output only */
3898 		if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
3899 			hci_connect_cfm(conn, rp->status);
3900 		break;
3901 	/* Output (Controller to Host) */
3902 	case 0x01:
3903 		/* Confirm connection since conn->iso_qos is always configured
3904 		 * last.
3905 		 */
3906 		hci_connect_cfm(conn, rp->status);
3907 		break;
3908 	}
3909 
3910 unlock:
3911 	hci_dev_unlock(hdev);
3912 	return rp->status;
3913 }
3914 
/* Command status handler for HCI_OP_LE_CREATE_BIG; currently only logs
 * the status, completion is handled via the BIG-specific events.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
3919 
3920 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3921 				   struct sk_buff *skb)
3922 {
3923 	struct hci_ev_status *rp = data;
3924 	struct hci_cp_le_set_per_adv_params *cp;
3925 
3926 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3927 
3928 	if (rp->status)
3929 		return rp->status;
3930 
3931 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3932 	if (!cp)
3933 		return rp->status;
3934 
3935 	/* TODO: set the conn state */
3936 	return rp->status;
3937 }
3938 
3939 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3940 				       struct sk_buff *skb)
3941 {
3942 	struct hci_ev_status *rp = data;
3943 	__u8 *sent;
3944 
3945 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3946 
3947 	if (rp->status)
3948 		return rp->status;
3949 
3950 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3951 	if (!sent)
3952 		return rp->status;
3953 
3954 	hci_dev_lock(hdev);
3955 
3956 	if (*sent)
3957 		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3958 	else
3959 		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3960 
3961 	hci_dev_unlock(hdev);
3962 
3963 	return rp->status;
3964 }
3965 
/* Entry builder for hci_cc_table with distinct minimum and maximum
 * expected response lengths.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

/* Entry builder for responses with a single fixed length */
#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

/* Entry builder for commands whose response is a bare status byte */
#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
3979 
/* Dispatch table mapping HCI command opcodes to their Command Complete
 * handlers, together with the acceptable response payload length range
 * that hci_cc_func() validates before invoking the handler.
 */
static const struct hci_cc {
	u16  op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	/* Variable length: the reply carries one handle per created CIS */
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
4154 
4155 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4156 		      struct sk_buff *skb)
4157 {
4158 	void *data;
4159 
4160 	if (skb->len < cc->min_len) {
4161 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4162 			   cc->op, skb->len, cc->min_len);
4163 		return HCI_ERROR_UNSPECIFIED;
4164 	}
4165 
4166 	/* Just warn if the length is over max_len size it still be possible to
4167 	 * partially parse the cc so leave to callback to decide if that is
4168 	 * acceptable.
4169 	 */
4170 	if (skb->len > cc->max_len)
4171 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4172 			    cc->op, skb->len, cc->max_len);
4173 
4174 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4175 	if (!data)
4176 		return HCI_ERROR_UNSPECIFIED;
4177 
4178 	return cc->func(hdev, data, skb);
4179 }
4180 
4181 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4182 				 struct sk_buff *skb, u16 *opcode, u8 *status,
4183 				 hci_req_complete_t *req_complete,
4184 				 hci_req_complete_skb_t *req_complete_skb)
4185 {
4186 	struct hci_ev_cmd_complete *ev = data;
4187 	int i;
4188 
4189 	*opcode = __le16_to_cpu(ev->opcode);
4190 
4191 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4192 
4193 	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4194 		if (hci_cc_table[i].op == *opcode) {
4195 			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4196 			break;
4197 		}
4198 	}
4199 
4200 	if (i == ARRAY_SIZE(hci_cc_table)) {
4201 		/* Unknown opcode, assume byte 0 contains the status, so
4202 		 * that e.g. __hci_cmd_sync() properly returns errors
4203 		 * for vendor specific commands send by HCI drivers.
4204 		 * If a vendor doesn't actually follow this convention we may
4205 		 * need to introduce a vendor CC table in order to properly set
4206 		 * the status.
4207 		 */
4208 		*status = skb->data[0];
4209 	}
4210 
4211 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4212 
4213 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4214 			     req_complete_skb);
4215 
4216 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4217 		bt_dev_err(hdev,
4218 			   "unexpected event for opcode 0x%4.4x", *opcode);
4219 		return;
4220 	}
4221 
4222 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4223 		queue_work(hdev->workqueue, &hdev->cmd_work);
4224 }
4225 
/* Command status handler for HCI_OP_LE_CREATE_CIS.
 *
 * On failure, removes every CIS connection that the failed command
 * attempted to create; on success nothing is done here since the CIS
 * Established event completes the setup.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	/* Note: the loop decrements cp->num_cis in place, consuming the
	 * sent command's CIS count as each handle is processed.
	 */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	hci_dev_unlock(hdev);
}
4259 
/* Entry builder for hci_cs_table */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table mapping HCI command opcodes to their Command Status
 * handlers; used by hci_cmd_status_evt().
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4293 
/* Handle HCI Command Status event.
 *
 * Dispatches to the per-opcode handler from hci_cs_table, refreshes the
 * command credits and, where appropriate, completes a waiting request.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	/* Kick the command work queue if commands are waiting and credits
	 * are available.
	 */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4335 
/* Handle HCI Hardware Error event.
 *
 * Records the controller's error code and schedules the error reset
 * work; the code must be stored before the work is queued so the reset
 * handler can report it.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = data;

	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}
4347 
4348 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4349 				struct sk_buff *skb)
4350 {
4351 	struct hci_ev_role_change *ev = data;
4352 	struct hci_conn *conn;
4353 
4354 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4355 
4356 	hci_dev_lock(hdev);
4357 
4358 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4359 	if (conn) {
4360 		if (!ev->status)
4361 			conn->role = ev->role;
4362 
4363 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4364 
4365 		hci_role_switch_cfm(conn, ev->status, ev->role);
4366 	}
4367 
4368 	hci_dev_unlock(hdev);
4369 }
4370 
/* Handle HCI_Number_Of_Completed_Packets: return per-connection TX
 * credits to the controller-wide flow control counters and kick the
 * TX work to push out queued traffic.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = data;
	int i;

	/* Validate that the skb really contains ev->num handle entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
			     flex_array_size(ev, handles, ev->num)))
		return;

	/* This event is only defined for packet based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num %d", ev->num);

	for (i = 0; i < ev->num; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		/* Unknown handles are silently skipped; the connection may
		 * already have been torn down.
		 */
		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credits are clamped to the controller advertised buffer
		 * counts. LE and ISO links fall back to the shared LE or
		 * ACL pool when no dedicated buffer pool is advertised.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		case ISO_LINK:
			if (hdev->iso_pkts) {
				hdev->iso_cnt += count;
				if (hdev->iso_cnt > hdev->iso_pkts)
					hdev->iso_cnt = hdev->iso_pkts;
			} else if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4452 
4453 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4454 						 __u16 handle)
4455 {
4456 	struct hci_chan *chan;
4457 
4458 	switch (hdev->dev_type) {
4459 	case HCI_PRIMARY:
4460 		return hci_conn_hash_lookup_handle(hdev, handle);
4461 	case HCI_AMP:
4462 		chan = hci_chan_lookup_handle(hdev, handle);
4463 		if (chan)
4464 			return chan->conn;
4465 		break;
4466 	default:
4467 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4468 		break;
4469 	}
4470 
4471 	return NULL;
4472 }
4473 
4474 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4475 				    struct sk_buff *skb)
4476 {
4477 	struct hci_ev_num_comp_blocks *ev = data;
4478 	int i;
4479 
4480 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4481 			     flex_array_size(ev, handles, ev->num_hndl)))
4482 		return;
4483 
4484 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4485 		bt_dev_err(hdev, "wrong event for mode %d",
4486 			   hdev->flow_ctl_mode);
4487 		return;
4488 	}
4489 
4490 	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4491 		   ev->num_hndl);
4492 
4493 	for (i = 0; i < ev->num_hndl; i++) {
4494 		struct hci_comp_blocks_info *info = &ev->handles[i];
4495 		struct hci_conn *conn = NULL;
4496 		__u16  handle, block_count;
4497 
4498 		handle = __le16_to_cpu(info->handle);
4499 		block_count = __le16_to_cpu(info->blocks);
4500 
4501 		conn = __hci_conn_lookup_handle(hdev, handle);
4502 		if (!conn)
4503 			continue;
4504 
4505 		conn->sent -= block_count;
4506 
4507 		switch (conn->type) {
4508 		case ACL_LINK:
4509 		case AMP_LINK:
4510 			hdev->block_cnt += block_count;
4511 			if (hdev->block_cnt > hdev->num_blocks)
4512 				hdev->block_cnt = hdev->num_blocks;
4513 			break;
4514 
4515 		default:
4516 			bt_dev_err(hdev, "unknown type %d conn %p",
4517 				   conn->type, conn);
4518 			break;
4519 		}
4520 	}
4521 
4522 	queue_work(hdev->workqueue, &hdev->tx_work);
4523 }
4524 
4525 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4526 				struct sk_buff *skb)
4527 {
4528 	struct hci_ev_mode_change *ev = data;
4529 	struct hci_conn *conn;
4530 
4531 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4532 
4533 	hci_dev_lock(hdev);
4534 
4535 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4536 	if (conn) {
4537 		conn->mode = ev->mode;
4538 
4539 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4540 					&conn->flags)) {
4541 			if (conn->mode == HCI_CM_ACTIVE)
4542 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4543 			else
4544 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4545 		}
4546 
4547 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4548 			hci_sco_setup(conn, ev->status);
4549 	}
4550 
4551 	hci_dev_unlock(hdev);
4552 }
4553 
4554 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4555 				     struct sk_buff *skb)
4556 {
4557 	struct hci_ev_pin_code_req *ev = data;
4558 	struct hci_conn *conn;
4559 
4560 	bt_dev_dbg(hdev, "");
4561 
4562 	hci_dev_lock(hdev);
4563 
4564 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4565 	if (!conn)
4566 		goto unlock;
4567 
4568 	if (conn->state == BT_CONNECTED) {
4569 		hci_conn_hold(conn);
4570 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4571 		hci_conn_drop(conn);
4572 	}
4573 
4574 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4575 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4576 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4577 			     sizeof(ev->bdaddr), &ev->bdaddr);
4578 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4579 		u8 secure;
4580 
4581 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4582 			secure = 1;
4583 		else
4584 			secure = 0;
4585 
4586 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4587 	}
4588 
4589 unlock:
4590 	hci_dev_unlock(hdev);
4591 }
4592 
4593 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4594 {
4595 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4596 		return;
4597 
4598 	conn->pin_length = pin_len;
4599 	conn->key_type = key_type;
4600 
4601 	switch (key_type) {
4602 	case HCI_LK_LOCAL_UNIT:
4603 	case HCI_LK_REMOTE_UNIT:
4604 	case HCI_LK_DEBUG_COMBINATION:
4605 		return;
4606 	case HCI_LK_COMBINATION:
4607 		if (pin_len == 16)
4608 			conn->pending_sec_level = BT_SECURITY_HIGH;
4609 		else
4610 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4611 		break;
4612 	case HCI_LK_UNAUTH_COMBINATION_P192:
4613 	case HCI_LK_UNAUTH_COMBINATION_P256:
4614 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4615 		break;
4616 	case HCI_LK_AUTH_COMBINATION_P192:
4617 		conn->pending_sec_level = BT_SECURITY_HIGH;
4618 		break;
4619 	case HCI_LK_AUTH_COMBINATION_P256:
4620 		conn->pending_sec_level = BT_SECURITY_FIPS;
4621 		break;
4622 	}
4623 }
4624 
/* Handle HCI_Link_Key_Request: look up a stored link key for the peer
 * and reply with it, or send a negative reply when no suitable key is
 * available (which triggers fresh pairing).
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	/* Link keys are only managed by the kernel when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key must not be used when the current
		 * authentication requirement demands MITM protection
		 * (auth_type 0xff means no requirement has been set yet).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* A combination key derived from a short (<16 digit) PIN is
		 * too weak for high or FIPS level security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4682 
/* Handle HCI_Link_Key_Notification: record the newly created link key,
 * update the connection security state and notify user space.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the disconnect timeout now that pairing is done */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* NOTE(review): pin_len is always 0 here, so the stored key never
	 * records the PIN length used during pairing; only conn->pin_length
	 * (set earlier via conn_set_key()) carries it — confirm this is
	 * intentional.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4743 
4744 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4745 				 struct sk_buff *skb)
4746 {
4747 	struct hci_ev_clock_offset *ev = data;
4748 	struct hci_conn *conn;
4749 
4750 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4751 
4752 	hci_dev_lock(hdev);
4753 
4754 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4755 	if (conn && !ev->status) {
4756 		struct inquiry_entry *ie;
4757 
4758 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4759 		if (ie) {
4760 			ie->data.clock_offset = ev->clock_offset;
4761 			ie->timestamp = jiffies;
4762 		}
4763 	}
4764 
4765 	hci_dev_unlock(hdev);
4766 }
4767 
4768 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4769 				    struct sk_buff *skb)
4770 {
4771 	struct hci_ev_pkt_type_change *ev = data;
4772 	struct hci_conn *conn;
4773 
4774 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4775 
4776 	hci_dev_lock(hdev);
4777 
4778 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4779 	if (conn && !ev->status)
4780 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4781 
4782 	hci_dev_unlock(hdev);
4783 }
4784 
4785 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4786 				   struct sk_buff *skb)
4787 {
4788 	struct hci_ev_pscan_rep_mode *ev = data;
4789 	struct inquiry_entry *ie;
4790 
4791 	bt_dev_dbg(hdev, "");
4792 
4793 	hci_dev_lock(hdev);
4794 
4795 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4796 	if (ie) {
4797 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4798 		ie->timestamp = jiffies;
4799 	}
4800 
4801 	hci_dev_unlock(hdev);
4802 }
4803 
/* Handle HCI_Inquiry_Result_with_RSSI: parse the reported devices,
 * update the inquiry cache and forward each result to user space.
 *
 * Two wire formats exist for this event: one variant carries an extra
 * pscan_mode byte per entry. The total skb length is used to decide
 * which format the controller sent.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Periodic inquiry results are not reported to user space */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		/* Variant with the additional pscan_mode field */
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		/* Standard variant without a pscan_mode field */
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		/* Length matches neither known format */
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
4890 
/* Handle HCI_Read_Remote_Extended_Features_Complete: cache the remote
 * feature page, update SSP/SC state from page 1 host features and, when
 * the connection is still being configured, continue its setup.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the feature page on the connection */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the host features, including SSP and SC support */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before reporting the connection to
	 * mgmt, unless that has already happened.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* Without outgoing authentication pending, setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4954 
/* Handle HCI_Synchronous_Connection_Complete: finalize SCO/eSCO setup,
 * retry with SCO parameters on negotiation failures and notify drivers
 * about the audio data path.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		/* Reject handles above the controller maximum */
		conn->handle = __le16_to_cpu(ev->handle);
		if (conn->handle > HCI_CONN_HANDLE_MAX) {
			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
				   conn->handle, HCI_CONN_HANDLE_MAX);
			status = HCI_ERROR_INVALID_PARAMETERS;
			conn->state = BT_CLOSED;
			break;
		}

		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Negotiation failed: for outgoing attempts retry with a
		 * downgraded packet type selection.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path which
	 * is zero and non-zero value shall be non-HCI transport data path
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
5070 
5071 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5072 {
5073 	size_t parsed = 0;
5074 
5075 	while (parsed < eir_len) {
5076 		u8 field_len = eir[0];
5077 
5078 		if (field_len == 0)
5079 			return parsed;
5080 
5081 		parsed += field_len + 1;
5082 		eir += field_len + 1;
5083 	}
5084 
5085 	return eir_len;
5086 }
5087 
/* Handle HCI_Extended_Inquiry_Result: update the inquiry cache and
 * forward each found device, including its EIR data, to user space.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	/* Validate that the skb really contains ev->num result entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Periodic inquiry results are not reported to user space */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* A name request is skipped when the EIR data already
		 * contains the complete name.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
5142 
/* Handle HCI_Encryption_Key_Refresh_Complete: update the LE connection
 * security level and deliver connect/auth confirmations accordingly.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed key refresh on an established link terminates it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup finishes once encryption is up */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5192 
5193 static u8 hci_get_auth_req(struct hci_conn *conn)
5194 {
5195 	/* If remote requests no-bonding follow that lead */
5196 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5197 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5198 		return conn->remote_auth | (conn->auth_type & 0x01);
5199 
5200 	/* If both remote and local have enough IO capabilities, require
5201 	 * MITM protection
5202 	 */
5203 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5204 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5205 		return conn->remote_auth | 0x01;
5206 
5207 	/* No MITM protection possible so ignore remote requirement */
5208 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5209 }
5210 
5211 static u8 bredr_oob_data_present(struct hci_conn *conn)
5212 {
5213 	struct hci_dev *hdev = conn->hdev;
5214 	struct oob_data *data;
5215 
5216 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5217 	if (!data)
5218 		return 0x00;
5219 
5220 	if (bredr_sc_enabled(hdev)) {
5221 		/* When Secure Connections is enabled, then just
5222 		 * return the present value stored with the OOB
5223 		 * data. The stored value contains the right present
5224 		 * information. However it can only be trusted when
5225 		 * not in Secure Connection Only mode.
5226 		 */
5227 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5228 			return data->present;
5229 
5230 		/* When Secure Connections Only mode is enabled, then
5231 		 * the P-256 values are required. If they are not
5232 		 * available, then do not declare that OOB data is
5233 		 * present.
5234 		 */
5235 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5236 		    !memcmp(data->hash256, ZERO_KEY, 16))
5237 			return 0x00;
5238 
5239 		return 0x02;
5240 	}
5241 
5242 	/* When Secure Connections is not enabled or actually
5243 	 * not supported by the hardware, then check that if
5244 	 * P-192 data values are present.
5245 	 */
5246 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5247 	    !memcmp(data->hash192, ZERO_KEY, 16))
5248 		return 0x00;
5249 
5250 	return 0x01;
5251 }
5252 
/* Handle HCI_IO_Capability_Request: decide whether pairing is allowed
 * and reply with our IO capability, authentication requirements and
 * OOB data availability, or send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive during the pairing exchange */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5322 
5323 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5324 				  struct sk_buff *skb)
5325 {
5326 	struct hci_ev_io_capa_reply *ev = data;
5327 	struct hci_conn *conn;
5328 
5329 	bt_dev_dbg(hdev, "");
5330 
5331 	hci_dev_lock(hdev);
5332 
5333 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5334 	if (!conn)
5335 		goto unlock;
5336 
5337 	conn->remote_cap = ev->capability;
5338 	conn->remote_auth = ev->authentication;
5339 
5340 unlock:
5341 	hci_dev_unlock(hdev);
5342 }
5343 
/* Handle HCI_User_Confirmation_Request: auto-accept, auto-reject or
 * defer the numeric comparison to user space depending on the MITM
 * requirements and IO capabilities of both sides.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* An optional delay gives user space a chance to cancel */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5428 
5429 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5430 					 struct sk_buff *skb)
5431 {
5432 	struct hci_ev_user_passkey_req *ev = data;
5433 
5434 	bt_dev_dbg(hdev, "");
5435 
5436 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5437 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5438 }
5439 
5440 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5441 					struct sk_buff *skb)
5442 {
5443 	struct hci_ev_user_passkey_notify *ev = data;
5444 	struct hci_conn *conn;
5445 
5446 	bt_dev_dbg(hdev, "");
5447 
5448 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5449 	if (!conn)
5450 		return;
5451 
5452 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5453 	conn->passkey_entered = 0;
5454 
5455 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5456 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5457 					 conn->dst_type, conn->passkey_notify,
5458 					 conn->passkey_entered);
5459 }
5460 
5461 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5462 				    struct sk_buff *skb)
5463 {
5464 	struct hci_ev_keypress_notify *ev = data;
5465 	struct hci_conn *conn;
5466 
5467 	bt_dev_dbg(hdev, "");
5468 
5469 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5470 	if (!conn)
5471 		return;
5472 
5473 	switch (ev->type) {
5474 	case HCI_KEYPRESS_STARTED:
5475 		conn->passkey_entered = 0;
5476 		return;
5477 
5478 	case HCI_KEYPRESS_ENTERED:
5479 		conn->passkey_entered++;
5480 		break;
5481 
5482 	case HCI_KEYPRESS_ERASED:
5483 		conn->passkey_entered--;
5484 		break;
5485 
5486 	case HCI_KEYPRESS_CLEARED:
5487 		conn->passkey_entered = 0;
5488 		break;
5489 
5490 	case HCI_KEYPRESS_COMPLETED:
5491 		return;
5492 	}
5493 
5494 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5495 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5496 					 conn->dst_type, conn->passkey_notify,
5497 					 conn->passkey_entered);
5498 }
5499 
5500 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5501 					 struct sk_buff *skb)
5502 {
5503 	struct hci_ev_simple_pair_complete *ev = data;
5504 	struct hci_conn *conn;
5505 
5506 	bt_dev_dbg(hdev, "");
5507 
5508 	hci_dev_lock(hdev);
5509 
5510 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5511 	if (!conn)
5512 		goto unlock;
5513 
5514 	/* Reset the authentication requirement to unknown */
5515 	conn->remote_auth = 0xff;
5516 
5517 	/* To avoid duplicate auth_failed events to user space we check
5518 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
5519 	 * initiated the authentication. A traditional auth_complete
5520 	 * event gets always produced as initiator and is also mapped to
5521 	 * the mgmt_auth_failed event */
5522 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5523 		mgmt_auth_failed(conn, ev->status);
5524 
5525 	hci_conn_drop(conn);
5526 
5527 unlock:
5528 	hci_dev_unlock(hdev);
5529 }
5530 
5531 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5532 					 struct sk_buff *skb)
5533 {
5534 	struct hci_ev_remote_host_features *ev = data;
5535 	struct inquiry_entry *ie;
5536 	struct hci_conn *conn;
5537 
5538 	bt_dev_dbg(hdev, "");
5539 
5540 	hci_dev_lock(hdev);
5541 
5542 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5543 	if (conn)
5544 		memcpy(conn->features[1], ev->features, 8);
5545 
5546 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5547 	if (ie)
5548 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5549 
5550 	hci_dev_unlock(hdev);
5551 }
5552 
5553 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5554 					    struct sk_buff *skb)
5555 {
5556 	struct hci_ev_remote_oob_data_request *ev = edata;
5557 	struct oob_data *data;
5558 
5559 	bt_dev_dbg(hdev, "");
5560 
5561 	hci_dev_lock(hdev);
5562 
5563 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5564 		goto unlock;
5565 
5566 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5567 	if (!data) {
5568 		struct hci_cp_remote_oob_data_neg_reply cp;
5569 
5570 		bacpy(&cp.bdaddr, &ev->bdaddr);
5571 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5572 			     sizeof(cp), &cp);
5573 		goto unlock;
5574 	}
5575 
5576 	if (bredr_sc_enabled(hdev)) {
5577 		struct hci_cp_remote_oob_ext_data_reply cp;
5578 
5579 		bacpy(&cp.bdaddr, &ev->bdaddr);
5580 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5581 			memset(cp.hash192, 0, sizeof(cp.hash192));
5582 			memset(cp.rand192, 0, sizeof(cp.rand192));
5583 		} else {
5584 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5585 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5586 		}
5587 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5588 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5589 
5590 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5591 			     sizeof(cp), &cp);
5592 	} else {
5593 		struct hci_cp_remote_oob_data_reply cp;
5594 
5595 		bacpy(&cp.bdaddr, &ev->bdaddr);
5596 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5597 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5598 
5599 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5600 			     sizeof(cp), &cp);
5601 	}
5602 
5603 unlock:
5604 	hci_dev_unlock(hdev);
5605 }
5606 
5607 #if IS_ENABLED(CONFIG_BT_HS)
5608 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5609 				  struct sk_buff *skb)
5610 {
5611 	struct hci_ev_channel_selected *ev = data;
5612 	struct hci_conn *hcon;
5613 
5614 	bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5615 
5616 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5617 	if (!hcon)
5618 		return;
5619 
5620 	amp_read_loc_assoc_final_data(hdev, hcon);
5621 }
5622 
/* AMP Physical Link Complete: move the AMP hci_conn into the connected
 * state and confirm the physical link to the AMP manager that set it up.
 * Connections without an AMP manager are ignored; on failure the stale
 * hci_conn is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links created through an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	/* The AMP link shares its peer with the BR/EDR connection that
	 * carries the A2MP signalling channel.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* NOTE(review): the hold/drop pair around the timeout update
	 * presumably re-arms the idle disconnect timer with the new
	 * value — confirm against hci_conn_hold/hci_conn_drop.
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5663 
/* AMP Logical Link Complete: create an hci_chan for the new logical link
 * on the physical link's hci_conn and, when an A2MP-managed BR/EDR
 * channel exists, confirm the logical link to L2CAP so the channel can
 * be moved onto the AMP controller.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	/* NOTE(review): ev->status is logged but never checked here, so a
	 * failed logical link still gets an hci_chan created — confirm
	 * this is intentional.
	 */
	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		/* The L2CAP channel lock protects the MTU update and the
		 * logical-link confirmation as one unit.
		 */
		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5702 
5703 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5704 					     struct sk_buff *skb)
5705 {
5706 	struct hci_ev_disconn_logical_link_complete *ev = data;
5707 	struct hci_chan *hchan;
5708 
5709 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5710 		   le16_to_cpu(ev->handle), ev->status);
5711 
5712 	if (ev->status)
5713 		return;
5714 
5715 	hci_dev_lock(hdev);
5716 
5717 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5718 	if (!hchan || !hchan->amp)
5719 		goto unlock;
5720 
5721 	amp_destroy_logical_link(hchan, ev->reason);
5722 
5723 unlock:
5724 	hci_dev_unlock(hdev);
5725 }
5726 
5727 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5728 					     struct sk_buff *skb)
5729 {
5730 	struct hci_ev_disconn_phy_link_complete *ev = data;
5731 	struct hci_conn *hcon;
5732 
5733 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5734 
5735 	if (ev->status)
5736 		return;
5737 
5738 	hci_dev_lock(hdev);
5739 
5740 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5741 	if (hcon && hcon->type == AMP_LINK) {
5742 		hcon->state = BT_CLOSED;
5743 		hci_disconn_cfm(hcon, ev->reason);
5744 		hci_conn_del(hcon);
5745 	}
5746 
5747 	hci_dev_unlock(hdev);
5748 }
5749 #endif
5750 
/* Fill in the initiator/responder addresses of an LE connection based on
 * its role. For outgoing connections the peer is the responder; for
 * incoming ones the peer is the initiator and our advertising address is
 * the responder. A controller-provided Local RPA, when non-zero, always
 * takes precedence over locally tracked addresses.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			/* No privacy: fall back to the identity address */
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5803 
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. Creates or completes the hci_conn for the peer, resolves the
 * peer's identity address via its IRK, validates the handle, notifies
 * mgmt, kicks off the remote-features exchange where applicable and
 * cleans up any pending connection parameters.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A pending connection completed: stop its timeout timer */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* An out-of-range handle is treated as a connection failure */
	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
			   HCI_CONN_HANDLE_MAX);
		status = HCI_ERROR_INVALID_PARAMETERS;
	}

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		goto unlock;

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Hold the connection until the features exchange is done */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The connection is established: the pending action entry and its
	 * extra reference on the connection are no longer needed.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
5976 
5977 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5978 				     struct sk_buff *skb)
5979 {
5980 	struct hci_ev_le_conn_complete *ev = data;
5981 
5982 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5983 
5984 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5985 			     NULL, ev->role, le16_to_cpu(ev->handle),
5986 			     le16_to_cpu(ev->interval),
5987 			     le16_to_cpu(ev->latency),
5988 			     le16_to_cpu(ev->supervision_timeout));
5989 }
5990 
5991 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5992 					 struct sk_buff *skb)
5993 {
5994 	struct hci_ev_le_enh_conn_complete *ev = data;
5995 
5996 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5997 
5998 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5999 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6000 			     le16_to_cpu(ev->interval),
6001 			     le16_to_cpu(ev->latency),
6002 			     le16_to_cpu(ev->supervision_timeout));
6003 }
6004 
/* LE Advertising Set Terminated: either clean up an advertising set that
 * failed/expired, or — when termination was caused by a connection —
 * remember the advertising instance on the new connection and fix up its
 * responder address for random advertising addresses.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still enabled, advertising as a
		 * whole is still active so keep the flag set.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* Only fix up resp_addr when it is still unset and the
		 * advertising address type is random.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the legacy instance using the global
		 * random address.
		 */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6074 
6075 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6076 					    struct sk_buff *skb)
6077 {
6078 	struct hci_ev_le_conn_update_complete *ev = data;
6079 	struct hci_conn *conn;
6080 
6081 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6082 
6083 	if (ev->status)
6084 		return;
6085 
6086 	hci_dev_lock(hdev);
6087 
6088 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6089 	if (conn) {
6090 		conn->le_conn_interval = le16_to_cpu(ev->interval);
6091 		conn->le_conn_latency = le16_to_cpu(ev->latency);
6092 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6093 	}
6094 
6095 	hci_dev_unlock(hdev);
6096 }
6097 
/* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt and, if so, start it. Returns the new hci_conn, or
 * NULL when no connection should (or could) be made.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6189 
6190 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6191 			       u8 bdaddr_type, bdaddr_t *direct_addr,
6192 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6193 			       bool ext_adv, bool ctl_time, u64 instant)
6194 {
6195 	struct discovery_state *d = &hdev->discovery;
6196 	struct smp_irk *irk;
6197 	struct hci_conn *conn;
6198 	bool match, bdaddr_resolved;
6199 	u32 flags;
6200 	u8 *ptr;
6201 
6202 	switch (type) {
6203 	case LE_ADV_IND:
6204 	case LE_ADV_DIRECT_IND:
6205 	case LE_ADV_SCAN_IND:
6206 	case LE_ADV_NONCONN_IND:
6207 	case LE_ADV_SCAN_RSP:
6208 		break;
6209 	default:
6210 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6211 				       "type: 0x%02x", type);
6212 		return;
6213 	}
6214 
6215 	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
6216 		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
6217 		return;
6218 	}
6219 
6220 	/* Find the end of the data in case the report contains padded zero
6221 	 * bytes at the end causing an invalid length value.
6222 	 *
6223 	 * When data is NULL, len is 0 so there is no need for extra ptr
6224 	 * check as 'ptr < data + 0' is already false in such case.
6225 	 */
6226 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6227 		if (ptr + 1 + *ptr > data + len)
6228 			break;
6229 	}
6230 
6231 	/* Adjust for actual length. This handles the case when remote
6232 	 * device is advertising with incorrect data length.
6233 	 */
6234 	len = ptr - data;
6235 
6236 	/* If the direct address is present, then this report is from
6237 	 * a LE Direct Advertising Report event. In that case it is
6238 	 * important to see if the address is matching the local
6239 	 * controller address.
6240 	 */
6241 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6242 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6243 						  &bdaddr_resolved);
6244 
6245 		/* Only resolvable random addresses are valid for these
6246 		 * kind of reports and others can be ignored.
6247 		 */
6248 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6249 			return;
6250 
6251 		/* If the controller is not using resolvable random
6252 		 * addresses, then this report can be ignored.
6253 		 */
6254 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6255 			return;
6256 
6257 		/* If the local IRK of the controller does not match
6258 		 * with the resolvable random address provided, then
6259 		 * this report can be ignored.
6260 		 */
6261 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6262 			return;
6263 	}
6264 
6265 	/* Check if we need to convert to identity address */
6266 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6267 	if (irk) {
6268 		bdaddr = &irk->bdaddr;
6269 		bdaddr_type = irk->addr_type;
6270 	}
6271 
6272 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6273 
6274 	/* Check if we have been requested to connect to this device.
6275 	 *
6276 	 * direct_addr is set only for directed advertising reports (it is NULL
6277 	 * for advertising reports) and is already verified to be RPA above.
6278 	 */
6279 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6280 				     type);
6281 	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
6282 		/* Store report for later inclusion by
6283 		 * mgmt_device_connected
6284 		 */
6285 		memcpy(conn->le_adv_data, data, len);
6286 		conn->le_adv_data_len = len;
6287 	}
6288 
6289 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6290 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6291 	else
6292 		flags = 0;
6293 
6294 	/* All scan results should be sent up for Mesh systems */
6295 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6296 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6297 				  rssi, flags, data, len, NULL, 0, instant);
6298 		return;
6299 	}
6300 
6301 	/* Passive scanning shouldn't trigger any device found events,
6302 	 * except for devices marked as CONN_REPORT for which we do send
6303 	 * device found events, or advertisement monitoring requested.
6304 	 */
6305 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6306 		if (type == LE_ADV_DIRECT_IND)
6307 			return;
6308 
6309 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6310 					       bdaddr, bdaddr_type) &&
6311 		    idr_is_empty(&hdev->adv_monitors_idr))
6312 			return;
6313 
6314 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6315 				  rssi, flags, data, len, NULL, 0, 0);
6316 		return;
6317 	}
6318 
6319 	/* When receiving non-connectable or scannable undirected
6320 	 * advertising reports, this means that the remote device is
6321 	 * not connectable and then clearly indicate this in the
6322 	 * device found event.
6323 	 *
6324 	 * When receiving a scan response, then there is no way to
6325 	 * know if the remote device is connectable or not. However
6326 	 * since scan responses are merged with a previously seen
6327 	 * advertising report, the flags field from that report
6328 	 * will be used.
6329 	 *
6330 	 * In the really unlikely case that a controller get confused
6331 	 * and just sends a scan response event, then it is marked as
6332 	 * not connectable as well.
6333 	 */
6334 	if (type == LE_ADV_SCAN_RSP)
6335 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6336 
6337 	/* If there's nothing pending either store the data from this
6338 	 * event or send an immediate device found event if the data
6339 	 * should not be stored for later.
6340 	 */
6341 	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
6342 		/* If the report will trigger a SCAN_REQ store it for
6343 		 * later merging.
6344 		 */
6345 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6346 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6347 						 rssi, flags, data, len);
6348 			return;
6349 		}
6350 
6351 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6352 				  rssi, flags, data, len, NULL, 0, 0);
6353 		return;
6354 	}
6355 
6356 	/* Check if the pending report is for the same device as the new one */
6357 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6358 		 bdaddr_type == d->last_adv_addr_type);
6359 
6360 	/* If the pending data doesn't match this report or this isn't a
6361 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6362 	 * sending of the pending data.
6363 	 */
6364 	if (type != LE_ADV_SCAN_RSP || !match) {
6365 		/* Send out whatever is in the cache, but skip duplicates */
6366 		if (!match)
6367 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6368 					  d->last_adv_addr_type, NULL,
6369 					  d->last_adv_rssi, d->last_adv_flags,
6370 					  d->last_adv_data,
6371 					  d->last_adv_data_len, NULL, 0, 0);
6372 
6373 		/* If the new report will trigger a SCAN_REQ store it for
6374 		 * later merging.
6375 		 */
6376 		if (!ext_adv && (type == LE_ADV_IND ||
6377 				 type == LE_ADV_SCAN_IND)) {
6378 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6379 						 rssi, flags, data, len);
6380 			return;
6381 		}
6382 
6383 		/* The advertising reports cannot be merged, so clear
6384 		 * the pending report and send out a device found event.
6385 		 */
6386 		clear_pending_adv_report(hdev);
6387 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6388 				  rssi, flags, data, len, NULL, 0, 0);
6389 		return;
6390 	}
6391 
6392 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6393 	 * the new event is a SCAN_RSP. We can therefore proceed with
6394 	 * sending a merged device found event.
6395 	 */
6396 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6397 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6398 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6399 	clear_pending_adv_report(hdev);
6400 }
6401 
/* Handle HCI LE Advertising Report event (legacy, subevent 0x02).
 *
 * The event carries ev->num reports.  Each report is a fixed-size
 * hci_ev_le_advertising_info header followed by info->length bytes of
 * advertising data plus one trailing RSSI byte.  Every piece is pulled
 * off the skb before use so truncated events are caught.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		/* Pull the fixed-size report header; stop on truncation. */
		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the variable-length data plus the RSSI byte. */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= HCI_MAX_AD_LENGTH) {
			/* RSSI is the byte immediately after the data. */
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}
6440 
6441 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6442 {
6443 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6444 		switch (evt_type) {
6445 		case LE_LEGACY_ADV_IND:
6446 			return LE_ADV_IND;
6447 		case LE_LEGACY_ADV_DIRECT_IND:
6448 			return LE_ADV_DIRECT_IND;
6449 		case LE_LEGACY_ADV_SCAN_IND:
6450 			return LE_ADV_SCAN_IND;
6451 		case LE_LEGACY_NONCONN_IND:
6452 			return LE_ADV_NONCONN_IND;
6453 		case LE_LEGACY_SCAN_RSP_ADV:
6454 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6455 			return LE_ADV_SCAN_RSP;
6456 		}
6457 
6458 		goto invalid;
6459 	}
6460 
6461 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6462 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6463 			return LE_ADV_DIRECT_IND;
6464 
6465 		return LE_ADV_IND;
6466 	}
6467 
6468 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6469 		return LE_ADV_SCAN_RSP;
6470 
6471 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6472 		return LE_ADV_SCAN_IND;
6473 
6474 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6475 	    evt_type & LE_EXT_ADV_DIRECT_IND)
6476 		return LE_ADV_NONCONN_IND;
6477 
6478 invalid:
6479 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6480 			       evt_type);
6481 
6482 	return LE_ADV_INVALID;
6483 }
6484 
/* Handle HCI LE Extended Advertising Report event (subevent 0x0d).
 *
 * Each report is a fixed-size hci_ev_le_ext_adv_info header followed
 * by info->length bytes of advertising data (RSSI is part of the
 * header here, unlike the legacy report).  Reports whose extended
 * event type cannot be mapped to a legacy PDU type are skipped.
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		/* Pull the fixed-size report header; stop on truncation. */
		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the variable-length advertising data. */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}
6523 
6524 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6525 {
6526 	struct hci_cp_le_pa_term_sync cp;
6527 
6528 	memset(&cp, 0, sizeof(cp));
6529 	cp.handle = handle;
6530 
6531 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6532 }
6533 
/* Handle HCI LE Periodic Advertising Sync Established event.
 *
 * Asks the registered ISO protocol whether the new sync should be
 * accepted and terminates it immediately when nobody is interested.
 */
static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* NOTE(review): a failed sync returns with HCI_PA_SYNC still set -
	 * confirm whether the flag should also be cleared on this path.
	 */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		hci_le_pa_term_sync(hdev, ev->handle);

	hci_dev_unlock(hdev);
}
6556 
/* Handle HCI LE Read Remote Features Complete event.
 *
 * Stores the remote feature page on success and, when the connection
 * is still in BT_CONFIG, completes the connection setup.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
6598 
/* Handle HCI LE Long Term Key Request event.
 *
 * Looks up a matching LTK for the connection and answers the
 * controller with HCI_OP_LE_LTK_REPLY, or with a negative reply when
 * no suitable key exists.  One-shot STKs are removed once consumed.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad it up to the reply's full key size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6663 
6664 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6665 				      u8 reason)
6666 {
6667 	struct hci_cp_le_conn_param_req_neg_reply cp;
6668 
6669 	cp.handle = cpu_to_le16(handle);
6670 	cp.reason = reason;
6671 
6672 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6673 		     &cp);
6674 }
6675 
/* Handle HCI LE Remote Connection Parameter Request event.
 *
 * Validates the requested parameters and either sends a negative
 * reply or accepts them.  When acting as central, userspace is also
 * informed via mgmt so it can decide whether to store the preferred
 * parameters.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* NOTE(review): hcon is looked up and dereferenced without holding
	 * hci_dev_lock - confirm the lookup is safe in this context.
	 */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the parameters exactly as requested */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
6735 
6736 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6737 					 struct sk_buff *skb)
6738 {
6739 	struct hci_ev_le_direct_adv_report *ev = data;
6740 	u64 instant = jiffies;
6741 	int i;
6742 
6743 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6744 				flex_array_size(ev, info, ev->num)))
6745 		return;
6746 
6747 	if (!ev->num)
6748 		return;
6749 
6750 	hci_dev_lock(hdev);
6751 
6752 	for (i = 0; i < ev->num; i++) {
6753 		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6754 
6755 		process_adv_report(hdev, info->type, &info->bdaddr,
6756 				   info->bdaddr_type, &info->direct_addr,
6757 				   info->direct_addr_type, info->rssi, NULL, 0,
6758 				   false, false, instant);
6759 	}
6760 
6761 	hci_dev_unlock(hdev);
6762 }
6763 
6764 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6765 				  struct sk_buff *skb)
6766 {
6767 	struct hci_ev_le_phy_update_complete *ev = data;
6768 	struct hci_conn *conn;
6769 
6770 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6771 
6772 	if (ev->status)
6773 		return;
6774 
6775 	hci_dev_lock(hdev);
6776 
6777 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6778 	if (!conn)
6779 		goto unlock;
6780 
6781 	conn->le_tx_phy = ev->tx_phy;
6782 	conn->le_rx_phy = ev->rx_phy;
6783 
6784 unlock:
6785 	hci_dev_unlock(hdev);
6786 }
6787 
6788 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6789 					struct sk_buff *skb)
6790 {
6791 	struct hci_evt_le_cis_established *ev = data;
6792 	struct hci_conn *conn;
6793 	u16 handle = __le16_to_cpu(ev->handle);
6794 
6795 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6796 
6797 	hci_dev_lock(hdev);
6798 
6799 	conn = hci_conn_hash_lookup_handle(hdev, handle);
6800 	if (!conn) {
6801 		bt_dev_err(hdev,
6802 			   "Unable to find connection with handle 0x%4.4x",
6803 			   handle);
6804 		goto unlock;
6805 	}
6806 
6807 	if (conn->type != ISO_LINK) {
6808 		bt_dev_err(hdev,
6809 			   "Invalid connection link type handle 0x%4.4x",
6810 			   handle);
6811 		goto unlock;
6812 	}
6813 
6814 	if (conn->role == HCI_ROLE_SLAVE) {
6815 		__le32 interval;
6816 
6817 		memset(&interval, 0, sizeof(interval));
6818 
6819 		memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
6820 		conn->iso_qos.in.interval = le32_to_cpu(interval);
6821 		memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
6822 		conn->iso_qos.out.interval = le32_to_cpu(interval);
6823 		conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
6824 		conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
6825 		conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
6826 		conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
6827 		conn->iso_qos.in.phy = ev->c_phy;
6828 		conn->iso_qos.out.phy = ev->p_phy;
6829 	}
6830 
6831 	if (!ev->status) {
6832 		conn->state = BT_CONNECTED;
6833 		hci_debugfs_create_conn(conn);
6834 		hci_conn_add_sysfs(conn);
6835 		hci_iso_setup_path(conn);
6836 		goto unlock;
6837 	}
6838 
6839 	hci_connect_cfm(conn, ev->status);
6840 	hci_conn_del(conn);
6841 
6842 unlock:
6843 	hci_dev_unlock(hdev);
6844 }
6845 
6846 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6847 {
6848 	struct hci_cp_le_reject_cis cp;
6849 
6850 	memset(&cp, 0, sizeof(cp));
6851 	cp.handle = handle;
6852 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6853 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6854 }
6855 
6856 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6857 {
6858 	struct hci_cp_le_accept_cis cp;
6859 
6860 	memset(&cp, 0, sizeof(cp));
6861 	cp.handle = handle;
6862 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6863 }
6864 
/* Handle HCI LE CIS Request event.
 *
 * Asks the registered ISO protocol whether the CIS should be accepted,
 * creating a slave hci_conn for it on demand.  The request is rejected
 * when no one accepts it or when no connection object can be created.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* The CIS must belong to a known ACL connection */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
		cis->handle = cis_handle;
	}

	cis->iso_qos.cig = ev->cig_id;
	cis->iso_qos.cis = ev->cis_id;

	/* Accept right away unless the protocol asked to defer the decision */
	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
6915 
6916 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6917 					   struct sk_buff *skb)
6918 {
6919 	struct hci_evt_le_create_big_complete *ev = data;
6920 	struct hci_conn *conn;
6921 
6922 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6923 
6924 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6925 				flex_array_size(ev, bis_handle, ev->num_bis)))
6926 		return;
6927 
6928 	hci_dev_lock(hdev);
6929 
6930 	conn = hci_conn_hash_lookup_big(hdev, ev->handle);
6931 	if (!conn)
6932 		goto unlock;
6933 
6934 	if (conn->type != ISO_LINK) {
6935 		bt_dev_err(hdev,
6936 			   "Invalid connection link type handle 0x%2.2x",
6937 			   ev->handle);
6938 		goto unlock;
6939 	}
6940 
6941 	if (ev->num_bis)
6942 		conn->handle = __le16_to_cpu(ev->bis_handle[0]);
6943 
6944 	if (!ev->status) {
6945 		conn->state = BT_CONNECTED;
6946 		hci_debugfs_create_conn(conn);
6947 		hci_conn_add_sysfs(conn);
6948 		hci_iso_setup_path(conn);
6949 		goto unlock;
6950 	}
6951 
6952 	hci_connect_cfm(conn, ev->status);
6953 	hci_conn_del(conn);
6954 
6955 unlock:
6956 	hci_dev_unlock(hdev);
6957 }
6958 
/* Handle HCI LE BIG Sync Established event.
 *
 * Creates (or reuses) a slave ISO connection for every synchronized
 * BIS, fills in the QoS parameters reported by the controller and
 * notifies the protocol.
 */
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_estabilished *ev = data;
	struct hci_conn *bis;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* Validate the variable-length list of BIS handles */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE);
			if (!bis)
				continue;
			bis->handle = handle;
		}

		bis->iso_qos.big = ev->handle;
		/* ev->latency is 3 octets; read it via a zeroed __le32 */
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);

		hci_connect_cfm(bis, ev->status);
	}

	hci_dev_unlock(hdev);
}
7003 
7004 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7005 					   struct sk_buff *skb)
7006 {
7007 	struct hci_evt_le_big_info_adv_report *ev = data;
7008 	int mask = hdev->link_mode;
7009 	__u8 flags = 0;
7010 
7011 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7012 
7013 	hci_dev_lock(hdev);
7014 
7015 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7016 	if (!(mask & HCI_LM_ACCEPT))
7017 		hci_le_pa_term_sync(hdev, ev->sync_handle);
7018 
7019 	hci_dev_unlock(hdev);
7020 }
7021 
/* Declare an LE subevent table entry with distinct min/max lengths */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare an LE subevent table entry with a fixed length */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Declare an LE subevent table entry carrying only a status byte */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7034 
/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so the use of the macros above is recommended since
 * they initialize each entry at its proper index using designated
 * initializers; that way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	/* Subevent handler; entries with a NULL func are ignored */
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;	/* minimum accepted parameter length */
	u16  max_len;	/* maximum expected parameter length (warn only) */
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
7111 
/* Dispatch an HCI LE Meta event to its subevent handler.
 *
 * Also completes a pending command whose opcode was waiting for this
 * subevent.  The subevent payload is length-checked against the table
 * entry before the handler is invoked.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	/* Subevents without a handler are simply ignored */
	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len size it still be
	 * possible to partially parse the event so leave to callback to
	 * decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
7154 
/* Check whether @skb completes the command identified by @opcode.
 *
 * When @event is non-zero, a match on the event code is enough.
 * Otherwise the skb must be a Command Complete event for @opcode;
 * Command Status means no extra parameters can be retrieved.
 * Returns true when the skb matches.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
7198 
7199 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7200 				  struct sk_buff *skb)
7201 {
7202 	struct hci_ev_le_advertising_info *adv;
7203 	struct hci_ev_le_direct_adv_info *direct_adv;
7204 	struct hci_ev_le_ext_adv_info *ext_adv;
7205 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7206 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7207 
7208 	hci_dev_lock(hdev);
7209 
7210 	/* If we are currently suspended and this is the first BT event seen,
7211 	 * save the wake reason associated with the event.
7212 	 */
7213 	if (!hdev->suspended || hdev->wake_reason)
7214 		goto unlock;
7215 
7216 	/* Default to remote wake. Values for wake_reason are documented in the
7217 	 * Bluez mgmt api docs.
7218 	 */
7219 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7220 
7221 	/* Once configured for remote wakeup, we should only wake up for
7222 	 * reconnections. It's useful to see which device is waking us up so
7223 	 * keep track of the bdaddr of the connection event that woke us up.
7224 	 */
7225 	if (event == HCI_EV_CONN_REQUEST) {
7226 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7227 		hdev->wake_addr_type = BDADDR_BREDR;
7228 	} else if (event == HCI_EV_CONN_COMPLETE) {
7229 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7230 		hdev->wake_addr_type = BDADDR_BREDR;
7231 	} else if (event == HCI_EV_LE_META) {
7232 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7233 		u8 subevent = le_ev->subevent;
7234 		u8 *ptr = &skb->data[sizeof(*le_ev)];
7235 		u8 num_reports = *ptr;
7236 
7237 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7238 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7239 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7240 		    num_reports) {
7241 			adv = (void *)(ptr + 1);
7242 			direct_adv = (void *)(ptr + 1);
7243 			ext_adv = (void *)(ptr + 1);
7244 
7245 			switch (subevent) {
7246 			case HCI_EV_LE_ADVERTISING_REPORT:
7247 				bacpy(&hdev->wake_addr, &adv->bdaddr);
7248 				hdev->wake_addr_type = adv->bdaddr_type;
7249 				break;
7250 			case HCI_EV_LE_DIRECT_ADV_REPORT:
7251 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7252 				hdev->wake_addr_type = direct_adv->bdaddr_type;
7253 				break;
7254 			case HCI_EV_LE_EXT_ADV_REPORT:
7255 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7256 				hdev->wake_addr_type = ext_adv->bdaddr_type;
7257 				break;
7258 			}
7259 		}
7260 	} else {
7261 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7262 	}
7263 
7264 unlock:
7265 	hci_dev_unlock(hdev);
7266 }
7267 
/* Declare an event table entry with distinct min/max lengths */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare an event table entry with a fixed length */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Declare an event table entry carrying only a status byte */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Declare an entry whose handler also completes pending requests */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare a fixed-length request-completing event table entry */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7292 
/* Entries in this table shall have their position according to the event opcode
 * they handle, so the use of the macros above is recommended since they
 * initialize each entry at its proper index using designated initializers;
 * that way events without a callback function don't need an entry.
 */
7298 static const struct hci_ev {
7299 	bool req;
7300 	union {
7301 		void (*func)(struct hci_dev *hdev, void *data,
7302 			     struct sk_buff *skb);
7303 		void (*func_req)(struct hci_dev *hdev, void *data,
7304 				 struct sk_buff *skb, u16 *opcode, u8 *status,
7305 				 hci_req_complete_t *req_complete,
7306 				 hci_req_complete_skb_t *req_complete_skb);
7307 	};
7308 	u16  min_len;
7309 	u16  max_len;
7310 } hci_ev_table[U8_MAX + 1] = {
7311 	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7312 	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7313 	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7314 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7315 		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7316 	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7317 	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7318 	       sizeof(struct hci_ev_conn_complete)),
7319 	/* [0x04 = HCI_EV_CONN_REQUEST] */
7320 	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7321 	       sizeof(struct hci_ev_conn_request)),
7322 	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7323 	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7324 	       sizeof(struct hci_ev_disconn_complete)),
7325 	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7326 	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7327 	       sizeof(struct hci_ev_auth_complete)),
7328 	/* [0x07 = HCI_EV_REMOTE_NAME] */
7329 	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7330 	       sizeof(struct hci_ev_remote_name)),
7331 	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7332 	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7333 	       sizeof(struct hci_ev_encrypt_change)),
7334 	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7335 	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7336 	       hci_change_link_key_complete_evt,
7337 	       sizeof(struct hci_ev_change_link_key_complete)),
7338 	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7339 	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7340 	       sizeof(struct hci_ev_remote_features)),
7341 	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7342 	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7343 		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7344 	/* [0x0f = HCI_EV_CMD_STATUS] */
7345 	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7346 		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7348 	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7349 	       sizeof(struct hci_ev_hardware_error)),
7350 	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7351 	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7352 	       sizeof(struct hci_ev_role_change)),
7353 	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7354 	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7355 		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7356 	/* [0x14 = HCI_EV_MODE_CHANGE] */
7357 	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7358 	       sizeof(struct hci_ev_mode_change)),
7359 	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7360 	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7361 	       sizeof(struct hci_ev_pin_code_req)),
7362 	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7363 	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7364 	       sizeof(struct hci_ev_link_key_req)),
7365 	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7366 	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7367 	       sizeof(struct hci_ev_link_key_notify)),
7368 	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7369 	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7370 	       sizeof(struct hci_ev_clock_offset)),
7371 	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7372 	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7373 	       sizeof(struct hci_ev_pkt_type_change)),
7374 	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7375 	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7376 	       sizeof(struct hci_ev_pscan_rep_mode)),
7377 	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7378 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7379 		  hci_inquiry_result_with_rssi_evt,
7380 		  sizeof(struct hci_ev_inquiry_result_rssi),
7381 		  HCI_MAX_EVENT_SIZE),
7382 	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7383 	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7384 	       sizeof(struct hci_ev_remote_ext_features)),
7385 	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7386 	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7387 	       sizeof(struct hci_ev_sync_conn_complete)),
7388 	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7389 	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7390 		  hci_extended_inquiry_result_evt,
7391 		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7392 	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7393 	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7394 	       sizeof(struct hci_ev_key_refresh_complete)),
7395 	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7396 	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7397 	       sizeof(struct hci_ev_io_capa_request)),
7398 	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7399 	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7400 	       sizeof(struct hci_ev_io_capa_reply)),
7401 	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7402 	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7403 	       sizeof(struct hci_ev_user_confirm_req)),
7404 	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7405 	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7406 	       sizeof(struct hci_ev_user_passkey_req)),
7407 	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7408 	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7409 	       sizeof(struct hci_ev_remote_oob_data_request)),
7410 	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7411 	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7412 	       sizeof(struct hci_ev_simple_pair_complete)),
7413 	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7414 	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7415 	       sizeof(struct hci_ev_user_passkey_notify)),
7416 	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7417 	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7418 	       sizeof(struct hci_ev_keypress_notify)),
7419 	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7420 	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7421 	       sizeof(struct hci_ev_remote_host_features)),
7422 	/* [0x3e = HCI_EV_LE_META] */
7423 	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7424 		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7425 #if IS_ENABLED(CONFIG_BT_HS)
7426 	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7427 	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7428 	       sizeof(struct hci_ev_phy_link_complete)),
7429 	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7430 	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7431 	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7433 	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7434 	       hci_disconn_loglink_complete_evt,
7435 	       sizeof(struct hci_ev_disconn_logical_link_complete)),
7436 	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7437 	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7438 	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7440 	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7441 	       hci_disconn_phylink_complete_evt,
7442 	       sizeof(struct hci_ev_disconn_phy_link_complete)),
7443 #endif
7444 	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7445 	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7446 	       sizeof(struct hci_ev_num_comp_blocks)),
7447 	/* [0xff = HCI_EV_VENDOR] */
7448 	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7449 };
7450 
7451 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7452 			   u16 *opcode, u8 *status,
7453 			   hci_req_complete_t *req_complete,
7454 			   hci_req_complete_skb_t *req_complete_skb)
7455 {
7456 	const struct hci_ev *ev = &hci_ev_table[event];
7457 	void *data;
7458 
7459 	if (!ev->func)
7460 		return;
7461 
7462 	if (skb->len < ev->min_len) {
7463 		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7464 			   event, skb->len, ev->min_len);
7465 		return;
7466 	}
7467 
7468 	/* Just warn if the length is over max_len size it still be
7469 	 * possible to partially parse the event so leave to callback to
7470 	 * decide if that is acceptable.
7471 	 */
7472 	if (skb->len > ev->max_len)
7473 		bt_dev_warn_ratelimited(hdev,
7474 					"unexpected event 0x%2.2x length: %u > %u",
7475 					event, skb->len, ev->max_len);
7476 
7477 	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7478 	if (!data)
7479 		return;
7480 
7481 	if (ev->req)
7482 		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7483 			     req_complete_skb);
7484 	else
7485 		ev->func(hdev, data, skb);
7486 }
7487 
/* Entry point for a complete HCI Event packet received from the
 * controller.
 *
 * Caches a clone of the event in hdev->recv_event, matches the event
 * against the currently outstanding command (if any) to set up request
 * completion, dispatches the event to its handler via hci_event_func(),
 * and finally invokes whichever request-completion callback was
 * selected. Consumes @skb on all paths.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* Need at least the event header (event code + parameter length) */
	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Replace the cached copy of the most recently received event.
	 * skb_clone() may return NULL on allocation failure; that is
	 * tolerated, recv_event is then simply empty.
	 */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	event = hdr->evt;
	if (!event) {
		/* Event code 0x00 is not valid; warn and drop the packet */
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE (OGF 0x08 is the
	 * LE Controller command group)
	 */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	/* Dispatch to the per-event handler; this may update opcode,
	 * status and the two completion callback pointers.
	 */
	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* hci_get_cmd_complete() returning false means the clone
		 * is of no use to the callback: free it and pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	/* kfree_skb(NULL) is a no-op, so both frees are unconditional */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
7555