/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

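/* All-zero 16-byte value, used as a sentinel when checking for invalid
 * "null" (all-zero) keys.
 */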
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

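/* Local convenience wrapper: only msecs_to_jiffies() is available, so
 * scale seconds up before converting. Used for the RPA-expiry delayed
 * work below, e.g.:
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
 *			   secs_to_jiffies(hdev->rpa_timeout));
 */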
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}
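
/* The three helpers above share one length-validation pattern: pull the
 * expected number of bytes off the skb and log a per-source error if the
 * controller sent a truncated packet. A minimal sketch of how a handler
 * consumes them (illustrative only; the real handlers are wired up
 * through dispatch tables further down the file):
 *
 *	struct hci_ev_status *rp;
 *
 *	rp = hci_cc_skb_pull(hdev, skb, HCI_OP_RESET, sizeof(*rp));
 *	if (!rp)
 *		return;
 */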

static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

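	/* The Write_Link_Policy_Settings parameters are a 2-byte connection
	 * handle followed by the 2-byte policy bitmap, hence the +2 offset
	 * into the sent command buffer below.
	 */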
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;

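		/* Enforce the host's minimum acceptable encryption key size;
		 * treating a shorter key as an authentication failure guards
		 * against key-size downgrade attacks such as KNOB.
		 */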
		if (conn->enc_key_size < hdev->min_enc_key_size) {
			/* In the peripheral (slave) role, conn->state has
			 * already been set to BT_CONNECTED although the L2CAP
			 * connection request may not have been received yet;
			 * at this point the L2CAP layer does almost nothing
			 * with the non-zero status.
			 * So also clear the encryption-related bits, and the
			 * L2CAP connection request handler will then pick up
			 * the correct security state later on.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to the features
	 * supported by the device.
	 */

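	/* For example, a controller that advertises LMP_3SLOT gains the
	 * 3-slot DM3/DH3 ACL packet types, while the eSCO feature bits
	 * below fill in esco_type, which the synchronous connection setup
	 * code consults when negotiating SCO/eSCO links.
	 */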
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
			     &hdev->quirks))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

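	/* Seed the flow-control credit counters: acl_cnt/sco_cnt track how
	 * many packets may still be sent to the controller and are refilled
	 * by Number of Completed Packets events.
	 */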
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	if (!hdev->acl_mtu || !hdev->acl_pkts)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

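	/* Which_Clock 0x00 requests the local Bluetooth clock; any other
	 * value refers to the piconet clock of the connection identified
	 * by the returned handle.
	 */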
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

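	/* An LE ACL data packet length of 0 means the controller has no
	 * dedicated LE buffers and the BR/EDR ACL buffers are shared for
	 * LE traffic instead.
	 */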
	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only if the adv instance handle is non-zero, since handle
	 * 0x00 must keep using HCI_OP_LE_SET_RANDOM_ADDR, which works for
	 * both extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

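	/* The command payload carries num_of_sets struct hci_cp_ext_adv_set
	 * entries after the header; only the first set is examined here.
	 */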
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv && !adv->periodic)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check whether
			 * any other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

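	/* cp->data holds one struct hci_cp_le_scan_phy_params block per PHY
	 * selected in the command; the scan type is taken from the first
	 * block only.
	 */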
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

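/* During active scanning the controller reports advertising data and scan
 * response data separately; the most recent report is buffered in the
 * discovery state so both pieces can be merged into a single mgmt
 * Device Found event (flushed in le_set_scan_enable_complete() below).
 */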
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

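/* Common completion logic for the legacy (HCI_OP_LE_SET_SCAN_ENABLE) and
 * extended (HCI_OP_LE_SET_EXT_SCAN_ENABLE) scan enable commands; both
 * command-complete handlers below funnel into this helper.
 */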
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore
		 * mark discovery as stopped.
		 */
1755 		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1756 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1757 		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1758 			 hdev->discovery.state == DISCOVERY_FINDING)
1759 			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1760 
1761 		break;
1762 
1763 	default:
1764 		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1765 			   enable);
1766 		break;
1767 	}
1768 
1769 	hci_dev_unlock(hdev);
1770 }
1771 
1772 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1773 				    struct sk_buff *skb)
1774 {
1775 	struct hci_cp_le_set_scan_enable *cp;
1776 	struct hci_ev_status *rp = data;
1777 
1778 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1779 
1780 	if (rp->status)
1781 		return rp->status;
1782 
1783 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1784 	if (!cp)
1785 		return rp->status;
1786 
1787 	le_set_scan_enable_complete(hdev, cp->enable);
1788 
1789 	return rp->status;
1790 }
1791 
1792 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1793 					struct sk_buff *skb)
1794 {
1795 	struct hci_cp_le_set_ext_scan_enable *cp;
1796 	struct hci_ev_status *rp = data;
1797 
1798 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1799 
1800 	if (rp->status)
1801 		return rp->status;
1802 
1803 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1804 	if (!cp)
1805 		return rp->status;
1806 
1807 	le_set_scan_enable_complete(hdev, cp->enable);
1808 
1809 	return rp->status;
1810 }
1811 
1812 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1813 				      struct sk_buff *skb)
1814 {
1815 	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1816 
1817 	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1818 		   rp->num_of_sets);
1819 
1820 	if (rp->status)
1821 		return rp->status;
1822 
1823 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1824 
1825 	return rp->status;
1826 }
1827 
1828 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1829 					  struct sk_buff *skb)
1830 {
1831 	struct hci_rp_le_read_accept_list_size *rp = data;
1832 
1833 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1834 
1835 	if (rp->status)
1836 		return rp->status;
1837 
1838 	hdev->le_accept_list_size = rp->size;
1839 
1840 	return rp->status;
1841 }
1842 
1843 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1844 				      struct sk_buff *skb)
1845 {
1846 	struct hci_ev_status *rp = data;
1847 
1848 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1849 
1850 	if (rp->status)
1851 		return rp->status;
1852 
1853 	hci_dev_lock(hdev);
1854 	hci_bdaddr_list_clear(&hdev->le_accept_list);
1855 	hci_dev_unlock(hdev);
1856 
1857 	return rp->status;
1858 }
1859 
1860 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1861 				       struct sk_buff *skb)
1862 {
1863 	struct hci_cp_le_add_to_accept_list *sent;
1864 	struct hci_ev_status *rp = data;
1865 
1866 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1867 
1868 	if (rp->status)
1869 		return rp->status;
1870 
1871 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1872 	if (!sent)
1873 		return rp->status;
1874 
1875 	hci_dev_lock(hdev);
1876 	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1877 			    sent->bdaddr_type);
1878 	hci_dev_unlock(hdev);
1879 
1880 	return rp->status;
1881 }
1882 
1883 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1884 					 struct sk_buff *skb)
1885 {
1886 	struct hci_cp_le_del_from_accept_list *sent;
1887 	struct hci_ev_status *rp = data;
1888 
1889 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1890 
1891 	if (rp->status)
1892 		return rp->status;
1893 
1894 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1895 	if (!sent)
1896 		return rp->status;
1897 
1898 	hci_dev_lock(hdev);
1899 	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1900 			    sent->bdaddr_type);
1901 	hci_dev_unlock(hdev);
1902 
1903 	return rp->status;
1904 }
1905 
1906 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1907 					  struct sk_buff *skb)
1908 {
1909 	struct hci_rp_le_read_supported_states *rp = data;
1910 
1911 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1912 
1913 	if (rp->status)
1914 		return rp->status;
1915 
1916 	memcpy(hdev->le_states, rp->le_states, 8);
1917 
1918 	return rp->status;
1919 }
1920 
1921 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1922 				      struct sk_buff *skb)
1923 {
1924 	struct hci_rp_le_read_def_data_len *rp = data;
1925 
1926 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1927 
1928 	if (rp->status)
1929 		return rp->status;
1930 
1931 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1932 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1933 
1934 	return rp->status;
1935 }
1936 
1937 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1938 				       struct sk_buff *skb)
1939 {
1940 	struct hci_cp_le_write_def_data_len *sent;
1941 	struct hci_ev_status *rp = data;
1942 
1943 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1944 
1945 	if (rp->status)
1946 		return rp->status;
1947 
1948 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1949 	if (!sent)
1950 		return rp->status;
1951 
1952 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1953 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1954 
1955 	return rp->status;
1956 }
1957 
1958 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1959 				       struct sk_buff *skb)
1960 {
1961 	struct hci_cp_le_add_to_resolv_list *sent;
1962 	struct hci_ev_status *rp = data;
1963 
1964 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1965 
1966 	if (rp->status)
1967 		return rp->status;
1968 
1969 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1970 	if (!sent)
1971 		return rp->status;
1972 
1973 	hci_dev_lock(hdev);
1974 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1975 				sent->bdaddr_type, sent->peer_irk,
1976 				sent->local_irk);
1977 	hci_dev_unlock(hdev);
1978 
1979 	return rp->status;
1980 }
1981 
1982 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
1983 					 struct sk_buff *skb)
1984 {
1985 	struct hci_cp_le_del_from_resolv_list *sent;
1986 	struct hci_ev_status *rp = data;
1987 
1988 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1989 
1990 	if (rp->status)
1991 		return rp->status;
1992 
1993 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1994 	if (!sent)
1995 		return rp->status;
1996 
1997 	hci_dev_lock(hdev);
1998 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1999 			    sent->bdaddr_type);
2000 	hci_dev_unlock(hdev);
2001 
2002 	return rp->status;
2003 }
2004 
2005 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2006 				      struct sk_buff *skb)
2007 {
2008 	struct hci_ev_status *rp = data;
2009 
2010 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2011 
2012 	if (rp->status)
2013 		return rp->status;
2014 
2015 	hci_dev_lock(hdev);
2016 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2017 	hci_dev_unlock(hdev);
2018 
2019 	return rp->status;
2020 }
2021 
2022 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2023 					  struct sk_buff *skb)
2024 {
2025 	struct hci_rp_le_read_resolv_list_size *rp = data;
2026 
2027 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2028 
2029 	if (rp->status)
2030 		return rp->status;
2031 
2032 	hdev->le_resolv_list_size = rp->size;
2033 
2034 	return rp->status;
2035 }
2036 
2037 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2038 					       struct sk_buff *skb)
2039 {
2040 	struct hci_ev_status *rp = data;
2041 	__u8 *sent;
2042 
2043 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2044 
2045 	if (rp->status)
2046 		return rp->status;
2047 
2048 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2049 	if (!sent)
2050 		return rp->status;
2051 
2052 	hci_dev_lock(hdev);
2053 
2054 	if (*sent)
2055 		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2056 	else
2057 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2058 
2059 	hci_dev_unlock(hdev);
2060 
2061 	return rp->status;
2062 }
2063 
2064 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2065 				      struct sk_buff *skb)
2066 {
2067 	struct hci_rp_le_read_max_data_len *rp = data;
2068 
2069 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2070 
2071 	if (rp->status)
2072 		return rp->status;
2073 
2074 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2075 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2076 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2077 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2078 
2079 	return rp->status;
2080 }
2081 
2082 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2083 					 struct sk_buff *skb)
2084 {
2085 	struct hci_cp_write_le_host_supported *sent;
2086 	struct hci_ev_status *rp = data;
2087 
2088 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2089 
2090 	if (rp->status)
2091 		return rp->status;
2092 
2093 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2094 	if (!sent)
2095 		return rp->status;
2096 
2097 	hci_dev_lock(hdev);
2098 
2099 	if (sent->le) {
2100 		hdev->features[1][0] |= LMP_HOST_LE;
2101 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2102 	} else {
2103 		hdev->features[1][0] &= ~LMP_HOST_LE;
2104 		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2105 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2106 	}
2107 
2108 	if (sent->simul)
2109 		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2110 	else
2111 		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2112 
2113 	hci_dev_unlock(hdev);
2114 
2115 	return rp->status;
2116 }
2117 
2118 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2119 			       struct sk_buff *skb)
2120 {
2121 	struct hci_cp_le_set_adv_param *cp;
2122 	struct hci_ev_status *rp = data;
2123 
2124 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2125 
2126 	if (rp->status)
2127 		return rp->status;
2128 
2129 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2130 	if (!cp)
2131 		return rp->status;
2132 
2133 	hci_dev_lock(hdev);
2134 	hdev->adv_addr_type = cp->own_address_type;
2135 	hci_dev_unlock(hdev);
2136 
2137 	return rp->status;
2138 }
2139 
2140 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2141 				   struct sk_buff *skb)
2142 {
2143 	struct hci_rp_le_set_ext_adv_params *rp = data;
2144 	struct hci_cp_le_set_ext_adv_params *cp;
2145 	struct adv_info *adv_instance;
2146 
2147 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2148 
2149 	if (rp->status)
2150 		return rp->status;
2151 
2152 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2153 	if (!cp)
2154 		return rp->status;
2155 
2156 	hci_dev_lock(hdev);
2157 	hdev->adv_addr_type = cp->own_addr_type;
2158 	if (!cp->handle) {
2159 		/* Store in hdev for instance 0 */
2160 		hdev->adv_tx_power = rp->tx_power;
2161 	} else {
2162 		adv_instance = hci_find_adv_instance(hdev, cp->handle);
2163 		if (adv_instance)
2164 			adv_instance->tx_power = rp->tx_power;
2165 	}
2166 	/* Update the adv data now that the tx power is known */
2167 	hci_update_adv_data(hdev, cp->handle);
2168 
2169 	hci_dev_unlock(hdev);
2170 
2171 	return rp->status;
2172 }
2173 
2174 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2175 			   struct sk_buff *skb)
2176 {
2177 	struct hci_rp_read_rssi *rp = data;
2178 	struct hci_conn *conn;
2179 
2180 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2181 
2182 	if (rp->status)
2183 		return rp->status;
2184 
2185 	hci_dev_lock(hdev);
2186 
2187 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2188 	if (conn)
2189 		conn->rssi = rp->rssi;
2190 
2191 	hci_dev_unlock(hdev);
2192 
2193 	return rp->status;
2194 }
2195 
2196 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2197 			       struct sk_buff *skb)
2198 {
2199 	struct hci_cp_read_tx_power *sent;
2200 	struct hci_rp_read_tx_power *rp = data;
2201 	struct hci_conn *conn;
2202 
2203 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2204 
2205 	if (rp->status)
2206 		return rp->status;
2207 
2208 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2209 	if (!sent)
2210 		return rp->status;
2211 
2212 	hci_dev_lock(hdev);
2213 
2214 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2215 	if (!conn)
2216 		goto unlock;
2217 
2218 	switch (sent->type) {
2219 	case 0x00:
2220 		conn->tx_power = rp->tx_power;
2221 		break;
2222 	case 0x01:
2223 		conn->max_tx_power = rp->tx_power;
2224 		break;
2225 	}
2226 
2227 unlock:
2228 	hci_dev_unlock(hdev);
2229 	return rp->status;
2230 }
2231 
2232 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2233 				      struct sk_buff *skb)
2234 {
2235 	struct hci_ev_status *rp = data;
2236 	u8 *mode;
2237 
2238 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2239 
2240 	if (rp->status)
2241 		return rp->status;
2242 
2243 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2244 	if (mode)
2245 		hdev->ssp_debug_mode = *mode;
2246 
2247 	return rp->status;
2248 }
2249 
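/* Command Status handlers (hci_cs_*) below run when the controller
 * acknowledges a command with HCI_EV_CMD_STATUS. Most of them act only
 * on a non-zero status: on success, the result is delivered by a
 * dedicated completion event with its own handler.
 */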
2250 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2251 {
2252 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2253 
2254 	if (status)
2255 		return;
2256 
2257 	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2258 		set_bit(HCI_INQUIRY, &hdev->flags);
2259 }
2260 
2261 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2262 {
2263 	struct hci_cp_create_conn *cp;
2264 	struct hci_conn *conn;
2265 
2266 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2267 
2268 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2269 	if (!cp)
2270 		return;
2271 
2272 	hci_dev_lock(hdev);
2273 
2274 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2275 
2276 	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2277 
2278 	if (status) {
2279 		if (conn && conn->state == BT_CONNECT) {
2280 			conn->state = BT_CLOSED;
2281 			hci_connect_cfm(conn, status);
2282 			hci_conn_del(conn);
2283 		}
2284 	} else {
2285 		if (!conn) {
2286 			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2287 						  HCI_ROLE_MASTER);
2288 			if (IS_ERR(conn))
2289 				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
2290 		}
2291 	}
2292 
2293 	hci_dev_unlock(hdev);
2294 }
2295 
2296 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2297 {
2298 	struct hci_cp_add_sco *cp;
2299 	struct hci_conn *acl;
2300 	struct hci_link *link;
2301 	__u16 handle;
2302 
2303 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2304 
2305 	if (!status)
2306 		return;
2307 
2308 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2309 	if (!cp)
2310 		return;
2311 
2312 	handle = __le16_to_cpu(cp->handle);
2313 
2314 	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2315 
2316 	hci_dev_lock(hdev);
2317 
2318 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2319 	if (acl) {
2320 		link = list_first_entry_or_null(&acl->link_list,
2321 						struct hci_link, list);
2322 		if (link && link->conn) {
2323 			link->conn->state = BT_CLOSED;
2324 
2325 			hci_connect_cfm(link->conn, status);
2326 			hci_conn_del(link->conn);
2327 		}
2328 	}
2329 
2330 	hci_dev_unlock(hdev);
2331 }
2332 
2333 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2334 {
2335 	struct hci_cp_auth_requested *cp;
2336 	struct hci_conn *conn;
2337 
2338 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2339 
2340 	if (!status)
2341 		return;
2342 
2343 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2344 	if (!cp)
2345 		return;
2346 
2347 	hci_dev_lock(hdev);
2348 
2349 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2350 	if (conn) {
2351 		if (conn->state == BT_CONFIG) {
2352 			hci_connect_cfm(conn, status);
2353 			hci_conn_drop(conn);
2354 		}
2355 	}
2356 
2357 	hci_dev_unlock(hdev);
2358 }
2359 
2360 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2361 {
2362 	struct hci_cp_set_conn_encrypt *cp;
2363 	struct hci_conn *conn;
2364 
2365 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2366 
2367 	if (!status)
2368 		return;
2369 
2370 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2371 	if (!cp)
2372 		return;
2373 
2374 	hci_dev_lock(hdev);
2375 
2376 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2377 	if (conn) {
2378 		if (conn->state == BT_CONFIG) {
2379 			hci_connect_cfm(conn, status);
2380 			hci_conn_drop(conn);
2381 		}
2382 	}
2383 
2384 	hci_dev_unlock(hdev);
2385 }
2386 
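/* Decide whether an outgoing connection in BT_CONFIG still needs
 * authentication before it can be reported as set up: non-SSP links
 * are only authenticated when a MEDIUM or higher security level is
 * pending or when MITM protection (auth_type bit 0) was requested.
 */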
2387 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2388 				    struct hci_conn *conn)
2389 {
2390 	if (conn->state != BT_CONFIG || !conn->out)
2391 		return 0;
2392 
2393 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2394 		return 0;
2395 
2396 	/* Only request authentication for SSP connections or non-SSP
2397 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2398 	 * is requested.
2399 	 */
2400 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2401 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2402 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2403 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2404 		return 0;
2405 
2406 	return 1;
2407 }
2408 
2409 static int hci_resolve_name(struct hci_dev *hdev,
2410 			    struct inquiry_entry *e)
2411 {
2412 	struct hci_cp_remote_name_req cp;
2413 
2414 	memset(&cp, 0, sizeof(cp));
2415 
2416 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2417 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2418 	cp.pscan_mode = e->data.pscan_mode;
2419 	cp.clock_offset = e->data.clock_offset;
2420 
2421 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2422 }
2423 
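/* Kick off name resolution for the next inquiry cache entry that still
 * needs it. Returns true if a Remote Name Request was sent (the entry
 * moves to NAME_PENDING); false when the resolve list is empty, the
 * time budget has expired or sending the request failed.
 */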
2424 static bool hci_resolve_next_name(struct hci_dev *hdev)
2425 {
2426 	struct discovery_state *discov = &hdev->discovery;
2427 	struct inquiry_entry *e;
2428 
2429 	if (list_empty(&discov->resolve))
2430 		return false;
2431 
2432 	/* We should stop if we already spent too much time resolving names. */
2433 	/* Stop if we have already spent too much time resolving names. */
2434 	if (time_after(jiffies, discov->name_resolve_timeout)) {
2435 		bt_dev_warn_ratelimited(hdev, "Name resolution is taking too long.");
2436 	}
2437 
2438 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2439 	if (!e)
2440 		return false;
2441 
2442 	if (hci_resolve_name(hdev, e) == 0) {
2443 		e->name_state = NAME_PENDING;
2444 		return true;
2445 	}
2446 
2447 	return false;
2448 }
2449 
2450 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2451 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2452 {
2453 	struct discovery_state *discov = &hdev->discovery;
2454 	struct inquiry_entry *e;
2455 
2456 	/* Update the mgmt connected state if necessary. Be careful,
2457 	 * however, with conn objects that exist but are not (yet)
2458 	 * connected: only those in the BT_CONFIG or BT_CONNECTED states
2459 	 * can be considered connected.
2460 	 */
2461 	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2462 		mgmt_device_connected(hdev, conn, name, name_len);
2463 
2464 	if (discov->state == DISCOVERY_STOPPED)
2465 		return;
2466 
2467 	if (discov->state == DISCOVERY_STOPPING)
2468 		goto discov_complete;
2469 
2470 	if (discov->state != DISCOVERY_RESOLVING)
2471 		return;
2472 
2473 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2474 	/* If the device is not in the list of devices whose names are
2475 	 * pending, there is no need to continue resolving the next name,
2476 	 * as that will be done upon receiving another Remote Name
2477 	 * Request Complete event. */
2478 	if (!e)
2479 		return;
2480 
2481 	list_del(&e->list);
2482 
2483 	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2484 	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2485 			 name, name_len);
2486 
2487 	if (hci_resolve_next_name(hdev))
2488 		return;
2489 
2490 discov_complete:
2491 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2492 }
2493 
2494 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2495 {
2496 	struct hci_cp_remote_name_req *cp;
2497 	struct hci_conn *conn;
2498 
2499 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2500 
2501 	/* If successful, wait for the name request complete event before
2502 	 * checking whether authentication is needed. */
2503 	if (!status)
2504 		return;
2505 
2506 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2507 	if (!cp)
2508 		return;
2509 
2510 	hci_dev_lock(hdev);
2511 
2512 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2513 
2514 	if (hci_dev_test_flag(hdev, HCI_MGMT))
2515 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2516 
2517 	if (!conn)
2518 		goto unlock;
2519 
2520 	if (!hci_outgoing_auth_needed(hdev, conn))
2521 		goto unlock;
2522 
2523 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2524 		struct hci_cp_auth_requested auth_cp;
2525 
2526 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2527 
2528 		auth_cp.handle = __cpu_to_le16(conn->handle);
2529 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2530 			     sizeof(auth_cp), &auth_cp);
2531 	}
2532 
2533 unlock:
2534 	hci_dev_unlock(hdev);
2535 }
2536 
2537 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2538 {
2539 	struct hci_cp_read_remote_features *cp;
2540 	struct hci_conn *conn;
2541 
2542 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2543 
2544 	if (!status)
2545 		return;
2546 
2547 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2548 	if (!cp)
2549 		return;
2550 
2551 	hci_dev_lock(hdev);
2552 
2553 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2554 	if (conn) {
2555 		if (conn->state == BT_CONFIG) {
2556 			hci_connect_cfm(conn, status);
2557 			hci_conn_drop(conn);
2558 		}
2559 	}
2560 
2561 	hci_dev_unlock(hdev);
2562 }
2563 
2564 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2565 {
2566 	struct hci_cp_read_remote_ext_features *cp;
2567 	struct hci_conn *conn;
2568 
2569 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2570 
2571 	if (!status)
2572 		return;
2573 
2574 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2575 	if (!cp)
2576 		return;
2577 
2578 	hci_dev_lock(hdev);
2579 
2580 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2581 	if (conn) {
2582 		if (conn->state == BT_CONFIG) {
2583 			hci_connect_cfm(conn, status);
2584 			hci_conn_drop(conn);
2585 		}
2586 	}
2587 
2588 	hci_dev_unlock(hdev);
2589 }
2590 
2591 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2592 				       __u8 status)
2593 {
2594 	struct hci_conn *acl;
2595 	struct hci_link *link;
2596 
2597 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2598 
2599 	hci_dev_lock(hdev);
2600 
2601 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2602 	if (acl) {
2603 		link = list_first_entry_or_null(&acl->link_list,
2604 						struct hci_link, list);
2605 		if (link && link->conn) {
2606 			link->conn->state = BT_CLOSED;
2607 
2608 			hci_connect_cfm(link->conn, status);
2609 			hci_conn_del(link->conn);
2610 		}
2611 	}
2612 
2613 	hci_dev_unlock(hdev);
2614 }
2615 
2616 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2617 {
2618 	struct hci_cp_setup_sync_conn *cp;
2619 
2620 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2621 
2622 	if (!status)
2623 		return;
2624 
2625 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2626 	if (!cp)
2627 		return;
2628 
2629 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2630 }
2631 
2632 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2633 {
2634 	struct hci_cp_enhanced_setup_sync_conn *cp;
2635 
2636 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2637 
2638 	if (!status)
2639 		return;
2640 
2641 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2642 	if (!cp)
2643 		return;
2644 
2645 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2646 }
2647 
2648 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2649 {
2650 	struct hci_cp_sniff_mode *cp;
2651 	struct hci_conn *conn;
2652 
2653 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2654 
2655 	if (!status)
2656 		return;
2657 
2658 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2659 	if (!cp)
2660 		return;
2661 
2662 	hci_dev_lock(hdev);
2663 
2664 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2665 	if (conn) {
2666 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2667 
2668 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2669 			hci_sco_setup(conn, status);
2670 	}
2671 
2672 	hci_dev_unlock(hdev);
2673 }
2674 
2675 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2676 {
2677 	struct hci_cp_exit_sniff_mode *cp;
2678 	struct hci_conn *conn;
2679 
2680 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2681 
2682 	if (!status)
2683 		return;
2684 
2685 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2686 	if (!cp)
2687 		return;
2688 
2689 	hci_dev_lock(hdev);
2690 
2691 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2692 	if (conn) {
2693 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2694 
2695 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2696 			hci_sco_setup(conn, status);
2697 	}
2698 
2699 	hci_dev_unlock(hdev);
2700 }
2701 
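/* Command status handler for HCI_Disconnect. The normal teardown
 * happens in hci_disconn_complete_evt(); this path cleans up
 * immediately only when the command failed, or when the host is
 * suspended and the completion event is not waited for.
 */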
2702 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2703 {
2704 	struct hci_cp_disconnect *cp;
2705 	struct hci_conn_params *params;
2706 	struct hci_conn *conn;
2707 	bool mgmt_conn;
2708 
2709 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2710 
2711 	/* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and we are
2712 	 * not suspended; otherwise clean up the connection immediately.
2713 	 */
2714 	if (!status && !hdev->suspended)
2715 		return;
2716 
2717 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2718 	if (!cp)
2719 		return;
2720 
2721 	hci_dev_lock(hdev);
2722 
2723 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2724 	if (!conn)
2725 		goto unlock;
2726 
2727 	if (status) {
2728 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2729 				       conn->dst_type, status);
2730 
2731 		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2732 			hdev->cur_adv_instance = conn->adv_instance;
2733 			hci_enable_advertising(hdev);
2734 		}
2735 
2736 		/* Inform sockets that the conn is gone before we delete it */
2737 		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2738 
2739 		goto done;
2740 	}
2741 
2742 	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2743 
2744 	if (conn->type == ACL_LINK) {
2745 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2746 			hci_remove_link_key(hdev, &conn->dst);
2747 	}
2748 
2749 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2750 	if (params) {
2751 		switch (params->auto_connect) {
2752 		case HCI_AUTO_CONN_LINK_LOSS:
2753 			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2754 				break;
2755 			fallthrough;
2756 
2757 		case HCI_AUTO_CONN_DIRECT:
2758 		case HCI_AUTO_CONN_ALWAYS:
2759 			hci_pend_le_list_del_init(params);
2760 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2761 			break;
2762 
2763 		default:
2764 			break;
2765 		}
2766 	}
2767 
2768 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2769 				 cp->reason, mgmt_conn);
2770 
2771 	hci_disconn_cfm(conn, cp->reason);
2772 
2773 done:
2774 	/* If the disconnection failed for any reason, the upper layer
2775 	 * does not retry the disconnect in the current implementation.
2776 	 * Hence, do some basic cleanup here and re-enable advertising
2777 	 * if necessary.
2778 	 */
2779 	hci_conn_del(conn);
2780 unlock:
2781 	hci_dev_unlock(hdev);
2782 }
2783 
2784 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2785 {
2786 	/* When controller based address resolution is in use, the new
2787 	 * address types 0x02 and 0x03 are used. These types need to be
2788 	 * converted back into either public or random address type.
2789 	 */
2790 	switch (type) {
2791 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2792 		if (resolved)
2793 			*resolved = true;
2794 		return ADDR_LE_DEV_PUBLIC;
2795 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2796 		if (resolved)
2797 			*resolved = true;
2798 		return ADDR_LE_DEV_RANDOM;
2799 	}
2800 
2801 	if (resolved)
2802 		*resolved = false;
2803 	return type;
2804 }
2805 
2806 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2807 			      u8 peer_addr_type, u8 own_address_type,
2808 			      u8 filter_policy)
2809 {
2810 	struct hci_conn *conn;
2811 
2812 	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2813 				       peer_addr_type);
2814 	if (!conn)
2815 		return;
2816 
2817 	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2818 
2819 	/* Store the initiator and responder address information which
2820 	 * is needed for SMP. These values will not change during the
2821 	 * lifetime of the connection.
2822 	 */
2823 	conn->init_addr_type = own_address_type;
2824 	if (own_address_type == ADDR_LE_DEV_RANDOM)
2825 		bacpy(&conn->init_addr, &hdev->random_addr);
2826 	else
2827 		bacpy(&conn->init_addr, &hdev->bdaddr);
2828 
2829 	conn->resp_addr_type = peer_addr_type;
2830 	bacpy(&conn->resp_addr, peer_addr);
2831 }
2832 
2833 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2834 {
2835 	struct hci_cp_le_create_conn *cp;
2836 
2837 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2838 
2839 	/* All connection failure handling is taken care of by the
2840 	 * hci_conn_failed function which is triggered by the HCI
2841 	 * request completion callbacks used for connecting.
2842 	 */
2843 	if (status)
2844 		return;
2845 
2846 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2847 	if (!cp)
2848 		return;
2849 
2850 	hci_dev_lock(hdev);
2851 
2852 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2853 			  cp->own_address_type, cp->filter_policy);
2854 
2855 	hci_dev_unlock(hdev);
2856 }
2857 
2858 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2859 {
2860 	struct hci_cp_le_ext_create_conn *cp;
2861 
2862 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2863 
2864 	/* All connection failure handling is taken care of by the
2865 	 * hci_conn_failed function which is triggered by the HCI
2866 	 * request completion callbacks used for connecting.
2867 	 */
2868 	if (status)
2869 		return;
2870 
2871 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2872 	if (!cp)
2873 		return;
2874 
2875 	hci_dev_lock(hdev);
2876 
2877 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2878 			  cp->own_addr_type, cp->filter_policy);
2879 
2880 	hci_dev_unlock(hdev);
2881 }
2882 
2883 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2884 {
2885 	struct hci_cp_le_read_remote_features *cp;
2886 	struct hci_conn *conn;
2887 
2888 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2889 
2890 	if (!status)
2891 		return;
2892 
2893 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2894 	if (!cp)
2895 		return;
2896 
2897 	hci_dev_lock(hdev);
2898 
2899 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2900 	if (conn) {
2901 		if (conn->state == BT_CONFIG) {
2902 			hci_connect_cfm(conn, status);
2903 			hci_conn_drop(conn);
2904 		}
2905 	}
2906 
2907 	hci_dev_unlock(hdev);
2908 }
2909 
2910 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2911 {
2912 	struct hci_cp_le_start_enc *cp;
2913 	struct hci_conn *conn;
2914 
2915 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2916 
2917 	if (!status)
2918 		return;
2919 
2920 	hci_dev_lock(hdev);
2921 
2922 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2923 	if (!cp)
2924 		goto unlock;
2925 
2926 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2927 	if (!conn)
2928 		goto unlock;
2929 
2930 	if (conn->state != BT_CONNECTED)
2931 		goto unlock;
2932 
2933 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2934 	hci_conn_drop(conn);
2935 
2936 unlock:
2937 	hci_dev_unlock(hdev);
2938 }
2939 
2940 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2941 {
2942 	struct hci_cp_switch_role *cp;
2943 	struct hci_conn *conn;
2944 
2945 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2946 
2947 	if (!status)
2948 		return;
2949 
2950 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2951 	if (!cp)
2952 		return;
2953 
2954 	hci_dev_lock(hdev);
2955 
2956 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2957 	if (conn)
2958 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2959 
2960 	hci_dev_unlock(hdev);
2961 }
2962 
2963 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2964 				     struct sk_buff *skb)
2965 {
2966 	struct hci_ev_status *ev = data;
2967 	struct discovery_state *discov = &hdev->discovery;
2968 	struct inquiry_entry *e;
2969 
2970 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2971 
2972 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2973 		return;
2974 
2975 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2976 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2977 
2978 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2979 		return;
2980 
2981 	hci_dev_lock(hdev);
2982 
2983 	if (discov->state != DISCOVERY_FINDING)
2984 		goto unlock;
2985 
2986 	if (list_empty(&discov->resolve)) {
2987 		/* When BR/EDR inquiry is active and no LE scanning is in
2988 		 * progress, then change discovery state to indicate completion.
2989 		 *
2990 		 * When running LE scanning and BR/EDR inquiry simultaneously
2991 		 * and the LE scan already finished, then change the discovery
2992 		 * state to indicate completion.
2993 		 */
2994 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2995 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2996 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2997 		goto unlock;
2998 	}
2999 
3000 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3001 	if (e && hci_resolve_name(hdev, e) == 0) {
3002 		e->name_state = NAME_PENDING;
3003 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3004 		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3005 	} else {
3006 		/* When BR/EDR inquiry is active and no LE scanning is in
3007 		 * progress, then change discovery state to indicate completion.
3008 		 *
3009 		 * When running LE scanning and BR/EDR inquiry simultaneously
3010 		 * and the LE scan already finished, then change the discovery
3011 		 * state to indicate completion.
3012 		 */
3013 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3014 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3015 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3016 	}
3017 
3018 unlock:
3019 	hci_dev_unlock(hdev);
3020 }
3021 
3022 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3023 				   struct sk_buff *skb)
3024 {
3025 	struct hci_ev_inquiry_result *ev = edata;
3026 	struct inquiry_data data;
3027 	int i;
3028 
3029 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3030 			     flex_array_size(ev, info, ev->num)))
3031 		return;
3032 
3033 	bt_dev_dbg(hdev, "num %d", ev->num);
3034 
3035 	if (!ev->num)
3036 		return;
3037 
3038 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3039 		return;
3040 
3041 	hci_dev_lock(hdev);
3042 
3043 	for (i = 0; i < ev->num; i++) {
3044 		struct inquiry_info *info = &ev->info[i];
3045 		u32 flags;
3046 
3047 		bacpy(&data.bdaddr, &info->bdaddr);
3048 		data.pscan_rep_mode	= info->pscan_rep_mode;
3049 		data.pscan_period_mode	= info->pscan_period_mode;
3050 		data.pscan_mode		= info->pscan_mode;
3051 		memcpy(data.dev_class, info->dev_class, 3);
3052 		data.clock_offset	= info->clock_offset;
3053 		data.rssi		= HCI_RSSI_INVALID;
3054 		data.ssp_mode		= 0x00;
3055 
3056 		flags = hci_inquiry_cache_update(hdev, &data, false);
3057 
3058 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3059 				  info->dev_class, HCI_RSSI_INVALID,
3060 				  flags, NULL, 0, NULL, 0, 0);
3061 	}
3062 
3063 	hci_dev_unlock(hdev);
3064 }
3065 
3066 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3067 				  struct sk_buff *skb)
3068 {
3069 	struct hci_ev_conn_complete *ev = data;
3070 	struct hci_conn *conn;
3071 	u8 status = ev->status;
3072 
3073 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3074 
3075 	hci_dev_lock(hdev);
3076 
3077 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3078 	if (!conn) {
3079 		/* In case of error status and there is no connection pending
3080 		 * just unlock as there is nothing to cleanup.
3081 		 */
3082 		if (ev->status)
3083 			goto unlock;
3084 
3085 		/* The connection may not exist if it was auto-connected. Check
3086 		 * the BR/EDR allowlist to see if this device is allowed to
3087 		 * auto connect; if the link is an ACL type, create the
3088 		 * connection automatically.
3089 		 *
3090 		 * Auto-connect will only occur if the event filter is
3091 		 * programmed with a given address. Right now, the event filter
3092 		 * is only used during suspend.
3093 		 */
3094 		if (ev->link_type == ACL_LINK &&
3095 		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3096 						      &ev->bdaddr,
3097 						      BDADDR_BREDR)) {
3098 			conn = hci_conn_add_unset(hdev, ev->link_type,
3099 						  &ev->bdaddr, HCI_ROLE_SLAVE);
3100 			if (IS_ERR(conn)) {
3101 				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3102 				goto unlock;
3103 			}
3104 		} else {
3105 			if (ev->link_type != SCO_LINK)
3106 				goto unlock;
3107 
3108 			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3109 						       &ev->bdaddr);
3110 			if (!conn)
3111 				goto unlock;
3112 
3113 			conn->type = SCO_LINK;
3114 		}
3115 	}
3116 
3117 	/* The HCI_Connection_Complete event is only sent once per connection.
3118 	 * Processing it more than once per connection can corrupt kernel memory.
3119 	 *
3120 	 * As the connection handle is set here for the first time, it indicates
3121 	 * whether the connection is already set up.
3122 	 */
3123 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3124 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3125 		goto unlock;
3126 	}
3127 
3128 	if (!status) {
3129 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3130 		if (status)
3131 			goto done;
3132 
3133 		if (conn->type == ACL_LINK) {
3134 			conn->state = BT_CONFIG;
3135 			hci_conn_hold(conn);
3136 
3137 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3138 			    !hci_find_link_key(hdev, &ev->bdaddr))
3139 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3140 			else
3141 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3142 		} else
3143 			conn->state = BT_CONNECTED;
3144 
3145 		hci_debugfs_create_conn(conn);
3146 		hci_conn_add_sysfs(conn);
3147 
3148 		if (test_bit(HCI_AUTH, &hdev->flags))
3149 			set_bit(HCI_CONN_AUTH, &conn->flags);
3150 
3151 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3152 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3153 
3154 		/* "Link Key Request" may have completed before "Connect Request" did */
3155 		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3156 		    ev->link_type == ACL_LINK) {
3157 			struct link_key *key;
3158 			struct hci_cp_read_enc_key_size cp;
3159 
3160 			key = hci_find_link_key(hdev, &ev->bdaddr);
3161 			if (key) {
3162 				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3163 
3164 				if (!read_key_size_capable(hdev)) {
3165 					conn->enc_key_size = HCI_LINK_KEY_SIZE;
3166 				} else {
3167 					cp.handle = cpu_to_le16(conn->handle);
3168 					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3169 							 sizeof(cp), &cp)) {
3170 						bt_dev_err(hdev, "sending read key size failed");
3171 						conn->enc_key_size = HCI_LINK_KEY_SIZE;
3172 					}
3173 				}
3174 
3175 				hci_encrypt_cfm(conn, ev->status);
3176 			}
3177 		}
3178 
3179 		/* Get remote features */
3180 		if (conn->type == ACL_LINK) {
3181 			struct hci_cp_read_remote_features cp;
3182 			cp.handle = ev->handle;
3183 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3184 				     sizeof(cp), &cp);
3185 
3186 			hci_update_scan(hdev);
3187 		}
3188 
3189 		/* Set packet type for incoming connection */
3190 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3191 			struct hci_cp_change_conn_ptype cp;
3192 			cp.handle = ev->handle;
3193 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3194 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3195 				     &cp);
3196 		}
3197 	}
3198 
3199 	if (conn->type == ACL_LINK)
3200 		hci_sco_setup(conn, ev->status);
3201 
3202 done:
3203 	if (status) {
3204 		hci_conn_failed(conn, status);
3205 	} else if (ev->link_type == SCO_LINK) {
3206 		switch (conn->setting & SCO_AIRMODE_MASK) {
3207 		case SCO_AIRMODE_CVSD:
3208 			if (hdev->notify)
3209 				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3210 			break;
3211 		}
3212 
3213 		hci_connect_cfm(conn, status);
3214 	}
3215 
3216 unlock:
3217 	hci_dev_unlock(hdev);
3218 }
3219 
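/* Reject an incoming connection request; the canned reason is always
 * HCI_ERROR_REJ_BAD_ADDR, regardless of why the request was refused.
 */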
3220 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3221 {
3222 	struct hci_cp_reject_conn_req cp;
3223 
3224 	bacpy(&cp.bdaddr, bdaddr);
3225 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3226 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3227 }
3228 
3229 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3230 				 struct sk_buff *skb)
3231 {
3232 	struct hci_ev_conn_request *ev = data;
3233 	int mask = hdev->link_mode;
3234 	struct inquiry_entry *ie;
3235 	struct hci_conn *conn;
3236 	__u8 flags = 0;
3237 
3238 	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3239 
3240 	/* Reject incoming connection from device with same BD ADDR against
3241 	 * CVE-2020-26555
3242 	 */
3243 	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3244 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR",
3245 			   &ev->bdaddr);
3246 		hci_reject_conn(hdev, &ev->bdaddr);
3247 		return;
3248 	}
3249 
3250 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3251 				      &flags);
3252 
3253 	if (!(mask & HCI_LM_ACCEPT)) {
3254 		hci_reject_conn(hdev, &ev->bdaddr);
3255 		return;
3256 	}
3257 
3258 	hci_dev_lock(hdev);
3259 
3260 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3261 				   BDADDR_BREDR)) {
3262 		hci_reject_conn(hdev, &ev->bdaddr);
3263 		goto unlock;
3264 	}
3265 
3266 	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3267 	 * connection. These features are only touched through mgmt so
3268 	 * only do the checks if HCI_MGMT is set.
3269 	 */
3270 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3271 	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3272 	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3273 					       BDADDR_BREDR)) {
3274 		hci_reject_conn(hdev, &ev->bdaddr);
3275 		goto unlock;
3276 	}
3277 
3278 	/* Connection accepted */
3279 
3280 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3281 	if (ie)
3282 		memcpy(ie->data.dev_class, ev->dev_class, 3);
3283 
3284 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3285 			&ev->bdaddr);
3286 	if (!conn) {
3287 		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3288 					  HCI_ROLE_SLAVE);
3289 		if (IS_ERR(conn)) {
3290 			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3291 			goto unlock;
3292 		}
3293 	}
3294 
3295 	memcpy(conn->dev_class, ev->dev_class, 3);
3296 
3297 	hci_dev_unlock(hdev);
3298 
3299 	if (ev->link_type == ACL_LINK ||
3300 	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3301 		struct hci_cp_accept_conn_req cp;
3302 		conn->state = BT_CONNECT;
3303 
3304 		bacpy(&cp.bdaddr, &ev->bdaddr);
3305 
3306 		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3307 			cp.role = 0x00; /* Become central */
3308 		else
3309 			cp.role = 0x01; /* Remain peripheral */
3310 
3311 		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3312 	} else if (!(flags & HCI_PROTO_DEFER)) {
3313 		struct hci_cp_accept_sync_conn_req cp;
3314 		conn->state = BT_CONNECT;
3315 
3316 		bacpy(&cp.bdaddr, &ev->bdaddr);
3317 		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3318 
3319 		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3320 		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3321 		cp.max_latency    = cpu_to_le16(0xffff);
3322 		cp.content_format = cpu_to_le16(hdev->voice_setting);
3323 		cp.retrans_effort = 0xff;
3324 
3325 		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3326 			     &cp);
3327 	} else {
3328 		conn->state = BT_CONNECT2;
3329 		hci_connect_cfm(conn, 0);
3330 	}
3331 
3332 	return;
3333 unlock:
3334 	hci_dev_unlock(hdev);
3335 }
3336 
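/* Map an HCI disconnect reason code onto the coarser MGMT_DEV_DISCONN_*
 * values that the management interface exposes to userspace.
 */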
3337 static u8 hci_to_mgmt_reason(u8 err)
3338 {
3339 	switch (err) {
3340 	case HCI_ERROR_CONNECTION_TIMEOUT:
3341 		return MGMT_DEV_DISCONN_TIMEOUT;
3342 	case HCI_ERROR_REMOTE_USER_TERM:
3343 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3344 	case HCI_ERROR_REMOTE_POWER_OFF:
3345 		return MGMT_DEV_DISCONN_REMOTE;
3346 	case HCI_ERROR_LOCAL_HOST_TERM:
3347 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3348 	default:
3349 		return MGMT_DEV_DISCONN_UNKNOWN;
3350 	}
3351 }
3352 
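/* Handle HCI_EV_DISCONN_COMPLETE: mark the connection closed, notify
 * mgmt and the protocol layers, re-queue any auto-connect parameters
 * for the peer and, for LE peripheral links, re-enable advertising
 * that the connection may have suppressed, before deleting the conn.
 */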
3353 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3354 				     struct sk_buff *skb)
3355 {
3356 	struct hci_ev_disconn_complete *ev = data;
3357 	u8 reason;
3358 	struct hci_conn_params *params;
3359 	struct hci_conn *conn;
3360 	bool mgmt_connected;
3361 
3362 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3363 
3364 	hci_dev_lock(hdev);
3365 
3366 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3367 	if (!conn)
3368 		goto unlock;
3369 
3370 	if (ev->status) {
3371 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3372 				       conn->dst_type, ev->status);
3373 		goto unlock;
3374 	}
3375 
3376 	conn->state = BT_CLOSED;
3377 
3378 	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3379 
3380 	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3381 		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3382 	else
3383 		reason = hci_to_mgmt_reason(ev->reason);
3384 
3385 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3386 				reason, mgmt_connected);
3387 
3388 	if (conn->type == ACL_LINK) {
3389 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3390 			hci_remove_link_key(hdev, &conn->dst);
3391 
3392 		hci_update_scan(hdev);
3393 	}
3394 
3395 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3396 	if (params) {
3397 		switch (params->auto_connect) {
3398 		case HCI_AUTO_CONN_LINK_LOSS:
3399 			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3400 				break;
3401 			fallthrough;
3402 
3403 		case HCI_AUTO_CONN_DIRECT:
3404 		case HCI_AUTO_CONN_ALWAYS:
3405 			hci_pend_le_list_del_init(params);
3406 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
3407 			hci_update_passive_scan(hdev);
3408 			break;
3409 
3410 		default:
3411 			break;
3412 		}
3413 	}
3414 
3415 	hci_disconn_cfm(conn, ev->reason);
3416 
3417 	/* Re-enable advertising if necessary, since it might
3418 	 * have been disabled by the connection. From the
3419 	 * HCI_LE_Set_Advertise_Enable command description in
3420 	 * the core specification (v4.0):
3421 	 * "The Controller shall continue advertising until the Host
3422 	 * issues an LE_Set_Advertise_Enable command with
3423 	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3424 	 * or until a connection is created or until the Advertising
3425 	 * is timed out due to Directed Advertising."
3426 	 */
3427 	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3428 		hdev->cur_adv_instance = conn->adv_instance;
3429 		hci_enable_advertising(hdev);
3430 	}
3431 
3432 	hci_conn_del(conn);
3433 
3434 unlock:
3435 	hci_dev_unlock(hdev);
3436 }
3437 
3438 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3439 				  struct sk_buff *skb)
3440 {
3441 	struct hci_ev_auth_complete *ev = data;
3442 	struct hci_conn *conn;
3443 
3444 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3445 
3446 	hci_dev_lock(hdev);
3447 
3448 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3449 	if (!conn)
3450 		goto unlock;
3451 
3452 	if (!ev->status) {
3453 		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3454 		set_bit(HCI_CONN_AUTH, &conn->flags);
3455 		conn->sec_level = conn->pending_sec_level;
3456 	} else {
3457 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3458 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3459 
3460 		mgmt_auth_failed(conn, ev->status);
3461 	}
3462 
3463 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3464 
3465 	if (conn->state == BT_CONFIG) {
3466 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3467 			struct hci_cp_set_conn_encrypt cp;
3468 			cp.handle  = ev->handle;
3469 			cp.encrypt = 0x01;
3470 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3471 				     &cp);
3472 		} else {
3473 			conn->state = BT_CONNECTED;
3474 			hci_connect_cfm(conn, ev->status);
3475 			hci_conn_drop(conn);
3476 		}
3477 	} else {
3478 		hci_auth_cfm(conn, ev->status);
3479 
3480 		hci_conn_hold(conn);
3481 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3482 		hci_conn_drop(conn);
3483 	}
3484 
3485 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3486 		if (!ev->status) {
3487 			struct hci_cp_set_conn_encrypt cp;
3488 			cp.handle  = ev->handle;
3489 			cp.encrypt = 0x01;
3490 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3491 				     &cp);
3492 		} else {
3493 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3494 			hci_encrypt_cfm(conn, ev->status);
3495 		}
3496 	}
3497 
3498 unlock:
3499 	hci_dev_unlock(hdev);
3500 }
3501 
3502 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3503 				struct sk_buff *skb)
3504 {
3505 	struct hci_ev_remote_name *ev = data;
3506 	struct hci_conn *conn;
3507 
3508 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3509 
3510 	hci_dev_lock(hdev);
3511 
3512 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3513 
3514 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3515 		goto check_auth;
3516 
3517 	if (ev->status == 0)
3518 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3519 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3520 	else
3521 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3522 
3523 check_auth:
3524 	if (!conn)
3525 		goto unlock;
3526 
3527 	if (!hci_outgoing_auth_needed(hdev, conn))
3528 		goto unlock;
3529 
3530 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3531 		struct hci_cp_auth_requested cp;
3532 
3533 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3534 
3535 		cp.handle = __cpu_to_le16(conn->handle);
3536 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3537 	}
3538 
3539 unlock:
3540 	hci_dev_unlock(hdev);
3541 }
3542 
3543 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3544 				   struct sk_buff *skb)
3545 {
3546 	struct hci_ev_encrypt_change *ev = data;
3547 	struct hci_conn *conn;
3548 
3549 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3550 
3551 	hci_dev_lock(hdev);
3552 
3553 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3554 	if (!conn)
3555 		goto unlock;
3556 
3557 	if (!ev->status) {
3558 		if (ev->encrypt) {
3559 			/* Encryption implies authentication */
3560 			set_bit(HCI_CONN_AUTH, &conn->flags);
3561 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3562 			conn->sec_level = conn->pending_sec_level;
3563 
3564 			/* P-256 authentication key implies FIPS */
3565 			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3566 				set_bit(HCI_CONN_FIPS, &conn->flags);
3567 
3568 			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3569 			    conn->type == LE_LINK)
3570 				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3571 		} else {
3572 			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3573 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3574 		}
3575 	}
3576 
3577 	/* We should disregard the current RPA and generate a new one
3578 	 * whenever the encryption procedure fails.
3579 	 */
3580 	if (ev->status && conn->type == LE_LINK) {
3581 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3582 		hci_adv_instances_set_rpa_expired(hdev, true);
3583 	}
3584 
3585 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3586 
3587 	/* Check link security requirements are met */
3588 	if (!hci_conn_check_link_mode(conn))
3589 		ev->status = HCI_ERROR_AUTH_FAILURE;
3590 
3591 	if (ev->status && conn->state == BT_CONNECTED) {
3592 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3593 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3594 
3595 		/* Notify upper layers so they can cleanup before
3596 		 * disconnecting.
3597 		 */
3598 		hci_encrypt_cfm(conn, ev->status);
3599 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3600 		hci_conn_drop(conn);
3601 		goto unlock;
3602 	}
3603 
3604 	/* Try reading the encryption key size for encrypted ACL links */
3605 	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3606 		struct hci_cp_read_enc_key_size cp;
3607 
3608 		/* Only send HCI_Read_Encryption_Key_Size if the
3609 		 * controller really supports it. If it doesn't, assume
3610 		 * the default size (16).
3611 		 */
3612 		if (!read_key_size_capable(hdev)) {
3613 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3614 			goto notify;
3615 		}
3616 
3617 		cp.handle = cpu_to_le16(conn->handle);
3618 		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3619 				 sizeof(cp), &cp)) {
3620 			bt_dev_err(hdev, "sending read key size failed");
3621 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3622 			goto notify;
3623 		}
3624 
3625 		goto unlock;
3626 	}
3627 
3628 	/* Set the default Authenticated Payload Timeout after an LE link
3629 	 * is established. As per Core Spec v5.0, Vol 2, Part B, Section
3630 	 * 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be sent
3631 	 * when the link is active and encryption is enabled; the conn type
3632 	 * can be either LE or ACL, and the controller must support LMP
3633 	 * Ping. AES-CCM encryption is required as well.
3634 	 */
3635 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3636 	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3637 	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3638 	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3639 		struct hci_cp_write_auth_payload_to cp;
3640 
3641 		cp.handle = cpu_to_le16(conn->handle);
3642 		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3643 		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3644 				 sizeof(cp), &cp))
3645 			bt_dev_err(hdev, "write auth payload timeout failed");
3646 	}
3647 
3648 notify:
3649 	hci_encrypt_cfm(conn, ev->status);
3650 
3651 unlock:
3652 	hci_dev_unlock(hdev);
3653 }
3654 
3655 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3656 					     struct sk_buff *skb)
3657 {
3658 	struct hci_ev_change_link_key_complete *ev = data;
3659 	struct hci_conn *conn;
3660 
3661 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3662 
3663 	hci_dev_lock(hdev);
3664 
3665 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3666 	if (conn) {
3667 		if (!ev->status)
3668 			set_bit(HCI_CONN_SECURE, &conn->flags);
3669 
3670 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3671 
3672 		hci_key_change_cfm(conn, ev->status);
3673 	}
3674 
3675 	hci_dev_unlock(hdev);
3676 }
3677 
3678 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3679 				    struct sk_buff *skb)
3680 {
3681 	struct hci_ev_remote_features *ev = data;
3682 	struct hci_conn *conn;
3683 
3684 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3685 
3686 	hci_dev_lock(hdev);
3687 
3688 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3689 	if (!conn)
3690 		goto unlock;
3691 
3692 	if (!ev->status)
3693 		memcpy(conn->features[0], ev->features, 8);
3694 
3695 	if (conn->state != BT_CONFIG)
3696 		goto unlock;
3697 
3698 	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3699 	    lmp_ext_feat_capable(conn)) {
3700 		struct hci_cp_read_remote_ext_features cp;
3701 		cp.handle = ev->handle;
3702 		cp.page = 0x01;
3703 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3704 			     sizeof(cp), &cp);
3705 		goto unlock;
3706 	}
3707 
3708 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3709 		struct hci_cp_remote_name_req cp;
3710 		memset(&cp, 0, sizeof(cp));
3711 		bacpy(&cp.bdaddr, &conn->dst);
3712 		cp.pscan_rep_mode = 0x02;
3713 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3714 	} else {
3715 		mgmt_device_connected(hdev, conn, NULL, 0);
3716 	}
3717 
3718 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3719 		conn->state = BT_CONNECTED;
3720 		hci_connect_cfm(conn, ev->status);
3721 		hci_conn_drop(conn);
3722 	}
3723 
3724 unlock:
3725 	hci_dev_unlock(hdev);
3726 }
3727 
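/* Update command flow control from the Num_HCI_Command_Packets field
 * of an incoming event: a non-zero value re-opens the command queue
 * (cmd_cnt is set to 1 since the core keeps at most one command in
 * flight), while zero arms the ncmd watchdog unless the command queue
 * is being drained. Apart from cancelling the command timer, nothing
 * is changed while HCI_RESET is pending.
 */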
3728 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3729 {
3730 	cancel_delayed_work(&hdev->cmd_timer);
3731 
3732 	rcu_read_lock();
3733 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3734 		if (ncmd) {
3735 			cancel_delayed_work(&hdev->ncmd_timer);
3736 			atomic_set(&hdev->cmd_cnt, 1);
3737 		} else {
3738 			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3739 				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3740 						   HCI_NCMD_TIMEOUT);
3741 		}
3742 	}
3743 	rcu_read_unlock();
3744 }
3745 
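/* LE Read Buffer Size v2 reports separate ACL and ISO buffer
 * parameters. Cache both sets and fail with invalid parameters if the
 * controller claims a non-zero LE MTU below HCI_MIN_LE_MTU.
 */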
3746 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3747 					struct sk_buff *skb)
3748 {
3749 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3750 
3751 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3752 
3753 	if (rp->status)
3754 		return rp->status;
3755 
3756 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3757 	hdev->le_pkts  = rp->acl_max_pkt;
3758 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3759 	hdev->iso_pkts = rp->iso_max_pkt;
3760 
3761 	hdev->le_cnt  = hdev->le_pkts;
3762 	hdev->iso_cnt = hdev->iso_pkts;
3763 
3764 	BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3765 	       hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3766 
3767 	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3768 		return HCI_ERROR_INVALID_PARAMETERS;
3769 
3770 	return rp->status;
3771 }
3772 
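/* Fail every unicast CIS in the given CIG that never received a
 * connection handle from the controller; connections that already have
 * a valid handle are left untouched.
 */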
3773 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3774 {
3775 	struct hci_conn *conn, *tmp;
3776 
3777 	lockdep_assert_held(&hdev->lock);
3778 
3779 	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3780 		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3781 		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3782 			continue;
3783 
3784 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3785 			hci_conn_failed(conn, status);
3786 	}
3787 }
3788 
3789 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3790 				   struct sk_buff *skb)
3791 {
3792 	struct hci_rp_le_set_cig_params *rp = data;
3793 	struct hci_cp_le_set_cig_params *cp;
3794 	struct hci_conn *conn;
3795 	u8 status = rp->status;
3796 	bool pending = false;
3797 	int i;
3798 
3799 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3800 
3801 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3802 	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3803 			    rp->cig_id != cp->cig_id)) {
3804 		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3805 		status = HCI_ERROR_UNSPECIFIED;
3806 	}
3807 
3808 	hci_dev_lock(hdev);
3809 
3810 	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3811 	 *
3812 	 * If the Status return parameter is non-zero, then the state of the CIG
3813 	 * and its CIS configurations shall not be changed by the command. If
3814 	 * the CIG did not already exist, it shall not be created.
3815 	 */
3816 	if (status) {
3817 		/* Keep current configuration, fail only the unbound CIS */
3818 		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3819 		goto unlock;
3820 	}
3821 
3822 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3823 	 *
3824 	 * If the Status return parameter is zero, then the Controller shall
3825 	 * set the Connection_Handle arrayed return parameter to the connection
3826 	 * handle(s) corresponding to the CIS configurations specified in
3827 	 * the CIS_IDs command parameter, in the same order.
3828 	 */
3829 	for (i = 0; i < rp->num_handles; ++i) {
3830 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3831 						cp->cis[i].cis_id);
3832 		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3833 			continue;
3834 
3835 		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3836 			continue;
3837 
3838 		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3839 			continue;
3840 
3841 		if (conn->state == BT_CONNECT)
3842 			pending = true;
3843 	}
3844 
3845 unlock:
3846 	if (pending)
3847 		hci_le_create_cis_pending(hdev);
3848 
3849 	hci_dev_unlock(hdev);
3850 
3851 	return rp->status;
3852 }
3853 
3854 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3855 				   struct sk_buff *skb)
3856 {
3857 	struct hci_rp_le_setup_iso_path *rp = data;
3858 	struct hci_cp_le_setup_iso_path *cp;
3859 	struct hci_conn *conn;
3860 
3861 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3862 
3863 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3864 	if (!cp)
3865 		return rp->status;
3866 
3867 	hci_dev_lock(hdev);
3868 
3869 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3870 	if (!conn)
3871 		goto unlock;
3872 
3873 	if (rp->status) {
3874 		hci_connect_cfm(conn, rp->status);
3875 		hci_conn_del(conn);
3876 		goto unlock;
3877 	}
3878 
3879 	switch (cp->direction) {
3880 	/* Input (Host to Controller) */
3881 	case 0x00:
3882 		/* Only confirm connection if output only */
3883 		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3884 			hci_connect_cfm(conn, rp->status);
3885 		break;
3886 	/* Output (Controller to Host) */
3887 	case 0x01:
3888 		/* Confirm connection since conn->iso_qos is always configured
3889 		 * last.
3890 		 */
3891 		hci_connect_cfm(conn, rp->status);
3892 
3893 		/* Notify device connected in case it is a BIG Sync */
3894 		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3895 			mgmt_device_connected(hdev, conn, NULL, 0);
3896 
3897 		break;
3898 	}
3899 
3900 unlock:
3901 	hci_dev_unlock(hdev);
3902 	return rp->status;
3903 }
3904 
3905 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3906 {
3907 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3908 }
3909 
3910 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3911 				   struct sk_buff *skb)
3912 {
3913 	struct hci_ev_status *rp = data;
3914 	struct hci_cp_le_set_per_adv_params *cp;
3915 
3916 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3917 
3918 	if (rp->status)
3919 		return rp->status;
3920 
3921 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3922 	if (!cp)
3923 		return rp->status;
3924 
3925 	/* TODO: set the conn state */
3926 	return rp->status;
3927 }
3928 
3929 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3930 				       struct sk_buff *skb)
3931 {
3932 	struct hci_ev_status *rp = data;
3933 	struct hci_cp_le_set_per_adv_enable *cp;
3934 	struct adv_info *adv = NULL, *n;
3935 	u8 per_adv_cnt = 0;
3936 
3937 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3938 
3939 	if (rp->status)
3940 		return rp->status;
3941 
3942 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3943 	if (!cp)
3944 		return rp->status;
3945 
3946 	hci_dev_lock(hdev);
3947 
3948 	adv = hci_find_adv_instance(hdev, cp->handle);
3949 
3950 	if (cp->enable) {
3951 		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3952 
3953 		if (adv)
3954 			adv->enabled = true;
3955 	} else {
3956 		/* If just one instance was disabled, check if any other
3957 		 * instances are enabled before clearing HCI_LE_PER_ADV.
3958 		 * The current periodic adv instance will be marked as
3959 		 * disabled once extended advertising is also disabled.
3960 		 */
3961 		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3962 					 list) {
3963 			if (adv->periodic && adv->enabled)
3964 				per_adv_cnt++;
3965 		}
3966 
3967 		if (per_adv_cnt > 1)
3968 			goto unlock;
3969 
3970 		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3971 	}
3972 
3973 unlock:
3974 	hci_dev_unlock(hdev);
3975 
3976 	return rp->status;
3977 }
3978 
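/* Helpers for building hci_cc_table: HCI_CC_VL() declares a Command
 * Complete handler together with the acceptable return parameter
 * length range, HCI_CC() is the fixed-length variant and
 * HCI_CC_STATUS() covers commands whose only return parameter is the
 * status byte. For example,
 *
 *	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset)
 *
 * expands to an entry with min_len == max_len ==
 * sizeof(struct hci_ev_status).
 */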
3979 #define HCI_CC_VL(_op, _func, _min, _max) \
3980 { \
3981 	.op = _op, \
3982 	.func = _func, \
3983 	.min_len = _min, \
3984 	.max_len = _max, \
3985 }
3986 
3987 #define HCI_CC(_op, _func, _len) \
3988 	HCI_CC_VL(_op, _func, _len, _len)
3989 
3990 #define HCI_CC_STATUS(_op, _func) \
3991 	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
3992 
3993 static const struct hci_cc {
3994 	u16  op;
3995 	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
3996 	u16  min_len;
3997 	u16  max_len;
3998 } hci_cc_table[] = {
3999 	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4000 	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4001 	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4002 	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4003 		      hci_cc_remote_name_req_cancel),
4004 	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4005 	       sizeof(struct hci_rp_role_discovery)),
4006 	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4007 	       sizeof(struct hci_rp_read_link_policy)),
4008 	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4009 	       sizeof(struct hci_rp_write_link_policy)),
4010 	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4011 	       sizeof(struct hci_rp_read_def_link_policy)),
4012 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4013 		      hci_cc_write_def_link_policy),
4014 	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4015 	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4016 	       sizeof(struct hci_rp_read_stored_link_key)),
4017 	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4018 	       sizeof(struct hci_rp_delete_stored_link_key)),
4019 	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4020 	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4021 	       sizeof(struct hci_rp_read_local_name)),
4022 	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4023 	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4024 	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4025 	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4026 	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4027 	       sizeof(struct hci_rp_read_class_of_dev)),
4028 	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4029 	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4030 	       sizeof(struct hci_rp_read_voice_setting)),
4031 	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4032 	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4033 	       sizeof(struct hci_rp_read_num_supported_iac)),
4034 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4035 	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4036 	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4037 	       sizeof(struct hci_rp_read_auth_payload_to)),
4038 	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4039 	       sizeof(struct hci_rp_write_auth_payload_to)),
4040 	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4041 	       sizeof(struct hci_rp_read_local_version)),
4042 	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4043 	       sizeof(struct hci_rp_read_local_commands)),
4044 	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4045 	       sizeof(struct hci_rp_read_local_features)),
4046 	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4047 	       sizeof(struct hci_rp_read_local_ext_features)),
4048 	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4049 	       sizeof(struct hci_rp_read_buffer_size)),
4050 	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4051 	       sizeof(struct hci_rp_read_bd_addr)),
4052 	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4053 	       sizeof(struct hci_rp_read_local_pairing_opts)),
4054 	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4055 	       sizeof(struct hci_rp_read_page_scan_activity)),
4056 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4057 		      hci_cc_write_page_scan_activity),
4058 	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4059 	       sizeof(struct hci_rp_read_page_scan_type)),
4060 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4061 	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4062 	       sizeof(struct hci_rp_read_clock)),
4063 	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4064 	       sizeof(struct hci_rp_read_enc_key_size)),
4065 	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4066 	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4067 	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4068 	       hci_cc_read_def_err_data_reporting,
4069 	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4070 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4071 		      hci_cc_write_def_err_data_reporting),
4072 	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4073 	       sizeof(struct hci_rp_pin_code_reply)),
4074 	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4075 	       sizeof(struct hci_rp_pin_code_neg_reply)),
4076 	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4077 	       sizeof(struct hci_rp_read_local_oob_data)),
4078 	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4079 	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4080 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4081 	       sizeof(struct hci_rp_le_read_buffer_size)),
4082 	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4083 	       sizeof(struct hci_rp_le_read_local_features)),
4084 	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4085 	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4086 	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4087 	       sizeof(struct hci_rp_user_confirm_reply)),
4088 	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4089 	       sizeof(struct hci_rp_user_confirm_reply)),
4090 	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4091 	       sizeof(struct hci_rp_user_confirm_reply)),
4092 	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4093 	       sizeof(struct hci_rp_user_confirm_reply)),
4094 	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4095 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4096 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4097 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4098 	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4099 	       hci_cc_le_read_accept_list_size,
4100 	       sizeof(struct hci_rp_le_read_accept_list_size)),
4101 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4102 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4103 		      hci_cc_le_add_to_accept_list),
4104 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4105 		      hci_cc_le_del_from_accept_list),
4106 	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4107 	       sizeof(struct hci_rp_le_read_supported_states)),
4108 	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4109 	       sizeof(struct hci_rp_le_read_def_data_len)),
4110 	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4111 		      hci_cc_le_write_def_data_len),
4112 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4113 		      hci_cc_le_add_to_resolv_list),
4114 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4115 		      hci_cc_le_del_from_resolv_list),
4116 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4117 		      hci_cc_le_clear_resolv_list),
4118 	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4119 	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4120 	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4121 		      hci_cc_le_set_addr_resolution_enable),
4122 	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4123 	       sizeof(struct hci_rp_le_read_max_data_len)),
4124 	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4125 		      hci_cc_write_le_host_supported),
4126 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4127 	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4128 	       sizeof(struct hci_rp_read_rssi)),
4129 	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4130 	       sizeof(struct hci_rp_read_tx_power)),
4131 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4132 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4133 		      hci_cc_le_set_ext_scan_param),
4134 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4135 		      hci_cc_le_set_ext_scan_enable),
4136 	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4137 	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4138 	       hci_cc_le_read_num_adv_sets,
4139 	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4140 	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4141 	       sizeof(struct hci_rp_le_set_ext_adv_params)),
4142 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4143 		      hci_cc_le_set_ext_adv_enable),
4144 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4145 		      hci_cc_le_set_adv_set_random_addr),
4146 	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4147 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4148 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4149 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4150 		      hci_cc_le_set_per_adv_enable),
4151 	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4152 	       sizeof(struct hci_rp_le_read_transmit_power)),
4153 	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4154 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4155 	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4156 	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4157 		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4158 	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4159 	       sizeof(struct hci_rp_le_setup_iso_path)),
4160 };
4161 
4162 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4163 		      struct sk_buff *skb)
4164 {
4165 	void *data;
4166 
4167 	if (skb->len < cc->min_len) {
4168 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4169 			   cc->op, skb->len, cc->min_len);
4170 		return HCI_ERROR_UNSPECIFIED;
4171 	}
4172 
	/* Just warn if the length is over max_len size, it may still be
	 * possible to partially parse the cc, so leave it to the callback
	 * to decide if that is acceptable.
	 */
4177 	if (skb->len > cc->max_len)
4178 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4179 			    cc->op, skb->len, cc->max_len);
4180 
4181 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4182 	if (!data)
4183 		return HCI_ERROR_UNSPECIFIED;
4184 
4185 	return cc->func(hdev, data, skb);
4186 }
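/* Example: HCI_OP_LE_SET_CIG_PARAMS is registered via HCI_CC_VL() since
 * its return parameters carry a variable-length list of CIS handles, so
 * anything from sizeof(struct hci_rp_le_set_cig_params) up to
 * HCI_MAX_EVENT_SIZE is accepted without a warning.
 */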
4187 
4188 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4189 				 struct sk_buff *skb, u16 *opcode, u8 *status,
4190 				 hci_req_complete_t *req_complete,
4191 				 hci_req_complete_skb_t *req_complete_skb)
4192 {
4193 	struct hci_ev_cmd_complete *ev = data;
4194 	int i;
4195 
4196 	*opcode = __le16_to_cpu(ev->opcode);
4197 
4198 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4199 
4200 	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4201 		if (hci_cc_table[i].op == *opcode) {
4202 			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4203 			break;
4204 		}
4205 	}
4206 
4207 	if (i == ARRAY_SIZE(hci_cc_table)) {
4208 		/* Unknown opcode, assume byte 0 contains the status, so
4209 		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands sent by HCI drivers.
4211 		 * If a vendor doesn't actually follow this convention we may
4212 		 * need to introduce a vendor CC table in order to properly set
4213 		 * the status.
4214 		 */
4215 		*status = skb->data[0];
4216 	}
4217 
4218 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4219 
4220 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4221 			     req_complete_skb);
4222 
4223 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4224 		bt_dev_err(hdev,
4225 			   "unexpected event for opcode 0x%4.4x", *opcode);
4226 		return;
4227 	}
4228 
4229 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4230 		queue_work(hdev->workqueue, &hdev->cmd_work);
4231 }
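/* Note that an opcode missing from hci_cc_table, e.g. a vendor specific
 * command issued through __hci_cmd_sync(), is assumed to carry its
 * status in the first byte of the return parameters.
 */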
4232 
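/* LE Create CIS fails synchronously only through a Command Status event;
 * successful CIS connections are reported later via the LE CIS
 * Established event, so this handler only needs to tear down the
 * pending hci_conn objects when the command itself was rejected.
 */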
4233 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4234 {
4235 	struct hci_cp_le_create_cis *cp;
4236 	bool pending = false;
4237 	int i;
4238 
4239 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4240 
4241 	if (!status)
4242 		return;
4243 
4244 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4245 	if (!cp)
4246 		return;
4247 
4248 	hci_dev_lock(hdev);
4249 
4250 	/* Remove connection if command failed */
4251 	for (i = 0; i < cp->num_cis; i++) {
4252 		struct hci_conn *conn;
4253 		u16 handle;
4254 
4255 		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4256 
4257 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4258 		if (conn) {
4259 			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4260 					       &conn->flags))
4261 				pending = true;
4262 			conn->state = BT_CLOSED;
4263 			hci_connect_cfm(conn, status);
4264 			hci_conn_del(conn);
4265 		}
4266 	}
4267 	cp->num_cis = 0;
4268 
4269 	if (pending)
4270 		hci_le_create_cis_pending(hdev);
4271 
4272 	hci_dev_unlock(hdev);
4273 }
4274 
4275 #define HCI_CS(_op, _func) \
4276 { \
4277 	.op = _op, \
4278 	.func = _func, \
4279 }
4280 
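/* Unlike Command Complete, a Command Status event carries no return
 * parameters beyond the status itself, so the table below is a plain
 * opcode-to-handler mapping without length bounds.
 */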
4281 static const struct hci_cs {
4282 	u16  op;
4283 	void (*func)(struct hci_dev *hdev, __u8 status);
4284 } hci_cs_table[] = {
4285 	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4286 	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4287 	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4288 	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4289 	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4290 	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4291 	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4292 	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4293 	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4294 	       hci_cs_read_remote_ext_features),
4295 	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4296 	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4297 	       hci_cs_enhanced_setup_sync_conn),
4298 	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4299 	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4300 	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4301 	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4302 	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4303 	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4304 	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4305 	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4306 	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4307 };
4308 
4309 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4310 			       struct sk_buff *skb, u16 *opcode, u8 *status,
4311 			       hci_req_complete_t *req_complete,
4312 			       hci_req_complete_skb_t *req_complete_skb)
4313 {
4314 	struct hci_ev_cmd_status *ev = data;
4315 	int i;
4316 
4317 	*opcode = __le16_to_cpu(ev->opcode);
4318 	*status = ev->status;
4319 
4320 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4321 
4322 	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4323 		if (hci_cs_table[i].op == *opcode) {
4324 			hci_cs_table[i].func(hdev, ev->status);
4325 			break;
4326 		}
4327 	}
4328 
4329 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4330 
4331 	/* Indicate request completion if the command failed. Also, if
4332 	 * we're not waiting for a special event and we get a success
4333 	 * command status we should try to flag the request as completed
	 * (since for these kinds of commands there will not be a command
4335 	 * complete event).
4336 	 */
4337 	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4338 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4339 				     req_complete_skb);
4340 		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4341 			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4342 				   *opcode);
4343 			return;
4344 		}
4345 	}
4346 
4347 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4348 		queue_work(hdev->workqueue, &hdev->cmd_work);
4349 }
4350 
4351 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4352 				   struct sk_buff *skb)
4353 {
4354 	struct hci_ev_hardware_error *ev = data;
4355 
4356 	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4357 
4358 	hdev->hw_error_code = ev->code;
4359 
4360 	queue_work(hdev->req_workqueue, &hdev->error_reset);
4361 }
4362 
4363 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4364 				struct sk_buff *skb)
4365 {
4366 	struct hci_ev_role_change *ev = data;
4367 	struct hci_conn *conn;
4368 
4369 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4370 
4371 	hci_dev_lock(hdev);
4372 
4373 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4374 	if (conn) {
4375 		if (!ev->status)
4376 			conn->role = ev->role;
4377 
4378 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4379 
4380 		hci_role_switch_cfm(conn, ev->status, ev->role);
4381 	}
4382 
4383 	hci_dev_unlock(hdev);
4384 }
4385 
4386 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4387 				  struct sk_buff *skb)
4388 {
4389 	struct hci_ev_num_comp_pkts *ev = data;
4390 	int i;
4391 
4392 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4393 			     flex_array_size(ev, handles, ev->num)))
4394 		return;
4395 
4396 	bt_dev_dbg(hdev, "num %d", ev->num);
4397 
4398 	for (i = 0; i < ev->num; i++) {
4399 		struct hci_comp_pkts_info *info = &ev->handles[i];
4400 		struct hci_conn *conn;
4401 		__u16  handle, count;
4402 
4403 		handle = __le16_to_cpu(info->handle);
4404 		count  = __le16_to_cpu(info->count);
4405 
4406 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4407 		if (!conn)
4408 			continue;
4409 
4410 		conn->sent -= count;
4411 
4412 		switch (conn->type) {
4413 		case ACL_LINK:
4414 			hdev->acl_cnt += count;
4415 			if (hdev->acl_cnt > hdev->acl_pkts)
4416 				hdev->acl_cnt = hdev->acl_pkts;
4417 			break;
4418 
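		/* Controllers without a dedicated LE buffer pool report
		 * le_pkts == 0 and share the ACL buffers, so credit the
		 * ACL count in that case.
		 */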
4419 		case LE_LINK:
4420 			if (hdev->le_pkts) {
4421 				hdev->le_cnt += count;
4422 				if (hdev->le_cnt > hdev->le_pkts)
4423 					hdev->le_cnt = hdev->le_pkts;
4424 			} else {
4425 				hdev->acl_cnt += count;
4426 				if (hdev->acl_cnt > hdev->acl_pkts)
4427 					hdev->acl_cnt = hdev->acl_pkts;
4428 			}
4429 			break;
4430 
4431 		case SCO_LINK:
4432 			hdev->sco_cnt += count;
4433 			if (hdev->sco_cnt > hdev->sco_pkts)
4434 				hdev->sco_cnt = hdev->sco_pkts;
4435 			break;
4436 
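		/* ISO traffic prefers dedicated ISO credits when the
		 * controller advertises them, then falls back to the LE
		 * pool and finally to the shared ACL pool.
		 */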
4437 		case ISO_LINK:
4438 			if (hdev->iso_pkts) {
4439 				hdev->iso_cnt += count;
4440 				if (hdev->iso_cnt > hdev->iso_pkts)
4441 					hdev->iso_cnt = hdev->iso_pkts;
4442 			} else if (hdev->le_pkts) {
4443 				hdev->le_cnt += count;
4444 				if (hdev->le_cnt > hdev->le_pkts)
4445 					hdev->le_cnt = hdev->le_pkts;
4446 			} else {
4447 				hdev->acl_cnt += count;
4448 				if (hdev->acl_cnt > hdev->acl_pkts)
4449 					hdev->acl_cnt = hdev->acl_pkts;
4450 			}
4451 			break;
4452 
4453 		default:
4454 			bt_dev_err(hdev, "unknown type %d conn %p",
4455 				   conn->type, conn);
4456 			break;
4457 		}
4458 	}
4459 
4460 	queue_work(hdev->workqueue, &hdev->tx_work);
4461 }
4462 
4463 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4464 				struct sk_buff *skb)
4465 {
4466 	struct hci_ev_mode_change *ev = data;
4467 	struct hci_conn *conn;
4468 
4469 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4470 
4471 	hci_dev_lock(hdev);
4472 
4473 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4474 	if (conn) {
4475 		conn->mode = ev->mode;
4476 
4477 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4478 					&conn->flags)) {
4479 			if (conn->mode == HCI_CM_ACTIVE)
4480 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4481 			else
4482 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4483 		}
4484 
4485 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4486 			hci_sco_setup(conn, ev->status);
4487 	}
4488 
4489 	hci_dev_unlock(hdev);
4490 }
4491 
4492 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4493 				     struct sk_buff *skb)
4494 {
4495 	struct hci_ev_pin_code_req *ev = data;
4496 	struct hci_conn *conn;
4497 
4498 	bt_dev_dbg(hdev, "");
4499 
4500 	hci_dev_lock(hdev);
4501 
4502 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4503 	if (!conn)
4504 		goto unlock;
4505 
4506 	if (conn->state == BT_CONNECTED) {
4507 		hci_conn_hold(conn);
4508 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4509 		hci_conn_drop(conn);
4510 	}
4511 
4512 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4513 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4514 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4515 			     sizeof(ev->bdaddr), &ev->bdaddr);
4516 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4517 		u8 secure;
4518 
4519 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4520 			secure = 1;
4521 		else
4522 			secure = 0;
4523 
4524 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4525 	}
4526 
4527 unlock:
4528 	hci_dev_unlock(hdev);
4529 }
4530 
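/* Derive the pending security level from the link key type: unit and
 * debug keys leave it untouched, legacy combination keys map by PIN
 * length, unauthenticated keys map to MEDIUM, and authenticated
 * P-192/P-256 keys map to HIGH and FIPS respectively.
 */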
4531 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4532 {
4533 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4534 		return;
4535 
4536 	conn->pin_length = pin_len;
4537 	conn->key_type = key_type;
4538 
4539 	switch (key_type) {
4540 	case HCI_LK_LOCAL_UNIT:
4541 	case HCI_LK_REMOTE_UNIT:
4542 	case HCI_LK_DEBUG_COMBINATION:
4543 		return;
4544 	case HCI_LK_COMBINATION:
4545 		if (pin_len == 16)
4546 			conn->pending_sec_level = BT_SECURITY_HIGH;
4547 		else
4548 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4549 		break;
4550 	case HCI_LK_UNAUTH_COMBINATION_P192:
4551 	case HCI_LK_UNAUTH_COMBINATION_P256:
4552 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4553 		break;
4554 	case HCI_LK_AUTH_COMBINATION_P192:
4555 		conn->pending_sec_level = BT_SECURITY_HIGH;
4556 		break;
4557 	case HCI_LK_AUTH_COMBINATION_P256:
4558 		conn->pending_sec_level = BT_SECURITY_FIPS;
4559 		break;
4560 	}
4561 }
4562 
4563 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4564 				     struct sk_buff *skb)
4565 {
4566 	struct hci_ev_link_key_req *ev = data;
4567 	struct hci_cp_link_key_reply cp;
4568 	struct hci_conn *conn;
4569 	struct link_key *key;
4570 
4571 	bt_dev_dbg(hdev, "");
4572 
4573 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4574 		return;
4575 
4576 	hci_dev_lock(hdev);
4577 
4578 	key = hci_find_link_key(hdev, &ev->bdaddr);
4579 	if (!key) {
4580 		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4581 		goto not_found;
4582 	}
4583 
4584 	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4585 
4586 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4587 	if (conn) {
4588 		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4589 
4590 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4591 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4592 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4593 			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4594 			goto not_found;
4595 		}
4596 
4597 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4598 		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4599 		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4600 			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4601 			goto not_found;
4602 		}
4603 
4604 		conn_set_key(conn, key->type, key->pin_len);
4605 	}
4606 
4607 	bacpy(&cp.bdaddr, &ev->bdaddr);
4608 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4609 
4610 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4611 
4612 	hci_dev_unlock(hdev);
4613 
4614 	return;
4615 
4616 not_found:
4617 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4618 	hci_dev_unlock(hdev);
4619 }
4620 
4621 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4622 				    struct sk_buff *skb)
4623 {
4624 	struct hci_ev_link_key_notify *ev = data;
4625 	struct hci_conn *conn;
4626 	struct link_key *key;
4627 	bool persistent;
4628 	u8 pin_len = 0;
4629 
4630 	bt_dev_dbg(hdev, "");
4631 
4632 	hci_dev_lock(hdev);
4633 
4634 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4635 	if (!conn)
4636 		goto unlock;
4637 
4638 	/* Ignore NULL link key against CVE-2020-26555 */
4639 	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4640 		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4641 			   &ev->bdaddr);
4642 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4643 		hci_conn_drop(conn);
4644 		goto unlock;
4645 	}
4646 
4647 	hci_conn_hold(conn);
4648 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4649 	hci_conn_drop(conn);
4650 
4651 	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4652 	conn_set_key(conn, ev->key_type, conn->pin_length);
4653 
4654 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4655 		goto unlock;
4656 
4657 	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4658 			        ev->key_type, pin_len, &persistent);
4659 	if (!key)
4660 		goto unlock;
4661 
4662 	/* Update connection information since adding the key will have
4663 	 * fixed up the type in the case of changed combination keys.
4664 	 */
4665 	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4666 		conn_set_key(conn, key->type, key->pin_len);
4667 
4668 	mgmt_new_link_key(hdev, key, persistent);
4669 
4670 	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4671 	 * is set. If it's not set simply remove the key from the kernel
4672 	 * list (we've still notified user space about it but with
4673 	 * store_hint being 0).
4674 	 */
4675 	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4676 	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4677 		list_del_rcu(&key->list);
4678 		kfree_rcu(key, rcu);
4679 		goto unlock;
4680 	}
4681 
4682 	if (persistent)
4683 		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4684 	else
4685 		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4686 
4687 unlock:
4688 	hci_dev_unlock(hdev);
4689 }
4690 
4691 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4692 				 struct sk_buff *skb)
4693 {
4694 	struct hci_ev_clock_offset *ev = data;
4695 	struct hci_conn *conn;
4696 
4697 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4698 
4699 	hci_dev_lock(hdev);
4700 
4701 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4702 	if (conn && !ev->status) {
4703 		struct inquiry_entry *ie;
4704 
4705 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4706 		if (ie) {
4707 			ie->data.clock_offset = ev->clock_offset;
4708 			ie->timestamp = jiffies;
4709 		}
4710 	}
4711 
4712 	hci_dev_unlock(hdev);
4713 }
4714 
4715 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4716 				    struct sk_buff *skb)
4717 {
4718 	struct hci_ev_pkt_type_change *ev = data;
4719 	struct hci_conn *conn;
4720 
4721 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4722 
4723 	hci_dev_lock(hdev);
4724 
4725 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4726 	if (conn && !ev->status)
4727 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4728 
4729 	hci_dev_unlock(hdev);
4730 }
4731 
4732 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4733 				   struct sk_buff *skb)
4734 {
4735 	struct hci_ev_pscan_rep_mode *ev = data;
4736 	struct inquiry_entry *ie;
4737 
4738 	bt_dev_dbg(hdev, "");
4739 
4740 	hci_dev_lock(hdev);
4741 
4742 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4743 	if (ie) {
4744 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4745 		ie->timestamp = jiffies;
4746 	}
4747 
4748 	hci_dev_unlock(hdev);
4749 }
4750 
4751 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4752 					     struct sk_buff *skb)
4753 {
4754 	struct hci_ev_inquiry_result_rssi *ev = edata;
4755 	struct inquiry_data data;
4756 	int i;
4757 
4758 	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4759 
4760 	if (!ev->num)
4761 		return;
4762 
4763 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4764 		return;
4765 
4766 	hci_dev_lock(hdev);
4767 
4768 	if (skb->len == array_size(ev->num,
4769 				   sizeof(struct inquiry_info_rssi_pscan))) {
4770 		struct inquiry_info_rssi_pscan *info;
4771 
4772 		for (i = 0; i < ev->num; i++) {
4773 			u32 flags;
4774 
4775 			info = hci_ev_skb_pull(hdev, skb,
4776 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4777 					       sizeof(*info));
4778 			if (!info) {
4779 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4780 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4781 				goto unlock;
4782 			}
4783 
4784 			bacpy(&data.bdaddr, &info->bdaddr);
4785 			data.pscan_rep_mode	= info->pscan_rep_mode;
4786 			data.pscan_period_mode	= info->pscan_period_mode;
4787 			data.pscan_mode		= info->pscan_mode;
4788 			memcpy(data.dev_class, info->dev_class, 3);
4789 			data.clock_offset	= info->clock_offset;
4790 			data.rssi		= info->rssi;
4791 			data.ssp_mode		= 0x00;
4792 
4793 			flags = hci_inquiry_cache_update(hdev, &data, false);
4794 
4795 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4796 					  info->dev_class, info->rssi,
4797 					  flags, NULL, 0, NULL, 0, 0);
4798 		}
4799 	} else if (skb->len == array_size(ev->num,
4800 					  sizeof(struct inquiry_info_rssi))) {
4801 		struct inquiry_info_rssi *info;
4802 
4803 		for (i = 0; i < ev->num; i++) {
4804 			u32 flags;
4805 
4806 			info = hci_ev_skb_pull(hdev, skb,
4807 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4808 					       sizeof(*info));
4809 			if (!info) {
4810 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4811 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4812 				goto unlock;
4813 			}
4814 
4815 			bacpy(&data.bdaddr, &info->bdaddr);
4816 			data.pscan_rep_mode	= info->pscan_rep_mode;
4817 			data.pscan_period_mode	= info->pscan_period_mode;
4818 			data.pscan_mode		= 0x00;
4819 			memcpy(data.dev_class, info->dev_class, 3);
4820 			data.clock_offset	= info->clock_offset;
4821 			data.rssi		= info->rssi;
4822 			data.ssp_mode		= 0x00;
4823 
4824 			flags = hci_inquiry_cache_update(hdev, &data, false);
4825 
4826 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4827 					  info->dev_class, info->rssi,
4828 					  flags, NULL, 0, NULL, 0, 0);
4829 		}
4830 	} else {
4831 		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4832 			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4833 	}
4834 unlock:
4835 	hci_dev_unlock(hdev);
4836 }
4837 
4838 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4839 					struct sk_buff *skb)
4840 {
4841 	struct hci_ev_remote_ext_features *ev = data;
4842 	struct hci_conn *conn;
4843 
4844 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4845 
4846 	hci_dev_lock(hdev);
4847 
4848 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4849 	if (!conn)
4850 		goto unlock;
4851 
4852 	if (ev->page < HCI_MAX_PAGES)
4853 		memcpy(conn->features[ev->page], ev->features, 8);
4854 
4855 	if (!ev->status && ev->page == 0x01) {
4856 		struct inquiry_entry *ie;
4857 
4858 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4859 		if (ie)
4860 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4861 
4862 		if (ev->features[0] & LMP_HOST_SSP) {
4863 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4864 		} else {
4865 			/* It is mandatory by the Bluetooth specification that
4866 			 * Extended Inquiry Results are only used when Secure
4867 			 * Simple Pairing is enabled, but some devices violate
4868 			 * this.
4869 			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support.
			 */
4873 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4874 		}
4875 
4876 		if (ev->features[0] & LMP_HOST_SC)
4877 			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4878 	}
4879 
4880 	if (conn->state != BT_CONFIG)
4881 		goto unlock;
4882 
4883 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4884 		struct hci_cp_remote_name_req cp;
4885 		memset(&cp, 0, sizeof(cp));
4886 		bacpy(&cp.bdaddr, &conn->dst);
4887 		cp.pscan_rep_mode = 0x02;
4888 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4889 	} else {
4890 		mgmt_device_connected(hdev, conn, NULL, 0);
4891 	}
4892 
4893 	if (!hci_outgoing_auth_needed(hdev, conn)) {
4894 		conn->state = BT_CONNECTED;
4895 		hci_connect_cfm(conn, ev->status);
4896 		hci_conn_drop(conn);
4897 	}
4898 
4899 unlock:
4900 	hci_dev_unlock(hdev);
4901 }
4902 
4903 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4904 				       struct sk_buff *skb)
4905 {
4906 	struct hci_ev_sync_conn_complete *ev = data;
4907 	struct hci_conn *conn;
4908 	u8 status = ev->status;
4909 
4910 	switch (ev->link_type) {
4911 	case SCO_LINK:
4912 	case ESCO_LINK:
4913 		break;
4914 	default:
4915 		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4916 		 * for HCI_Synchronous_Connection_Complete is limited to
4917 		 * either SCO or eSCO
4918 		 */
4919 		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4920 		return;
4921 	}
4922 
4923 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4924 
4925 	hci_dev_lock(hdev);
4926 
4927 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4928 	if (!conn) {
4929 		if (ev->link_type == ESCO_LINK)
4930 			goto unlock;
4931 
4932 		/* When the link type in the event indicates SCO connection
4933 		 * and lookup of the connection object fails, then check
4934 		 * if an eSCO connection object exists.
4935 		 *
4936 		 * The core limits the synchronous connections to either
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and is
		 * attempted first; until it is successfully established,
		 * the link type will be hinted as eSCO.
4941 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4942 		if (!conn)
4943 			goto unlock;
4944 	}
4945 
4946 	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4947 	 * Processing it more than once per connection can corrupt kernel memory.
4948 	 *
4949 	 * As the connection handle is set here for the first time, it indicates
4950 	 * whether the connection is already set up.
4951 	 */
4952 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
4953 		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4954 		goto unlock;
4955 	}
4956 
4957 	switch (status) {
4958 	case 0x00:
4959 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
4960 		if (status) {
4961 			conn->state = BT_CLOSED;
4962 			break;
4963 		}
4964 
4965 		conn->state  = BT_CONNECTED;
4966 		conn->type   = ev->link_type;
4967 
4968 		hci_debugfs_create_conn(conn);
4969 		hci_conn_add_sysfs(conn);
4970 		break;
4971 
4972 	case 0x10:	/* Connection Accept Timeout */
4973 	case 0x0d:	/* Connection Rejected due to Limited Resources */
4974 	case 0x11:	/* Unsupported Feature or Parameter Value */
4975 	case 0x1c:	/* SCO interval rejected */
4976 	case 0x1a:	/* Unsupported Remote Feature */
4977 	case 0x1e:	/* Invalid LMP Parameters */
4978 	case 0x1f:	/* Unspecified error */
4979 	case 0x20:	/* Unsupported LMP Parameter value */
4980 		if (conn->out) {
4981 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4982 					(hdev->esco_type & EDR_ESCO_MASK);
4983 			if (hci_setup_sync(conn, conn->parent->handle))
4984 				goto unlock;
4985 		}
4986 		fallthrough;
4987 
4988 	default:
4989 		conn->state = BT_CLOSED;
4990 		break;
4991 	}
4992 
4993 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over the HCI transport data path,
	 * which is zero; a non-zero value indicates a non-HCI transport
	 * data path.
	 */
4997 	if (conn->codec.data_path == 0 && hdev->notify) {
4998 		switch (ev->air_mode) {
4999 		case 0x02:
5000 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5001 			break;
5002 		case 0x03:
5003 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5004 			break;
5005 		}
5006 	}
5007 
5008 	hci_connect_cfm(conn, status);
5009 	if (status)
5010 		hci_conn_del(conn);
5011 
5012 unlock:
5013 	hci_dev_unlock(hdev);
5014 }
5015 
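/* EIR data is a sequence of length-prefixed structures, |len|type|data|,
 * terminated either by the end of the buffer or by a zero length octet.
 * For example, the two structures 02 01 06 and 05 09 41 42 43 44 parse
 * to 3 + 6 = 9 significant octets.
 */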
5016 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5017 {
5018 	size_t parsed = 0;
5019 
5020 	while (parsed < eir_len) {
5021 		u8 field_len = eir[0];
5022 
5023 		if (field_len == 0)
5024 			return parsed;
5025 
5026 		parsed += field_len + 1;
5027 		eir += field_len + 1;
5028 	}
5029 
5030 	return eir_len;
5031 }
5032 
5033 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5034 					    struct sk_buff *skb)
5035 {
5036 	struct hci_ev_ext_inquiry_result *ev = edata;
5037 	struct inquiry_data data;
5038 	size_t eir_len;
5039 	int i;
5040 
5041 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5042 			     flex_array_size(ev, info, ev->num)))
5043 		return;
5044 
5045 	bt_dev_dbg(hdev, "num %d", ev->num);
5046 
5047 	if (!ev->num)
5048 		return;
5049 
5050 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5051 		return;
5052 
5053 	hci_dev_lock(hdev);
5054 
5055 	for (i = 0; i < ev->num; i++) {
5056 		struct extended_inquiry_info *info = &ev->info[i];
5057 		u32 flags;
5058 		bool name_known;
5059 
5060 		bacpy(&data.bdaddr, &info->bdaddr);
5061 		data.pscan_rep_mode	= info->pscan_rep_mode;
5062 		data.pscan_period_mode	= info->pscan_period_mode;
5063 		data.pscan_mode		= 0x00;
5064 		memcpy(data.dev_class, info->dev_class, 3);
5065 		data.clock_offset	= info->clock_offset;
5066 		data.rssi		= info->rssi;
5067 		data.ssp_mode		= 0x01;
5068 
5069 		if (hci_dev_test_flag(hdev, HCI_MGMT))
5070 			name_known = eir_get_data(info->data,
5071 						  sizeof(info->data),
5072 						  EIR_NAME_COMPLETE, NULL);
5073 		else
5074 			name_known = true;
5075 
5076 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5077 
5078 		eir_len = eir_get_length(info->data, sizeof(info->data));
5079 
5080 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5081 				  info->dev_class, info->rssi,
5082 				  flags, info->data, eir_len, NULL, 0, 0);
5083 	}
5084 
5085 	hci_dev_unlock(hdev);
5086 }
5087 
5088 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5089 					 struct sk_buff *skb)
5090 {
5091 	struct hci_ev_key_refresh_complete *ev = data;
5092 	struct hci_conn *conn;
5093 
5094 	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5095 		   __le16_to_cpu(ev->handle));
5096 
5097 	hci_dev_lock(hdev);
5098 
5099 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5100 	if (!conn)
5101 		goto unlock;
5102 
5103 	/* For BR/EDR the necessary steps are taken through the
5104 	 * auth_complete event.
5105 	 */
5106 	if (conn->type != LE_LINK)
5107 		goto unlock;
5108 
5109 	if (!ev->status)
5110 		conn->sec_level = conn->pending_sec_level;
5111 
5112 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5113 
5114 	if (ev->status && conn->state == BT_CONNECTED) {
5115 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5116 		hci_conn_drop(conn);
5117 		goto unlock;
5118 	}
5119 
5120 	if (conn->state == BT_CONFIG) {
5121 		if (!ev->status)
5122 			conn->state = BT_CONNECTED;
5123 
5124 		hci_connect_cfm(conn, ev->status);
5125 		hci_conn_drop(conn);
5126 	} else {
5127 		hci_auth_cfm(conn, ev->status);
5128 
5129 		hci_conn_hold(conn);
5130 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5131 		hci_conn_drop(conn);
5132 	}
5133 
5134 unlock:
5135 	hci_dev_unlock(hdev);
5136 }
5137 
5138 static u8 hci_get_auth_req(struct hci_conn *conn)
5139 {
	/* If the remote requests no-bonding, follow that lead */
5141 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5142 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5143 		return conn->remote_auth | (conn->auth_type & 0x01);
5144 
5145 	/* If both remote and local have enough IO capabilities, require
5146 	 * MITM protection
5147 	 */
5148 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5149 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5150 		return conn->remote_auth | 0x01;
5151 
5152 	/* No MITM protection possible so ignore remote requirement */
5153 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5154 }
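/* For illustration: if the remote requests MITM-protected dedicated
 * bonding (0x03) but reports NoInputNoOutput capability, MITM
 * protection is impossible, so the remote MITM bit is dropped and the
 * result is 0x02 OR-ed with our own local MITM preference bit.
 */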
5155 
5156 static u8 bredr_oob_data_present(struct hci_conn *conn)
5157 {
5158 	struct hci_dev *hdev = conn->hdev;
5159 	struct oob_data *data;
5160 
5161 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5162 	if (!data)
5163 		return 0x00;
5164 
5165 	if (bredr_sc_enabled(hdev)) {
5166 		/* When Secure Connections is enabled, then just
5167 		 * return the present value stored with the OOB
5168 		 * data. The stored value contains the right present
5169 		 * information. However it can only be trusted when
		 * not in Secure Connections Only mode.
5171 		 */
5172 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5173 			return data->present;
5174 
5175 		/* When Secure Connections Only mode is enabled, then
5176 		 * the P-256 values are required. If they are not
5177 		 * available, then do not declare that OOB data is
5178 		 * present.
5179 		 */
5180 		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5181 		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5182 			return 0x00;
5183 
5184 		return 0x02;
5185 	}
5186 
	/* When Secure Connections is not enabled or not actually
	 * supported by the hardware, then check if the P-192 data
	 * values are present.
	 */
5191 	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5192 	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5193 		return 0x00;
5194 
5195 	return 0x01;
5196 }
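/* The return values above follow the OOB_Data_Present encoding used in
 * the IO Capability Request Reply: 0x00 no OOB data, 0x01 P-192 data
 * present, 0x02 P-256 data present.
 */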
5197 
5198 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5199 				    struct sk_buff *skb)
5200 {
5201 	struct hci_ev_io_capa_request *ev = data;
5202 	struct hci_conn *conn;
5203 
5204 	bt_dev_dbg(hdev, "");
5205 
5206 	hci_dev_lock(hdev);
5207 
5208 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5209 	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5210 		goto unlock;
5211 
5212 	/* Assume remote supports SSP since it has triggered this event */
5213 	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5214 
5215 	hci_conn_hold(conn);
5216 
5217 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5218 		goto unlock;
5219 
	/* Allow pairing if we're bondable, if we are the initiators of
	 * the pairing, or if the remote is not requesting bonding.
	 */
5223 	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5224 	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5225 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5226 		struct hci_cp_io_capability_reply cp;
5227 
5228 		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay to
		 * DisplayYesNo since KeyboardDisplay is not supported
		 * by the BT spec for BR/EDR.
		 */
5231 		cp.capability = (conn->io_capability == 0x04) ?
5232 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5233 
5234 		/* If we are initiators, there is no remote information yet */
5235 		if (conn->remote_auth == 0xff) {
5236 			/* Request MITM protection if our IO caps allow it
5237 			 * except for the no-bonding case.
5238 			 */
5239 			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5240 			    conn->auth_type != HCI_AT_NO_BONDING)
5241 				conn->auth_type |= 0x01;
5242 		} else {
5243 			conn->auth_type = hci_get_auth_req(conn);
5244 		}
5245 
5246 		/* If we're not bondable, force one of the non-bondable
5247 		 * authentication requirement values.
5248 		 */
5249 		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5250 			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5251 
5252 		cp.authentication = conn->auth_type;
5253 		cp.oob_data = bredr_oob_data_present(conn);
5254 
5255 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5256 			     sizeof(cp), &cp);
5257 	} else {
5258 		struct hci_cp_io_capability_neg_reply cp;
5259 
5260 		bacpy(&cp.bdaddr, &ev->bdaddr);
5261 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5262 
5263 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5264 			     sizeof(cp), &cp);
5265 	}
5266 
5267 unlock:
5268 	hci_dev_unlock(hdev);
5269 }
5270 
5271 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5272 				  struct sk_buff *skb)
5273 {
5274 	struct hci_ev_io_capa_reply *ev = data;
5275 	struct hci_conn *conn;
5276 
5277 	bt_dev_dbg(hdev, "");
5278 
5279 	hci_dev_lock(hdev);
5280 
5281 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5282 	if (!conn)
5283 		goto unlock;
5284 
5285 	conn->remote_cap = ev->capability;
5286 	conn->remote_auth = ev->authentication;
5287 
5288 unlock:
5289 	hci_dev_unlock(hdev);
5290 }
5291 
5292 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5293 					 struct sk_buff *skb)
5294 {
5295 	struct hci_ev_user_confirm_req *ev = data;
5296 	int loc_mitm, rem_mitm, confirm_hint = 0;
5297 	struct hci_conn *conn;
5298 
5299 	bt_dev_dbg(hdev, "");
5300 
5301 	hci_dev_lock(hdev);
5302 
5303 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5304 		goto unlock;
5305 
5306 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5307 	if (!conn)
5308 		goto unlock;
5309 
5310 	loc_mitm = (conn->auth_type & 0x01);
5311 	rem_mitm = (conn->remote_auth & 0x01);
5312 
5313 	/* If we require MITM but the remote device can't provide that
5314 	 * (it has NoInputNoOutput) then reject the confirmation
5315 	 * request. We check the security level here since it doesn't
5316 	 * necessarily match conn->auth_type.
5317 	 */
5318 	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5319 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5320 		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5321 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5322 			     sizeof(ev->bdaddr), &ev->bdaddr);
5323 		goto unlock;
5324 	}
5325 
	/* If no side requires MITM protection, auto-accept */
5327 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5328 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5329 
		/* If we're not the initiators, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept.
		 */
5336 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5337 		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5338 		    (loc_mitm || rem_mitm)) {
5339 			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5340 			confirm_hint = 1;
5341 			goto confirm;
5342 		}
5343 
		/* If a link key already exists in the local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
5348 		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5349 			bt_dev_dbg(hdev, "Local host already has link key");
5350 			confirm_hint = 1;
5351 			goto confirm;
5352 		}
5353 
5354 		BT_DBG("Auto-accept of user confirmation with %ums delay",
5355 		       hdev->auto_accept_delay);
5356 
5357 		if (hdev->auto_accept_delay > 0) {
5358 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5359 			queue_delayed_work(conn->hdev->workqueue,
5360 					   &conn->auto_accept_work, delay);
5361 			goto unlock;
5362 		}
5363 
5364 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5365 			     sizeof(ev->bdaddr), &ev->bdaddr);
5366 		goto unlock;
5367 	}
5368 
5369 confirm:
5370 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5371 				  le32_to_cpu(ev->passkey), confirm_hint);
5372 
5373 unlock:
5374 	hci_dev_unlock(hdev);
5375 }
5376 
5377 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5378 					 struct sk_buff *skb)
5379 {
5380 	struct hci_ev_user_passkey_req *ev = data;
5381 
5382 	bt_dev_dbg(hdev, "");
5383 
5384 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5385 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5386 }
5387 
5388 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5389 					struct sk_buff *skb)
5390 {
5391 	struct hci_ev_user_passkey_notify *ev = data;
5392 	struct hci_conn *conn;
5393 
5394 	bt_dev_dbg(hdev, "");
5395 
5396 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5397 	if (!conn)
5398 		return;
5399 
5400 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5401 	conn->passkey_entered = 0;
5402 
5403 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5404 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5405 					 conn->dst_type, conn->passkey_notify,
5406 					 conn->passkey_entered);
5407 }
5408 
5409 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5410 				    struct sk_buff *skb)
5411 {
5412 	struct hci_ev_keypress_notify *ev = data;
5413 	struct hci_conn *conn;
5414 
5415 	bt_dev_dbg(hdev, "");
5416 
5417 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5418 	if (!conn)
5419 		return;
5420 
5421 	switch (ev->type) {
5422 	case HCI_KEYPRESS_STARTED:
5423 		conn->passkey_entered = 0;
5424 		return;
5425 
5426 	case HCI_KEYPRESS_ENTERED:
5427 		conn->passkey_entered++;
5428 		break;
5429 
5430 	case HCI_KEYPRESS_ERASED:
5431 		conn->passkey_entered--;
5432 		break;
5433 
5434 	case HCI_KEYPRESS_CLEARED:
5435 		conn->passkey_entered = 0;
5436 		break;
5437 
5438 	case HCI_KEYPRESS_COMPLETED:
5439 		return;
5440 	}
5441 
5442 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5443 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5444 					 conn->dst_type, conn->passkey_notify,
5445 					 conn->passkey_entered);
5446 }
5447 
5448 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5449 					 struct sk_buff *skb)
5450 {
5451 	struct hci_ev_simple_pair_complete *ev = data;
5452 	struct hci_conn *conn;
5453 
5454 	bt_dev_dbg(hdev, "");
5455 
5456 	hci_dev_lock(hdev);
5457 
5458 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5459 	if (!conn || !hci_conn_ssp_enabled(conn))
5460 		goto unlock;
5461 
5462 	/* Reset the authentication requirement to unknown */
5463 	conn->remote_auth = 0xff;
5464 
	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event always gets produced as initiator and is also mapped
	 * to the mgmt_auth_failed event.
	 */
5470 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5471 		mgmt_auth_failed(conn, ev->status);
5472 
5473 	hci_conn_drop(conn);
5474 
5475 unlock:
5476 	hci_dev_unlock(hdev);
5477 }
5478 
5479 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5480 					 struct sk_buff *skb)
5481 {
5482 	struct hci_ev_remote_host_features *ev = data;
5483 	struct inquiry_entry *ie;
5484 	struct hci_conn *conn;
5485 
5486 	bt_dev_dbg(hdev, "");
5487 
5488 	hci_dev_lock(hdev);
5489 
5490 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5491 	if (conn)
5492 		memcpy(conn->features[1], ev->features, 8);
5493 
5494 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5495 	if (ie)
5496 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5497 
5498 	hci_dev_unlock(hdev);
5499 }
5500 
5501 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5502 					    struct sk_buff *skb)
5503 {
5504 	struct hci_ev_remote_oob_data_request *ev = edata;
5505 	struct oob_data *data;
5506 
5507 	bt_dev_dbg(hdev, "");
5508 
5509 	hci_dev_lock(hdev);
5510 
5511 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5512 		goto unlock;
5513 
5514 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5515 	if (!data) {
5516 		struct hci_cp_remote_oob_data_neg_reply cp;
5517 
5518 		bacpy(&cp.bdaddr, &ev->bdaddr);
5519 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5520 			     sizeof(cp), &cp);
5521 		goto unlock;
5522 	}
5523 
5524 	if (bredr_sc_enabled(hdev)) {
5525 		struct hci_cp_remote_oob_ext_data_reply cp;
5526 
5527 		bacpy(&cp.bdaddr, &ev->bdaddr);
5528 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5529 			memset(cp.hash192, 0, sizeof(cp.hash192));
5530 			memset(cp.rand192, 0, sizeof(cp.rand192));
5531 		} else {
5532 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5533 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5534 		}
5535 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5536 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5537 
5538 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5539 			     sizeof(cp), &cp);
5540 	} else {
5541 		struct hci_cp_remote_oob_data_reply cp;
5542 
5543 		bacpy(&cp.bdaddr, &ev->bdaddr);
5544 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5545 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5546 
5547 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5548 			     sizeof(cp), &cp);
5549 	}
5550 
5551 unlock:
5552 	hci_dev_unlock(hdev);
5553 }
5554 
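/* Fill in the initiator and responder addresses of a new LE connection.
 * Which side is populated from the peer address depends on conn->out,
 * and a valid controller-provided Local RPA always takes precedence
 * over the host's cached hdev->rpa.
 */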
5555 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5556 				u8 bdaddr_type, bdaddr_t *local_rpa)
5557 {
5558 	if (conn->out) {
5559 		conn->dst_type = bdaddr_type;
5560 		conn->resp_addr_type = bdaddr_type;
5561 		bacpy(&conn->resp_addr, bdaddr);
5562 
		/* If the controller has set a Local RPA, then it must be
		 * used instead of hdev->rpa.
		 */
5566 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5567 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5568 			bacpy(&conn->init_addr, local_rpa);
5569 		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5570 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5571 			bacpy(&conn->init_addr, &conn->hdev->rpa);
5572 		} else {
5573 			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5574 						  &conn->init_addr_type);
5575 		}
5576 	} else {
5577 		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* If the controller has set a Local RPA, then it must be
		 * used instead of hdev->rpa.
		 */
5581 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5582 			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5583 			bacpy(&conn->resp_addr, local_rpa);
5584 		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5585 			/* In case of ext adv, resp_addr will be updated in
5586 			 * Adv Terminated event.
5587 			 */
5588 			if (!ext_adv_capable(conn->hdev))
5589 				bacpy(&conn->resp_addr,
5590 				      &conn->hdev->random_addr);
5591 		} else {
5592 			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5593 		}
5594 
5595 		conn->init_addr_type = bdaddr_type;
5596 		bacpy(&conn->init_addr, bdaddr);
5597 
5598 		/* For incoming connections, set the default minimum
5599 		 * and maximum connection interval. They will be used
5600 		 * to check if the parameters are in range and if not
5601 		 * trigger the connection update procedure.
5602 		 */
5603 		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5604 		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5605 	}
5606 }
5607 
5608 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5609 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5610 				 bdaddr_t *local_rpa, u8 role, u16 handle,
5611 				 u16 interval, u16 latency,
5612 				 u16 supervision_timeout)
5613 {
5614 	struct hci_conn_params *params;
5615 	struct hci_conn *conn;
5616 	struct smp_irk *irk;
5617 	u8 addr_type;
5618 
5619 	hci_dev_lock(hdev);
5620 
5621 	/* All controllers implicitly stop advertising in the event of a
5622 	 * connection, so ensure that the state bit is cleared.
5623 	 */
5624 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5625 
5626 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5627 	if (!conn) {
5628 		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to clean up.
5630 		 */
5631 		if (status)
5632 			goto unlock;
5633 
5634 		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5635 		if (IS_ERR(conn)) {
5636 			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
5637 			goto unlock;
5638 		}
5639 
5640 		conn->dst_type = bdaddr_type;
5641 
5642 		/* If we didn't have a hci_conn object previously
		 * but we're in central role, this must be something
5644 		 * initiated using an accept list. Since accept list based
5645 		 * connections are not "first class citizens" we don't
5646 		 * have full tracking of them. Therefore, we go ahead
5647 		 * with a "best effort" approach of determining the
5648 		 * initiator address based on the HCI_PRIVACY flag.
5649 		 */
5650 		if (conn->out) {
5651 			conn->resp_addr_type = bdaddr_type;
5652 			bacpy(&conn->resp_addr, bdaddr);
5653 			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5654 				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5655 				bacpy(&conn->init_addr, &hdev->rpa);
5656 			} else {
5657 				hci_copy_identity_address(hdev,
5658 							  &conn->init_addr,
5659 							  &conn->init_addr_type);
5660 			}
5661 		}
5662 	} else {
5663 		cancel_delayed_work(&conn->le_conn_timeout);
5664 	}
5665 
5666 	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5667 	 * Processing it more than once per connection can corrupt kernel memory.
5668 	 *
5669 	 * As the connection handle is set here for the first time, it indicates
5670 	 * whether the connection is already set up.
5671 	 */
5672 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5673 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5674 		goto unlock;
5675 	}
5676 
5677 	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5678 
5679 	/* Lookup the identity address from the stored connection
5680 	 * address and address type.
5681 	 *
5682 	 * When establishing connections to an identity address, the
5683 	 * connection procedure will store the resolvable random
5684 	 * address first. Now if it can be converted back into the
5685 	 * identity address, start using the identity address from
5686 	 * now on.
5687 	 */
5688 	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5689 	if (irk) {
5690 		bacpy(&conn->dst, &irk->bdaddr);
5691 		conn->dst_type = irk->addr_type;
5692 	}
5693 
5694 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5695 
5696 	/* All connection failure handling is taken care of by the
5697 	 * hci_conn_failed function which is triggered by the HCI
5698 	 * request completion callbacks used for connecting.
5699 	 */
5700 	if (status || hci_conn_set_handle(conn, handle))
5701 		goto unlock;
5702 
5703 	/* Drop the connection if it has been aborted */
5704 	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5705 		hci_conn_drop(conn);
5706 		goto unlock;
5707 	}
5708 
5709 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5710 		addr_type = BDADDR_LE_PUBLIC;
5711 	else
5712 		addr_type = BDADDR_LE_RANDOM;
5713 
5714 	/* Drop the connection if the device is blocked */
5715 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5716 		hci_conn_drop(conn);
5717 		goto unlock;
5718 	}
5719 
5720 	mgmt_device_connected(hdev, conn, NULL, 0);
5721 
5722 	conn->sec_level = BT_SECURITY_LOW;
5723 	conn->state = BT_CONFIG;
5724 
5725 	/* Store current advertising instance as connection advertising instance
5726 		 * when software rotation is in use so it can be re-enabled when
5727 	 * disconnected.
5728 	 */
5729 	if (!ext_adv_capable(hdev))
5730 		conn->adv_instance = hdev->cur_adv_instance;
5731 
5732 	conn->le_conn_interval = interval;
5733 	conn->le_conn_latency = latency;
5734 	conn->le_supv_timeout = supervision_timeout;
5735 
5736 	hci_debugfs_create_conn(conn);
5737 	hci_conn_add_sysfs(conn);
5738 
5739 	/* The remote features procedure is defined for central
5740 	 * role only, so only in the case of an initiated (outgoing)
5741 	 * connection do we request the remote features.
5742 	 *
5743 	 * If the local controller supports peripheral-initiated features
5744 	 * exchange, then requesting the remote features in peripheral
5745 	 * role is possible. Otherwise just transition into the
5746 	 * connected state without requesting the remote features.
5747 	 */
5748 	if (conn->out ||
5749 	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5750 		struct hci_cp_le_read_remote_features cp;
5751 
5752 		cp.handle = __cpu_to_le16(conn->handle);
5753 
5754 		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5755 			     sizeof(cp), &cp);
5756 
5757 		hci_conn_hold(conn);
5758 	} else {
5759 		conn->state = BT_CONNECTED;
5760 		hci_connect_cfm(conn, status);
5761 	}
5762 
5763 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5764 					   conn->dst_type);
5765 	if (params) {
5766 		hci_pend_le_list_del_init(params);
5767 		if (params->conn) {
5768 			hci_conn_drop(params->conn);
5769 			hci_conn_put(params->conn);
5770 			params->conn = NULL;
5771 		}
5772 	}
5773 
5774 unlock:
5775 	hci_update_passive_scan(hdev);
5776 	hci_dev_unlock(hdev);
5777 }
5778 
5779 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5780 				     struct sk_buff *skb)
5781 {
5782 	struct hci_ev_le_conn_complete *ev = data;
5783 
5784 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5785 
5786 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5787 			     NULL, ev->role, le16_to_cpu(ev->handle),
5788 			     le16_to_cpu(ev->interval),
5789 			     le16_to_cpu(ev->latency),
5790 			     le16_to_cpu(ev->supervision_timeout));
5791 }
5792 
5793 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5794 					 struct sk_buff *skb)
5795 {
5796 	struct hci_ev_le_enh_conn_complete *ev = data;
5797 
5798 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5799 
5800 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5801 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5802 			     le16_to_cpu(ev->interval),
5803 			     le16_to_cpu(ev->latency),
5804 			     le16_to_cpu(ev->supervision_timeout));
5805 }
5806 
5807 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5808 				    struct sk_buff *skb)
5809 {
5810 	struct hci_evt_le_ext_adv_set_term *ev = data;
5811 	struct hci_conn *conn;
5812 	struct adv_info *adv, *n;
5813 
5814 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5815 
5816 	/* The Bluetooth Core 5.3 specification clearly states that this event
5817 	 * shall not be sent when the Host disables the advertising set. So in
5818 	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5819 	 *
5820 	 * When the Host disables an advertising set, all cleanup is done via
5821 	 * its command callback and does not need to be duplicated here.
5822 	 */
5823 	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5824 		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5825 		return;
5826 	}
5827 
5828 	hci_dev_lock(hdev);
5829 
5830 	adv = hci_find_adv_instance(hdev, ev->handle);
5831 
5832 	if (ev->status) {
5833 		if (!adv)
5834 			goto unlock;
5835 
5836 		/* Remove advertising as it has been terminated */
5837 		hci_remove_adv_instance(hdev, ev->handle);
5838 		mgmt_advertising_removed(NULL, hdev, ev->handle);
5839 
5840 		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5841 			if (adv->enabled)
5842 				goto unlock;
5843 		}
5844 
5845 		/* We are no longer advertising, clear HCI_LE_ADV */
5846 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
5847 		goto unlock;
5848 	}
5849 
5850 	if (adv)
5851 		adv->enabled = false;
5852 
5853 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5854 	if (conn) {
5855 		/* Store handle in the connection so the correct advertising
5856 		 * instance can be re-enabled when disconnected.
5857 		 */
5858 		conn->adv_instance = ev->handle;
5859 
5860 		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5861 		    bacmp(&conn->resp_addr, BDADDR_ANY))
5862 			goto unlock;
5863 
5864 		if (!ev->handle) {
5865 			bacpy(&conn->resp_addr, &hdev->random_addr);
5866 			goto unlock;
5867 		}
5868 
5869 		if (adv)
5870 			bacpy(&conn->resp_addr, &adv->random_addr);
5871 	}
5872 
5873 unlock:
5874 	hci_dev_unlock(hdev);
5875 }
5876 
5877 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5878 					    struct sk_buff *skb)
5879 {
5880 	struct hci_ev_le_conn_update_complete *ev = data;
5881 	struct hci_conn *conn;
5882 
5883 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5884 
5885 	if (ev->status)
5886 		return;
5887 
5888 	hci_dev_lock(hdev);
5889 
5890 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5891 	if (conn) {
5892 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5893 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5894 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5895 	}
5896 
5897 	hci_dev_unlock(hdev);
5898 }
5899 
5900 /* This function requires the caller holds hdev->lock */
5901 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5902 					      bdaddr_t *addr,
5903 					      u8 addr_type, bool addr_resolved,
5904 					      u8 adv_type, u8 phy, u8 sec_phy)
5905 {
5906 	struct hci_conn *conn;
5907 	struct hci_conn_params *params;
5908 
5909 	/* If the event is not connectable don't proceed further */
5910 	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5911 		return NULL;
5912 
5913 	/* Ignore if the device is blocked or hdev is suspended */
5914 	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5915 	    hdev->suspended)
5916 		return NULL;
5917 
5918 	/* Most controllers will fail if we try to create new connections
5919 	 * while we have an existing one in peripheral role.
5920 	 */
5921 	if (hdev->conn_hash.le_num_peripheral > 0 &&
5922 	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5923 	     !(hdev->le_states[3] & 0x10)))
5924 		return NULL;
5925 
5926 	/* If we're not connectable only connect devices that we have in
5927 	 * our pend_le_conns list.
5928 	 */
5929 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5930 					   addr_type);
5931 	if (!params)
5932 		return NULL;
5933 
5934 	if (!params->explicit_connect) {
5935 		switch (params->auto_connect) {
5936 		case HCI_AUTO_CONN_DIRECT:
5937 			/* Only devices advertising with ADV_DIRECT_IND
5938 			 * trigger a connection attempt. This allows
5939 			 * incoming connections from peripheral devices.
5940 			 */
5941 			if (adv_type != LE_ADV_DIRECT_IND)
5942 				return NULL;
5943 			break;
5944 		case HCI_AUTO_CONN_ALWAYS:
5945 			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
5946 			 * trigger a connection attempt. This means
5947 			 * that incoming connections from peripheral devices are
5948 			 * accepted and also outgoing connections to peripheral
5949 			 * devices are established when found.
5950 			 */
5951 			break;
5952 		default:
5953 			return NULL;
5954 		}
5955 	}
5956 
5957 	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5958 			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5959 			      HCI_ROLE_MASTER, phy, sec_phy);
5960 	if (!IS_ERR(conn)) {
5961 		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5962 		 * by the higher layer that tried to connect. If not, then
5963 		 * store the pointer since we don't really have any
5964 		 * other owner of the object besides the params that
5965 		 * triggered it. This way we can abort the connection if
5966 		 * the parameters get removed and keep the reference
5967 		 * count consistent once the connection is established.
5968 		 */
5969 
5970 		if (!params->explicit_connect)
5971 			params->conn = hci_conn_get(conn);
5972 
5973 		return conn;
5974 	}
5975 
5976 	switch (PTR_ERR(conn)) {
5977 	case -EBUSY:
5978 		/* If hci_connect_le() returns -EBUSY it means there is already
5979 		 * an LE connection attempt going on. Since controllers don't
5980 		 * support more than one connection attempt at a time, we
5981 		 * don't consider this an error case.
5982 		 */
5983 		break;
5984 	default:
5985 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5986 		return NULL;
5987 	}
5988 
5989 	return NULL;
5990 }
5991 
5992 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5993 			       u8 bdaddr_type, bdaddr_t *direct_addr,
5994 			       u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
5995 			       u8 *data, u8 len, bool ext_adv, bool ctl_time,
5996 			       u64 instant)
5997 {
5998 	struct discovery_state *d = &hdev->discovery;
5999 	struct smp_irk *irk;
6000 	struct hci_conn *conn;
6001 	bool match, bdaddr_resolved;
6002 	u32 flags;
6003 	u8 *ptr;
6004 
6005 	switch (type) {
6006 	case LE_ADV_IND:
6007 	case LE_ADV_DIRECT_IND:
6008 	case LE_ADV_SCAN_IND:
6009 	case LE_ADV_NONCONN_IND:
6010 	case LE_ADV_SCAN_RSP:
6011 		break;
6012 	default:
6013 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6014 				       "type: 0x%02x", type);
6015 		return;
6016 	}
6017 
6018 	if (len > max_adv_len(hdev)) {
6019 		bt_dev_err_ratelimited(hdev,
6020 				       "adv larger than maximum supported");
6021 		return;
6022 	}
6023 
6024 	/* Find the end of the data in case the report contains padded zero
6025 	 * bytes at the end causing an invalid length value.
6026 	 *
6027 	 * When data is NULL, len is 0 so there is no need for extra ptr
6028 	 * check as 'ptr < data + 0' is already false in such case.
6029 	 */
6030 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6031 		if (ptr + 1 + *ptr > data + len)
6032 			break;
6033 	}
6034 
6035 	/* Adjust for actual length. This handles the case when the remote
6036 	 * device is advertising with an incorrect data length.
6037 	 */
6038 	len = ptr - data;
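
	/* Worked example (illustrative bytes): for the report data
	 *   02 01 06 03 03 aa fe 00 00
	 * the loop above walks a 3-byte Flags element (length octet 0x02)
	 * and a 4-byte 16-bit UUID element (length octet 0x03), then stops
	 * at the first zero length octet, so len is adjusted from 9 to 7.
	 */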
6039 
6040 	/* If the direct address is present, then this report is from
6041 	 * a LE Direct Advertising Report event. In that case it is
6042 	 * important to see if the address is matching the local
6043 	 * controller address.
6044 	 */
6045 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6046 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6047 						  &bdaddr_resolved);
6048 
6049 		/* Only resolvable random addresses are valid for these
6050 		 * kinds of reports and others can be ignored.
6051 		 */
6052 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6053 			return;
6054 
6055 		/* If the controller is not using resolvable random
6056 		 * addresses, then this report can be ignored.
6057 		 */
6058 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6059 			return;
6060 
6061 		/* If the local IRK of the controller does not match
6062 		 * with the resolvable random address provided, then
6063 		 * this report can be ignored.
6064 		 */
6065 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6066 			return;
6067 	}
6068 
6069 	/* Check if we need to convert to identity address */
6070 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6071 	if (irk) {
6072 		bdaddr = &irk->bdaddr;
6073 		bdaddr_type = irk->addr_type;
6074 	}
6075 
6076 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6077 
6078 	/* Check if we have been requested to connect to this device.
6079 	 *
6080 	 * direct_addr is set only for directed advertising reports (it is NULL
6081 	 * for advertising reports) and is already verified to be RPA above.
6082 	 */
6083 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6084 				     type, phy, sec_phy);
6085 	if (!ext_adv && conn && type == LE_ADV_IND &&
6086 	    len <= max_adv_len(hdev)) {
6087 		/* Store report for later inclusion by
6088 		 * mgmt_device_connected
6089 		 */
6090 		memcpy(conn->le_adv_data, data, len);
6091 		conn->le_adv_data_len = len;
6092 	}
6093 
6094 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6095 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6096 	else
6097 		flags = 0;
6098 
6099 	/* All scan results should be sent up for Mesh systems */
6100 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6101 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6102 				  rssi, flags, data, len, NULL, 0, instant);
6103 		return;
6104 	}
6105 
6106 	/* Passive scanning shouldn't trigger any device found events,
6107 	 * except for devices marked as CONN_REPORT for which we do send
6108 	 * device found events, or when advertisement monitoring was requested.
6109 	 */
6110 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6111 		if (type == LE_ADV_DIRECT_IND)
6112 			return;
6113 
6114 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6115 					       bdaddr, bdaddr_type) &&
6116 		    idr_is_empty(&hdev->adv_monitors_idr))
6117 			return;
6118 
6119 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6120 				  rssi, flags, data, len, NULL, 0, 0);
6121 		return;
6122 	}
6123 
6124 	/* When receiving a scan response, there is no way to
6125 	 * know if the remote device is connectable or not. However
6126 	 * since scan responses are merged with a previously seen
6127 	 * advertising report, the flags field from that report
6128 	 * will be used.
6129 	 *
6130 	 * In the unlikely case that a controller just sends a scan
6131 	 * response event that doesn't match the pending report, then
6132 	 * it is marked as a standalone SCAN_RSP.
6133 	 */
6134 	if (type == LE_ADV_SCAN_RSP)
6135 		flags = MGMT_DEV_FOUND_SCAN_RSP;
6136 
6137 	/* If there's nothing pending either store the data from this
6138 	 * event or send an immediate device found event if the data
6139 	 * should not be stored for later.
6140 	 */
6141 	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
6142 		/* If the report will trigger a SCAN_REQ store it for
6143 		 * later merging.
6144 		 */
6145 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6146 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6147 						 rssi, flags, data, len);
6148 			return;
6149 		}
6150 
6151 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6152 				  rssi, flags, data, len, NULL, 0, 0);
6153 		return;
6154 	}
6155 
6156 	/* Check if the pending report is for the same device as the new one */
6157 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6158 		 bdaddr_type == d->last_adv_addr_type);
6159 
6160 	/* If the pending data doesn't match this report or this isn't a
6161 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6162 	 * sending of the pending data.
6163 	 */
6164 	if (type != LE_ADV_SCAN_RSP || !match) {
6165 		/* Send out whatever is in the cache, but skip duplicates */
6166 		if (!match)
6167 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6168 					  d->last_adv_addr_type, NULL,
6169 					  d->last_adv_rssi, d->last_adv_flags,
6170 					  d->last_adv_data,
6171 					  d->last_adv_data_len, NULL, 0, 0);
6172 
6173 		/* If the new report will trigger a SCAN_REQ store it for
6174 		 * later merging.
6175 		 */
6176 		if (!ext_adv && (type == LE_ADV_IND ||
6177 				 type == LE_ADV_SCAN_IND)) {
6178 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6179 						 rssi, flags, data, len);
6180 			return;
6181 		}
6182 
6183 		/* The advertising reports cannot be merged, so clear
6184 		 * the pending report and send out a device found event.
6185 		 */
6186 		clear_pending_adv_report(hdev);
6187 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6188 				  rssi, flags, data, len, NULL, 0, 0);
6189 		return;
6190 	}
6191 
6192 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6193 	 * the new event is a SCAN_RSP. We can therefore proceed with
6194 	 * sending a merged device found event.
6195 	 */
6196 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6197 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6198 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6199 	clear_pending_adv_report(hdev);
6200 }
6201 
6202 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6203 				  struct sk_buff *skb)
6204 {
6205 	struct hci_ev_le_advertising_report *ev = data;
6206 	u64 instant = jiffies;
6207 
6208 	if (!ev->num)
6209 		return;
6210 
6211 	hci_dev_lock(hdev);
6212 
6213 	while (ev->num--) {
6214 		struct hci_ev_le_advertising_info *info;
6215 		s8 rssi;
6216 
6217 		info = hci_le_ev_skb_pull(hdev, skb,
6218 					  HCI_EV_LE_ADVERTISING_REPORT,
6219 					  sizeof(*info));
6220 		if (!info)
6221 			break;
6222 
6223 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6224 					info->length + 1))
6225 			break;
6226 
6227 		if (info->length <= max_adv_len(hdev)) {
6228 			rssi = info->data[info->length];
6229 			process_adv_report(hdev, info->type, &info->bdaddr,
6230 					   info->bdaddr_type, NULL, 0,
6231 					   HCI_ADV_PHY_1M, 0, rssi,
6232 					   info->data, info->length, false,
6233 					   false, instant);
6234 		} else {
6235 			bt_dev_err(hdev, "Dropping invalid advertising data");
6236 		}
6237 	}
6238 
6239 	hci_dev_unlock(hdev);
6240 }
6241 
6242 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6243 {
6244 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6245 		switch (evt_type) {
6246 		case LE_LEGACY_ADV_IND:
6247 			return LE_ADV_IND;
6248 		case LE_LEGACY_ADV_DIRECT_IND:
6249 			return LE_ADV_DIRECT_IND;
6250 		case LE_LEGACY_ADV_SCAN_IND:
6251 			return LE_ADV_SCAN_IND;
6252 		case LE_LEGACY_NONCONN_IND:
6253 			return LE_ADV_NONCONN_IND;
6254 		case LE_LEGACY_SCAN_RSP_ADV:
6255 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6256 			return LE_ADV_SCAN_RSP;
6257 		}
6258 
6259 		goto invalid;
6260 	}
6261 
6262 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6263 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6264 			return LE_ADV_DIRECT_IND;
6265 
6266 		return LE_ADV_IND;
6267 	}
6268 
6269 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6270 		return LE_ADV_SCAN_RSP;
6271 
6272 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6273 		return LE_ADV_SCAN_IND;
6274 
6275 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6276 	    evt_type & LE_EXT_ADV_DIRECT_IND)
6277 		return LE_ADV_NONCONN_IND;
6278 
6279 invalid:
6280 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6281 			       evt_type);
6282 
6283 	return LE_ADV_INVALID;
6284 }
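
/* Example (illustrative): an extended report carrying a legacy ADV_IND
 * PDU has LE_EXT_ADV_LEGACY_PDU set and matches LE_LEGACY_ADV_IND, so
 * it is translated to LE_ADV_IND above. A non-legacy connectable report
 * maps via the LE_EXT_ADV_CONN_IND branch to LE_ADV_IND, or to
 * LE_ADV_DIRECT_IND when the directed bit is also set.
 */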
6285 
6286 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6287 				      struct sk_buff *skb)
6288 {
6289 	struct hci_ev_le_ext_adv_report *ev = data;
6290 	u64 instant = jiffies;
6291 
6292 	if (!ev->num)
6293 		return;
6294 
6295 	hci_dev_lock(hdev);
6296 
6297 	while (ev->num--) {
6298 		struct hci_ev_le_ext_adv_info *info;
6299 		u8 legacy_evt_type;
6300 		u16 evt_type;
6301 
6302 		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6303 					  sizeof(*info));
6304 		if (!info)
6305 			break;
6306 
6307 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6308 					info->length))
6309 			break;
6310 
6311 		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6312 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6313 
6314 		if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
6315 			     &hdev->quirks)) {
6316 			info->primary_phy &= 0x1f;
6317 			info->secondary_phy &= 0x1f;
6318 		}
6319 
6320 		if (legacy_evt_type != LE_ADV_INVALID) {
6321 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6322 					   info->bdaddr_type, NULL, 0,
6323 					   info->primary_phy,
6324 					   info->secondary_phy,
6325 					   info->rssi, info->data, info->length,
6326 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6327 					   false, instant);
6328 		}
6329 	}
6330 
6331 	hci_dev_unlock(hdev);
6332 }
6333 
6334 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6335 {
6336 	struct hci_cp_le_pa_term_sync cp;
6337 
6338 	memset(&cp, 0, sizeof(cp));
6339 	cp.handle = handle;
6340 
6341 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6342 }
6343 
6344 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6345 					    struct sk_buff *skb)
6346 {
6347 	struct hci_ev_le_pa_sync_established *ev = data;
6348 	int mask = hdev->link_mode;
6349 	__u8 flags = 0;
6350 	struct hci_conn *pa_sync;
6351 
6352 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6353 
6354 	hci_dev_lock(hdev);
6355 
6356 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6357 
6358 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6359 	if (!(mask & HCI_LM_ACCEPT)) {
6360 		hci_le_pa_term_sync(hdev, ev->handle);
6361 		goto unlock;
6362 	}
6363 
6364 	if (!(flags & HCI_PROTO_DEFER))
6365 		goto unlock;
6366 
6367 	/* Add connection to indicate PA sync event */
6368 	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6369 				     HCI_ROLE_SLAVE);
6370 
6371 	if (IS_ERR(pa_sync))
6372 		goto unlock;
6373 
6374 	pa_sync->sync_handle = le16_to_cpu(ev->handle);
6375 
6376 	if (ev->status) {
6377 		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6378 
6379 		/* Notify iso layer */
6380 		hci_connect_cfm(pa_sync, ev->status);
6381 	}
6382 
6383 unlock:
6384 	hci_dev_unlock(hdev);
6385 }
6386 
6387 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6388 				      struct sk_buff *skb)
6389 {
6390 	struct hci_ev_le_per_adv_report *ev = data;
6391 	int mask = hdev->link_mode;
6392 	__u8 flags = 0;
6393 	struct hci_conn *pa_sync;
6394 
6395 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6396 
6397 	hci_dev_lock(hdev);
6398 
6399 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6400 	if (!(mask & HCI_LM_ACCEPT))
6401 		goto unlock;
6402 
6403 	if (!(flags & HCI_PROTO_DEFER))
6404 		goto unlock;
6405 
6406 	pa_sync = hci_conn_hash_lookup_pa_sync_handle
6407 			(hdev,
6408 			le16_to_cpu(ev->sync_handle));
6409 
6410 	if (!pa_sync)
6411 		goto unlock;
6412 
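	/* Notify only once: test_and_set_bit() makes the PA sync
	 * confirmation below one-shot for this connection.
	 */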
6413 	if (ev->data_status == LE_PA_DATA_COMPLETE &&
6414 	    !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
6415 		/* Notify iso layer */
6416 		hci_connect_cfm(pa_sync, 0);
6417 
6418 		/* Notify MGMT layer */
6419 		mgmt_device_connected(hdev, pa_sync, NULL, 0);
6420 	}
6421 
6422 unlock:
6423 	hci_dev_unlock(hdev);
6424 }
6425 
6426 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6427 					    struct sk_buff *skb)
6428 {
6429 	struct hci_ev_le_remote_feat_complete *ev = data;
6430 	struct hci_conn *conn;
6431 
6432 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6433 
6434 	hci_dev_lock(hdev);
6435 
6436 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6437 	if (conn) {
6438 		if (!ev->status)
6439 			memcpy(conn->features[0], ev->features, 8);
6440 
6441 		if (conn->state == BT_CONFIG) {
6442 			__u8 status;
6443 
6444 			/* If the local controller supports peripheral-initiated
6445 			 * features exchange, but the remote controller does
6446 			 * not, then it is possible that the error code 0x1a
6447 			 * for unsupported remote feature gets returned.
6448 			 *
6449 			 * In this specific case, allow the connection to
6450 			 * transition into connected state and mark it as
6451 			 * successful.
6452 			 */
6453 			if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6454 			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6455 				status = 0x00;
6456 			else
6457 				status = ev->status;
6458 
6459 			conn->state = BT_CONNECTED;
6460 			hci_connect_cfm(conn, status);
6461 			hci_conn_drop(conn);
6462 		}
6463 	}
6464 
6465 	hci_dev_unlock(hdev);
6466 }
6467 
6468 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6469 				   struct sk_buff *skb)
6470 {
6471 	struct hci_ev_le_ltk_req *ev = data;
6472 	struct hci_cp_le_ltk_reply cp;
6473 	struct hci_cp_le_ltk_neg_reply neg;
6474 	struct hci_conn *conn;
6475 	struct smp_ltk *ltk;
6476 
6477 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6478 
6479 	hci_dev_lock(hdev);
6480 
6481 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6482 	if (conn == NULL)
6483 		goto not_found;
6484 
6485 	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6486 	if (!ltk)
6487 		goto not_found;
6488 
6489 	if (smp_ltk_is_sc(ltk)) {
6490 		/* With SC both EDiv and Rand are set to zero */
6491 		if (ev->ediv || ev->rand)
6492 			goto not_found;
6493 	} else {
6494 		/* For non-SC keys check that EDiv and Rand match */
6495 		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6496 			goto not_found;
6497 	}
6498 
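	/* Keys with a reduced encryption key size (enc_size < 16) are
	 * padded with zeros up to the full 128-bit LTK field.
	 */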
6499 	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6500 	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6501 	cp.handle = cpu_to_le16(conn->handle);
6502 
6503 	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6504 
6505 	conn->enc_key_size = ltk->enc_size;
6506 
6507 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6508 
6509 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6510 	 * temporary key used to encrypt a connection following
6511 	 * pairing. It is used during the Encrypted Session Setup to
6512 	 * distribute the keys. Later, security can be re-established
6513 	 * using a distributed LTK.
6514 	 */
6515 	if (ltk->type == SMP_STK) {
6516 		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6517 		list_del_rcu(&ltk->list);
6518 		kfree_rcu(ltk, rcu);
6519 	} else {
6520 		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6521 	}
6522 
6523 	hci_dev_unlock(hdev);
6524 
6525 	return;
6526 
6527 not_found:
6528 	neg.handle = ev->handle;
6529 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6530 	hci_dev_unlock(hdev);
6531 }
6532 
6533 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6534 				      u8 reason)
6535 {
6536 	struct hci_cp_le_conn_param_req_neg_reply cp;
6537 
6538 	cp.handle = cpu_to_le16(handle);
6539 	cp.reason = reason;
6540 
6541 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6542 		     &cp);
6543 }
6544 
6545 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6546 					     struct sk_buff *skb)
6547 {
6548 	struct hci_ev_le_remote_conn_param_req *ev = data;
6549 	struct hci_cp_le_conn_param_req_reply cp;
6550 	struct hci_conn *hcon;
6551 	u16 handle, min, max, latency, timeout;
6552 
6553 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6554 
6555 	handle = le16_to_cpu(ev->handle);
6556 	min = le16_to_cpu(ev->interval_min);
6557 	max = le16_to_cpu(ev->interval_max);
6558 	latency = le16_to_cpu(ev->latency);
6559 	timeout = le16_to_cpu(ev->timeout);
6560 
6561 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6562 	if (!hcon || hcon->state != BT_CONNECTED)
6563 		return send_conn_param_neg_reply(hdev, handle,
6564 						 HCI_ERROR_UNKNOWN_CONN_ID);
6565 
6566 	if (max > hcon->le_conn_max_interval)
6567 		return send_conn_param_neg_reply(hdev, handle,
6568 						 HCI_ERROR_INVALID_LL_PARAMS);
6569 
6570 	if (hci_check_conn_params(min, max, latency, timeout))
6571 		return send_conn_param_neg_reply(hdev, handle,
6572 						 HCI_ERROR_INVALID_LL_PARAMS);
6573 
6574 	if (hcon->role == HCI_ROLE_MASTER) {
6575 		struct hci_conn_params *params;
6576 		u8 store_hint;
6577 
6578 		hci_dev_lock(hdev);
6579 
6580 		params = hci_conn_params_lookup(hdev, &hcon->dst,
6581 						hcon->dst_type);
6582 		if (params) {
6583 			params->conn_min_interval = min;
6584 			params->conn_max_interval = max;
6585 			params->conn_latency = latency;
6586 			params->supervision_timeout = timeout;
6587 			store_hint = 0x01;
6588 		} else {
6589 			store_hint = 0x00;
6590 		}
6591 
6592 		hci_dev_unlock(hdev);
6593 
6594 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6595 				    store_hint, min, max, latency, timeout);
6596 	}
6597 
6598 	cp.handle = ev->handle;
6599 	cp.interval_min = ev->interval_min;
6600 	cp.interval_max = ev->interval_max;
6601 	cp.latency = ev->latency;
6602 	cp.timeout = ev->timeout;
6603 	cp.min_ce_len = 0;
6604 	cp.max_ce_len = 0;
6605 
6606 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6607 }
6608 
6609 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6610 					 struct sk_buff *skb)
6611 {
6612 	struct hci_ev_le_direct_adv_report *ev = data;
6613 	u64 instant = jiffies;
6614 	int i;
6615 
6616 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6617 				flex_array_size(ev, info, ev->num)))
6618 		return;
6619 
6620 	if (!ev->num)
6621 		return;
6622 
6623 	hci_dev_lock(hdev);
6624 
6625 	for (i = 0; i < ev->num; i++) {
6626 		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6627 
6628 		process_adv_report(hdev, info->type, &info->bdaddr,
6629 				   info->bdaddr_type, &info->direct_addr,
6630 				   info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6631 				   info->rssi, NULL, 0, false, false, instant);
6632 	}
6633 
6634 	hci_dev_unlock(hdev);
6635 }
6636 
6637 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6638 				  struct sk_buff *skb)
6639 {
6640 	struct hci_ev_le_phy_update_complete *ev = data;
6641 	struct hci_conn *conn;
6642 
6643 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6644 
6645 	if (ev->status)
6646 		return;
6647 
6648 	hci_dev_lock(hdev);
6649 
6650 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6651 	if (!conn)
6652 		goto unlock;
6653 
6654 	conn->le_tx_phy = ev->tx_phy;
6655 	conn->le_rx_phy = ev->rx_phy;
6656 
6657 unlock:
6658 	hci_dev_unlock(hdev);
6659 }
6660 
6661 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6662 					struct sk_buff *skb)
6663 {
6664 	struct hci_evt_le_cis_established *ev = data;
6665 	struct hci_conn *conn;
6666 	struct bt_iso_qos *qos;
6667 	bool pending = false;
6668 	u16 handle = __le16_to_cpu(ev->handle);
6669 	u32 c_sdu_interval, p_sdu_interval;
6670 
6671 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6672 
6673 	hci_dev_lock(hdev);
6674 
6675 	conn = hci_conn_hash_lookup_handle(hdev, handle);
6676 	if (!conn) {
6677 		bt_dev_err(hdev,
6678 			   "Unable to find connection with handle 0x%4.4x",
6679 			   handle);
6680 		goto unlock;
6681 	}
6682 
6683 	if (conn->type != ISO_LINK) {
6684 		bt_dev_err(hdev,
6685 			   "Invalid connection link type handle 0x%4.4x",
6686 			   handle);
6687 		goto unlock;
6688 	}
6689 
6690 	qos = &conn->iso_qos;
6691 
6692 	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6693 
6694 	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
6695 	 * page 3075:
6696 	 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
6697 	 * ISO_Interval + SDU_Interval_C_To_P
6698 	 * ...
6699 	 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
6700 	 *					Transport_Latency
6701 	 */
6702 	c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6703 			 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
6704 			get_unaligned_le24(ev->c_latency);
6705 	p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6706 			 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
6707 			get_unaligned_le24(ev->p_latency);
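
	/* Worked example (illustrative values): with a CIG_Sync_Delay of
	 * 5000 us, FT_C_To_P of 2, an ISO_Interval of 8 (8 x 1250 us =
	 * 10 ms) and a Transport_Latency_C_To_P of 15000 us, the above
	 * gives 5000 + 20000 - 15000 = 10000 us for SDU_Interval_C_To_P.
	 */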
6708 
6709 	switch (conn->role) {
6710 	case HCI_ROLE_SLAVE:
6711 		qos->ucast.in.interval = c_sdu_interval;
6712 		qos->ucast.out.interval = p_sdu_interval;
6713 		/* Convert Transport Latency (us) to Latency (msec) */
6714 		qos->ucast.in.latency =
6715 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6716 					  1000);
6717 		qos->ucast.out.latency =
6718 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6719 					  1000);
6720 		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6721 		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6722 		qos->ucast.in.phy = ev->c_phy;
6723 		qos->ucast.out.phy = ev->p_phy;
6724 		break;
6725 	case HCI_ROLE_MASTER:
6726 		qos->ucast.in.interval = p_sdu_interval;
6727 		qos->ucast.out.interval = c_sdu_interval;
6728 		/* Convert Transport Latency (us) to Latency (msec) */
6729 		qos->ucast.out.latency =
6730 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6731 					  1000);
6732 		qos->ucast.in.latency =
6733 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6734 					  1000);
6735 		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6736 		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6737 		qos->ucast.out.phy = ev->c_phy;
6738 		qos->ucast.in.phy = ev->p_phy;
6739 		break;
6740 	}
6741 
6742 	if (!ev->status) {
6743 		conn->state = BT_CONNECTED;
6744 		hci_debugfs_create_conn(conn);
6745 		hci_conn_add_sysfs(conn);
6746 		hci_iso_setup_path(conn);
6747 		goto unlock;
6748 	}
6749 
6750 	conn->state = BT_CLOSED;
6751 	hci_connect_cfm(conn, ev->status);
6752 	hci_conn_del(conn);
6753 
6754 unlock:
6755 	if (pending)
6756 		hci_le_create_cis_pending(hdev);
6757 
6758 	hci_dev_unlock(hdev);
6759 }
6760 
6761 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6762 {
6763 	struct hci_cp_le_reject_cis cp;
6764 
6765 	memset(&cp, 0, sizeof(cp));
6766 	cp.handle = handle;
6767 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6768 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6769 }
6770 
6771 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6772 {
6773 	struct hci_cp_le_accept_cis cp;
6774 
6775 	memset(&cp, 0, sizeof(cp));
6776 	cp.handle = handle;
6777 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6778 }
6779 
6780 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6781 			       struct sk_buff *skb)
6782 {
6783 	struct hci_evt_le_cis_req *ev = data;
6784 	u16 acl_handle, cis_handle;
6785 	struct hci_conn *acl, *cis;
6786 	int mask;
6787 	__u8 flags = 0;
6788 
6789 	acl_handle = __le16_to_cpu(ev->acl_handle);
6790 	cis_handle = __le16_to_cpu(ev->cis_handle);
6791 
6792 	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6793 		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6794 
6795 	hci_dev_lock(hdev);
6796 
6797 	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6798 	if (!acl)
6799 		goto unlock;
6800 
6801 	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6802 	if (!(mask & HCI_LM_ACCEPT)) {
6803 		hci_le_reject_cis(hdev, ev->cis_handle);
6804 		goto unlock;
6805 	}
6806 
6807 	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6808 	if (!cis) {
6809 		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
6810 				   cis_handle);
6811 		if (IS_ERR(cis)) {
6812 			hci_le_reject_cis(hdev, ev->cis_handle);
6813 			goto unlock;
6814 		}
6815 	}
6816 
6817 	cis->iso_qos.ucast.cig = ev->cig_id;
6818 	cis->iso_qos.ucast.cis = ev->cis_id;
6819 
6820 	if (!(flags & HCI_PROTO_DEFER)) {
6821 		hci_le_accept_cis(hdev, ev->cis_handle);
6822 	} else {
6823 		cis->state = BT_CONNECT2;
6824 		hci_connect_cfm(cis, 0);
6825 	}
6826 
6827 unlock:
6828 	hci_dev_unlock(hdev);
6829 }
6830 
6831 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6832 {
6833 	u8 handle = PTR_UINT(data);
6834 
6835 	return hci_le_terminate_big_sync(hdev, handle,
6836 					 HCI_ERROR_LOCAL_HOST_TERM);
6837 }
6838 
6839 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6840 					   struct sk_buff *skb)
6841 {
6842 	struct hci_evt_le_create_big_complete *ev = data;
6843 	struct hci_conn *conn;
6844 	__u8 i = 0;
6845 
6846 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6847 
6848 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6849 				flex_array_size(ev, bis_handle, ev->num_bis)))
6850 		return;
6851 
6852 	hci_dev_lock(hdev);
6853 	rcu_read_lock();
6854 
6855 	/* Connect all BISes that are bound to the BIG */
6856 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6857 		if (bacmp(&conn->dst, BDADDR_ANY) ||
6858 		    conn->type != ISO_LINK ||
6859 		    conn->iso_qos.bcast.big != ev->handle)
6860 			continue;
6861 
6862 		if (hci_conn_set_handle(conn,
6863 					__le16_to_cpu(ev->bis_handle[i++])))
6864 			continue;
6865 
6866 		if (!ev->status) {
6867 			conn->state = BT_CONNECTED;
6868 			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6869 			rcu_read_unlock();
6870 			hci_debugfs_create_conn(conn);
6871 			hci_conn_add_sysfs(conn);
6872 			hci_iso_setup_path(conn);
6873 			rcu_read_lock();
6874 			continue;
6875 		}
6876 
6877 		hci_connect_cfm(conn, ev->status);
6878 		rcu_read_unlock();
6879 		hci_conn_del(conn);
6880 		rcu_read_lock();
6881 	}
6882 
6883 	rcu_read_unlock();
6884 
6885 	if (!ev->status && !i)
6886 		/* If no BISes have been connected for the BIG,
6887 		 * terminate. This is in case all bound connections
6888 		 * have been closed before the BIG creation
6889 		 * has completed.
6890 		 */
6891 		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6892 				   UINT_PTR(ev->handle), NULL);
6893 
6894 	hci_dev_unlock(hdev);
6895 }
6896 
6897 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6898 					    struct sk_buff *skb)
6899 {
6900 	struct hci_evt_le_big_sync_estabilished *ev = data;
6901 	struct hci_conn *bis;
6902 	int i;
6903 
6904 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6905 
6906 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
6907 				flex_array_size(ev, bis, ev->num_bis)))
6908 		return;
6909 
6910 	hci_dev_lock(hdev);
6911 
6912 	for (i = 0; i < ev->num_bis; i++) {
6913 		u16 handle = le16_to_cpu(ev->bis[i]);
6914 		__le32 interval;
6915 
6916 		bis = hci_conn_hash_lookup_handle(hdev, handle);
6917 		if (!bis) {
6918 			if (handle > HCI_CONN_HANDLE_MAX) {
6919 				bt_dev_dbg(hdev, "ignore too large handle %u", handle);
6920 				continue;
6921 			}
6922 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
6923 					   HCI_ROLE_SLAVE, handle);
6924 			if (IS_ERR(bis))
6925 				continue;
6926 		}
6927 
6928 		if (ev->status != 0x42)
6929 			/* Mark PA sync as established */
6930 			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
6931 
6932 		bis->iso_qos.bcast.big = ev->handle;
6933 		memset(&interval, 0, sizeof(interval));
6934 		memcpy(&interval, ev->latency, sizeof(ev->latency));
6935 		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
6936 		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
6937 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
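		/* e.g. an ISO_Interval of 24 slots is 24 * 125 / 100 = 30 ms
		 * (illustrative value).
		 */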
6938 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
6939 
6940 		if (!ev->status) {
6941 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
6942 			hci_iso_setup_path(bis);
6943 		}
6944 	}
6945 
6946 	/* In case BIG sync failed, notify the user of each failed
6947 	 * connection after all hci connections have been added.
6948 	 */
6949 	if (ev->status)
6950 		for (i = 0; i < ev->num_bis; i++) {
6951 			u16 handle = le16_to_cpu(ev->bis[i]);
6952 
6953 			bis = hci_conn_hash_lookup_handle(hdev, handle);
6954 			if (!bis)
6955 				continue;
6956 
6957 			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
6958 			hci_connect_cfm(bis, ev->status);
6959 		}
6960 
6961 	hci_dev_unlock(hdev);
6962 }
6963 
6964 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
6965 					   struct sk_buff *skb)
6966 {
6967 	struct hci_evt_le_big_info_adv_report *ev = data;
6968 	int mask = hdev->link_mode;
6969 	__u8 flags = 0;
6970 	struct hci_conn *pa_sync;
6971 
6972 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6973 
6974 	hci_dev_lock(hdev);
6975 
6976 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6977 	if (!(mask & HCI_LM_ACCEPT))
6978 		goto unlock;
6979 
6980 	if (!(flags & HCI_PROTO_DEFER))
6981 		goto unlock;
6982 
6983 	pa_sync = hci_conn_hash_lookup_pa_sync_handle
6984 			(hdev,
6985 			le16_to_cpu(ev->sync_handle));
6986 
6987 	if (!pa_sync)
6988 		goto unlock;
6989 
6990 	pa_sync->iso_qos.bcast.encryption = ev->encryption;
6991 
6992 	/* Notify iso layer */
6993 	hci_connect_cfm(pa_sync, 0);
6994 
6995 unlock:
6996 	hci_dev_unlock(hdev);
6997 }
6998 
6999 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7000 [_op] = { \
7001 	.func = _func, \
7002 	.min_len = _min_len, \
7003 	.max_len = _max_len, \
7004 }
7005 
7006 #define HCI_LE_EV(_op, _func, _len) \
7007 	HCI_LE_EV_VL(_op, _func, _len, _len)
7008 
7009 #define HCI_LE_EV_STATUS(_op, _func) \
7010 	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7011 
7012 /* Entries in this table shall be positioned according to the subevent
7013  * opcode they handle, so use of the macros above is recommended since they
7014  * initialize each entry at its proper index using designated initializers;
7015  * that way events without a callback function can be omitted.
7016  */
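/* For example (illustrative expansion), the HCI_LE_EV() entry for
 * HCI_EV_LE_CONN_COMPLETE below becomes
 *
 *   [0x01] = {
 *           .func    = hci_le_conn_complete_evt,
 *           .min_len = sizeof(struct hci_ev_le_conn_complete),
 *           .max_len = sizeof(struct hci_ev_le_conn_complete),
 *   },
 *
 * and any subevent without an explicit initializer is zero-filled,
 * leaving .func NULL so hci_le_meta_evt() silently ignores it.
 */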
7017 static const struct hci_le_ev {
7018 	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7019 	u16  min_len;
7020 	u16  max_len;
7021 } hci_le_ev_table[U8_MAX + 1] = {
7022 	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7023 	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7024 		  sizeof(struct hci_ev_le_conn_complete)),
7025 	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7026 	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7027 		     sizeof(struct hci_ev_le_advertising_report),
7028 		     HCI_MAX_EVENT_SIZE),
7029 	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7030 	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7031 		  hci_le_conn_update_complete_evt,
7032 		  sizeof(struct hci_ev_le_conn_update_complete)),
7033 	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7034 	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7035 		  hci_le_remote_feat_complete_evt,
7036 		  sizeof(struct hci_ev_le_remote_feat_complete)),
7037 	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7038 	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7039 		  sizeof(struct hci_ev_le_ltk_req)),
7040 	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7041 	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7042 		  hci_le_remote_conn_param_req_evt,
7043 		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7044 	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7045 	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7046 		  hci_le_enh_conn_complete_evt,
7047 		  sizeof(struct hci_ev_le_enh_conn_complete)),
7048 	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7049 	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7050 		     sizeof(struct hci_ev_le_direct_adv_report),
7051 		     HCI_MAX_EVENT_SIZE),
7052 	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7053 	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7054 		  sizeof(struct hci_ev_le_phy_update_complete)),
7055 	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7056 	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7057 		     sizeof(struct hci_ev_le_ext_adv_report),
7058 		     HCI_MAX_EVENT_SIZE),
7059 	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7060 	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7061 		  hci_le_pa_sync_estabilished_evt,
7062 		  sizeof(struct hci_ev_le_pa_sync_established)),
7063 	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7064 	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7065 				 hci_le_per_adv_report_evt,
7066 				 sizeof(struct hci_ev_le_per_adv_report),
7067 				 HCI_MAX_EVENT_SIZE),
7068 	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7069 	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7070 		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7071 	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7072 	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7073 		  sizeof(struct hci_evt_le_cis_established)),
7074 	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7075 	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7076 		  sizeof(struct hci_evt_le_cis_req)),
7077 	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7078 	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7079 		     hci_le_create_big_complete_evt,
7080 		     sizeof(struct hci_evt_le_create_big_complete),
7081 		     HCI_MAX_EVENT_SIZE),
7082 	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7083 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7084 		     hci_le_big_sync_established_evt,
7085 		     sizeof(struct hci_evt_le_big_sync_estabilished),
7086 		     HCI_MAX_EVENT_SIZE),
7087 	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7088 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7089 		     hci_le_big_info_adv_report_evt,
7090 		     sizeof(struct hci_evt_le_big_info_adv_report),
7091 		     HCI_MAX_EVENT_SIZE),
7092 };
7093 
7094 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7095 			    struct sk_buff *skb, u16 *opcode, u8 *status,
7096 			    hci_req_complete_t *req_complete,
7097 			    hci_req_complete_skb_t *req_complete_skb)
7098 {
7099 	struct hci_ev_le_meta *ev = data;
7100 	const struct hci_le_ev *subev;
7101 
7102 	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7103 
7104 	/* Only match event if command OGF is for LE (OGF 0x08) */
7105 	if (hdev->req_skb &&
7106 	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
7107 	    hci_skb_event(hdev->req_skb) == ev->subevent) {
7108 		*opcode = hci_skb_opcode(hdev->req_skb);
7109 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7110 				     req_complete_skb);
7111 	}
7112 
7113 	subev = &hci_le_ev_table[ev->subevent];
7114 	if (!subev->func)
7115 		return;
7116 
7117 	if (skb->len < subev->min_len) {
7118 		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7119 			   ev->subevent, skb->len, subev->min_len);
7120 		return;
7121 	}
7122 
7123 	/* Just warn if the length is over max_len since it may still be
7124 	 * possible to partially parse the event, so leave it to the
7125 	 * callback to decide if that is acceptable.
7126 	 */
7127 	if (skb->len > subev->max_len)
7128 		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7129 			    ev->subevent, skb->len, subev->max_len);
7130 	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7131 	if (!data)
7132 		return;
7133 
7134 	subev->func(hdev, data, skb);
7135 }
7136 
7137 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7138 				 u8 event, struct sk_buff *skb)
7139 {
7140 	struct hci_ev_cmd_complete *ev;
7141 	struct hci_event_hdr *hdr;
7142 
7143 	if (!skb)
7144 		return false;
7145 
7146 	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7147 	if (!hdr)
7148 		return false;
7149 
7150 	if (event) {
7151 		if (hdr->evt != event)
7152 			return false;
7153 		return true;
7154 	}
7155 
7156 	/* Check if request ended in Command Status - no way to retrieve
7157 	 * any extra parameters in this case.
7158 	 */
7159 	if (hdr->evt == HCI_EV_CMD_STATUS)
7160 		return false;
7161 
7162 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7163 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7164 			   hdr->evt);
7165 		return false;
7166 	}
7167 
7168 	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7169 	if (!ev)
7170 		return false;
7171 
7172 	if (opcode != __le16_to_cpu(ev->opcode)) {
7173 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7174 		       __le16_to_cpu(ev->opcode));
7175 		return false;
7176 	}
7177 
7178 	return true;
7179 }
7180 
7181 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7182 				  struct sk_buff *skb)
7183 {
7184 	struct hci_ev_le_advertising_info *adv;
7185 	struct hci_ev_le_direct_adv_info *direct_adv;
7186 	struct hci_ev_le_ext_adv_info *ext_adv;
7187 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7188 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7189 
7190 	hci_dev_lock(hdev);
7191 
7192 	/* If we are currently suspended and this is the first BT event seen,
7193 	 * save the wake reason associated with the event.
7194 	 */
7195 	if (!hdev->suspended || hdev->wake_reason)
7196 		goto unlock;
7197 
7198 	/* Default to remote wake. Values for wake_reason are documented in the
7199 	 * BlueZ mgmt API docs.
7200 	 */
7201 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7202 
7203 	/* Once configured for remote wakeup, we should only wake up for
7204 	 * reconnections. It's useful to see which device is waking us up so
7205 	 * keep track of the bdaddr of the connection event that woke us up.
7206 	 */
7207 	if (event == HCI_EV_CONN_REQUEST) {
7208 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7209 		hdev->wake_addr_type = BDADDR_BREDR;
7210 	} else if (event == HCI_EV_CONN_COMPLETE) {
7211 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7212 		hdev->wake_addr_type = BDADDR_BREDR;
7213 	} else if (event == HCI_EV_LE_META) {
7214 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7215 		u8 subevent = le_ev->subevent;
7216 		u8 *ptr = &skb->data[sizeof(*le_ev)];
7217 		u8 num_reports = *ptr;
7218 
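		/* All three report subevents carry a Num_Reports octet
		 * immediately after the subevent code, followed by the
		 * first report structure, hence the ptr + 1 casts below.
		 */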
7219 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7220 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7221 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7222 		    num_reports) {
7223 			adv = (void *)(ptr + 1);
7224 			direct_adv = (void *)(ptr + 1);
7225 			ext_adv = (void *)(ptr + 1);
7226 
7227 			switch (subevent) {
7228 			case HCI_EV_LE_ADVERTISING_REPORT:
7229 				bacpy(&hdev->wake_addr, &adv->bdaddr);
7230 				hdev->wake_addr_type = adv->bdaddr_type;
7231 				break;
7232 			case HCI_EV_LE_DIRECT_ADV_REPORT:
7233 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7234 				hdev->wake_addr_type = direct_adv->bdaddr_type;
7235 				break;
7236 			case HCI_EV_LE_EXT_ADV_REPORT:
7237 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7238 				hdev->wake_addr_type = ext_adv->bdaddr_type;
7239 				break;
7240 			}
7241 		}
7242 	} else {
7243 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7244 	}
7245 
7246 unlock:
7247 	hci_dev_unlock(hdev);
7248 }
7249 
7250 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7251 [_op] = { \
7252 	.req = false, \
7253 	.func = _func, \
7254 	.min_len = _min_len, \
7255 	.max_len = _max_len, \
7256 }
7257 
7258 #define HCI_EV(_op, _func, _len) \
7259 	HCI_EV_VL(_op, _func, _len, _len)
7260 
7261 #define HCI_EV_STATUS(_op, _func) \
7262 	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7263 
7264 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7265 [_op] = { \
7266 	.req = true, \
7267 	.func_req = _func, \
7268 	.min_len = _min_len, \
7269 	.max_len = _max_len, \
7270 }
7271 
7272 #define HCI_EV_REQ(_op, _func, _len) \
7273 	HCI_EV_REQ_VL(_op, _func, _len, _len)
7274 
7275 /* Entries in this table shall have their position according to the event opcode
7276  * they handle, so use of the macros above is recommended since they initialize
7277  * each entry at its proper index using designated initializers; that way
7278  * events without a callback function need no entry.
7279  */
7280 static const struct hci_ev {
7281 	bool req;
7282 	union {
7283 		void (*func)(struct hci_dev *hdev, void *data,
7284 			     struct sk_buff *skb);
7285 		void (*func_req)(struct hci_dev *hdev, void *data,
7286 				 struct sk_buff *skb, u16 *opcode, u8 *status,
7287 				 hci_req_complete_t *req_complete,
7288 				 hci_req_complete_skb_t *req_complete_skb);
7289 	};
7290 	u16  min_len;
7291 	u16  max_len;
7292 } hci_ev_table[U8_MAX + 1] = {
7293 	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7294 	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7295 	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7296 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7297 		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7298 	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7299 	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7300 	       sizeof(struct hci_ev_conn_complete)),
7301 	/* [0x04 = HCI_EV_CONN_REQUEST] */
7302 	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7303 	       sizeof(struct hci_ev_conn_request)),
7304 	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7305 	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7306 	       sizeof(struct hci_ev_disconn_complete)),
7307 	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7308 	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7309 	       sizeof(struct hci_ev_auth_complete)),
7310 	/* [0x07 = HCI_EV_REMOTE_NAME] */
7311 	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7312 	       sizeof(struct hci_ev_remote_name)),
7313 	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7314 	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7315 	       sizeof(struct hci_ev_encrypt_change)),
7316 	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7317 	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7318 	       hci_change_link_key_complete_evt,
7319 	       sizeof(struct hci_ev_change_link_key_complete)),
7320 	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7321 	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7322 	       sizeof(struct hci_ev_remote_features)),
7323 	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7324 	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7325 		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7326 	/* [0x0f = HCI_EV_CMD_STATUS] */
7327 	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7328 		   sizeof(struct hci_ev_cmd_status)),
7329 	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7330 	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7331 	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

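/* Dispatch an event to its handler in hci_ev_table, after checking that
 * the packet length falls within the entry's declared bounds and pulling
 * the fixed-size portion of the event.
 */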
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len; it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide whether that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

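	/* Handlers flagged as request-aware additionally receive the
	 * opcode, status and completion callbacks so they can complete a
	 * pending command request.
	 */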
	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

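/* Process a single HCI event packet: validate the header, match the
 * event against a pending command request, dispatch it to the handler
 * table and finally run any resulting completion callback.
 */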
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

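	/* Keep a copy of the most recently received event so it can be
	 * retrieved later (see hci_recv_event_data()).
	 */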
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	event = hdr->evt;
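	/* The Core specification defines no event with code 0x00 */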
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE (OGF 0x08): for LE
	 * commands the awaited event is an LE Meta subevent, which is
	 * matched in hci_le_meta_evt() instead.
	 */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

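	/* Strip the event header so the handlers start at the payload */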
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

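	/* Run any pending completion: the plain callback only needs the
	 * status, while the skb variant is also handed the pristine copy,
	 * trimmed down to the Command Complete parameters (or NULL if the
	 * copy does not match the expected event).
	 */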
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}