xref: /linux/net/bluetooth/hci_event.c (revision ae7487d112cf9f310d32f27d5956f53d9f143fea)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI event handling. */
27 
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 
34 #include "hci_request.h"
35 #include "hci_debugfs.h"
36 #include "hci_codec.h"
37 #include "a2mp.h"
38 #include "amp.h"
39 #include "smp.h"
40 #include "msft.h"
41 #include "eir.h"
42 
43 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
44 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
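
/* All-zero 16-octet key pattern, used later in this file (link-key
 * handling, outside this excerpt) to detect and reject obviously bogus
 * all-zero link keys received from a peer.
 */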
45 
46 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
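
/* Local convenience wrapper: expresses second-granularity timeouts in
 * jiffies by going through msecs_to_jiffies(). For example, the RPA
 * expiry work below is re-armed with
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
 *			   secs_to_jiffies(hdev->rpa_timeout));
 *
 * which expands to msecs_to_jiffies(hdev->rpa_timeout * 1000).
 */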
47 
48 /* Handle HCI Event packets */
49 
50 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
51 			     u8 ev, size_t len)
52 {
53 	void *data;
54 
55 	data = skb_pull_data(skb, len);
56 	if (!data)
57 		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
58 
59 	return data;
60 }
61 
62 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
63 			     u16 op, size_t len)
64 {
65 	void *data;
66 
67 	data = skb_pull_data(skb, len);
68 	if (!data)
69 		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
70 
71 	return data;
72 }
73 
74 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
75 				u8 ev, size_t len)
76 {
77 	void *data;
78 
79 	data = skb_pull_data(skb, len);
80 	if (!data)
81 		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
82 
83 	return data;
84 }
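
/* The three pull helpers above are the common guard for fixed-size
 * parsing: each consumes 'len' bytes from the skb via skb_pull_data()
 * and logs per-packet-type context when the skb is too short. A sketch
 * of the expected calling pattern (the actual dispatch lives later in
 * this file, outside this excerpt):
 *
 *	struct hci_ev_status *rp;
 *
 *	rp = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*rp));
 *	if (!rp)
 *		return;		(malformed packet, already logged)
 */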
85 
86 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
87 				struct sk_buff *skb)
88 {
89 	struct hci_ev_status *rp = data;
90 
91 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
92 
93 	/* It is possible that we receive the Inquiry Complete event right
94 	 * before the Inquiry Cancel Command Complete event, in which case
95 	 * the latter should have a status of Command Disallowed (0x0c).
96 	 * This should not be treated as an error, since we have actually
97 	 * achieved what Inquiry Cancel set out to achieve, which is to end
98 	 * the last Inquiry session.
99 	 */
100 	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
101 		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
102 		rp->status = 0x00;
103 	}
104 
105 	if (rp->status)
106 		return rp->status;
107 
108 	clear_bit(HCI_INQUIRY, &hdev->flags);
109 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
110 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
111 
112 	hci_dev_lock(hdev);
113 	/* Set discovery state to stopped if we're not doing LE active
114 	 * scanning.
115 	 */
116 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
117 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
118 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
119 	hci_dev_unlock(hdev);
120 
121 	hci_conn_check_pending(hdev);
122 
123 	return rp->status;
124 }
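
/* The clear_bit()/smp_mb__after_atomic()/wake_up_bit() sequence above
 * pairs with waiters that block on the inquiry flag, e.g. (sketch,
 * assuming the wait_on_bit() user in hci_core.c):
 *
 *	wait_on_bit(&hdev->flags, HCI_INQUIRY, TASK_INTERRUPTIBLE);
 *
 * The barrier ensures the cleared flag is visible before any waiter is
 * woken, so a woken task cannot observe HCI_INQUIRY still set.
 */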
125 
126 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
127 			      struct sk_buff *skb)
128 {
129 	struct hci_ev_status *rp = data;
130 
131 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
132 
133 	if (rp->status)
134 		return rp->status;
135 
136 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
137 
138 	return rp->status;
139 }
140 
141 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
142 				   struct sk_buff *skb)
143 {
144 	struct hci_ev_status *rp = data;
145 
146 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
147 
148 	if (rp->status)
149 		return rp->status;
150 
151 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
152 
153 	hci_conn_check_pending(hdev);
154 
155 	return rp->status;
156 }
157 
158 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
159 					struct sk_buff *skb)
160 {
161 	struct hci_ev_status *rp = data;
162 
163 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
164 
165 	return rp->status;
166 }
167 
168 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
169 				struct sk_buff *skb)
170 {
171 	struct hci_rp_role_discovery *rp = data;
172 	struct hci_conn *conn;
173 
174 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
175 
176 	if (rp->status)
177 		return rp->status;
178 
179 	hci_dev_lock(hdev);
180 
181 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
182 	if (conn)
183 		conn->role = rp->role;
184 
185 	hci_dev_unlock(hdev);
186 
187 	return rp->status;
188 }
189 
190 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
191 				  struct sk_buff *skb)
192 {
193 	struct hci_rp_read_link_policy *rp = data;
194 	struct hci_conn *conn;
195 
196 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
197 
198 	if (rp->status)
199 		return rp->status;
200 
201 	hci_dev_lock(hdev);
202 
203 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
204 	if (conn)
205 		conn->link_policy = __le16_to_cpu(rp->policy);
206 
207 	hci_dev_unlock(hdev);
208 
209 	return rp->status;
210 }
211 
212 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
213 				   struct sk_buff *skb)
214 {
215 	struct hci_rp_write_link_policy *rp = data;
216 	struct hci_conn *conn;
217 	void *sent;
218 
219 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
220 
221 	if (rp->status)
222 		return rp->status;
223 
224 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
225 	if (!sent)
226 		return rp->status;
227 
228 	hci_dev_lock(hdev);
229 
230 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
231 	if (conn)
232 		conn->link_policy = get_unaligned_le16(sent + 2);
233 
234 	hci_dev_unlock(hdev);
235 
236 	return rp->status;
237 }
238 
239 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
240 				      struct sk_buff *skb)
241 {
242 	struct hci_rp_read_def_link_policy *rp = data;
243 
244 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
245 
246 	if (rp->status)
247 		return rp->status;
248 
249 	hdev->link_policy = __le16_to_cpu(rp->policy);
250 
251 	return rp->status;
252 }
253 
254 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
255 				       struct sk_buff *skb)
256 {
257 	struct hci_ev_status *rp = data;
258 	void *sent;
259 
260 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
261 
262 	if (rp->status)
263 		return rp->status;
264 
265 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
266 	if (!sent)
267 		return rp->status;
268 
269 	hdev->link_policy = get_unaligned_le16(sent);
270 
271 	return rp->status;
272 }
273 
274 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
275 {
276 	struct hci_ev_status *rp = data;
277 
278 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
279 
280 	clear_bit(HCI_RESET, &hdev->flags);
281 
282 	if (rp->status)
283 		return rp->status;
284 
285 	/* Reset all non-persistent flags */
286 	hci_dev_clear_volatile_flags(hdev);
287 
288 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
289 
290 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
291 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
292 
293 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
294 	hdev->adv_data_len = 0;
295 
296 	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
297 	hdev->scan_rsp_data_len = 0;
298 
299 	hdev->le_scan_type = LE_SCAN_PASSIVE;
300 
301 	hdev->ssp_debug_mode = 0;
302 
303 	hci_bdaddr_list_clear(&hdev->le_accept_list);
304 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
305 
306 	return rp->status;
307 }
308 
309 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
310 				      struct sk_buff *skb)
311 {
312 	struct hci_rp_read_stored_link_key *rp = data;
313 	struct hci_cp_read_stored_link_key *sent;
314 
315 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
316 
317 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
318 	if (!sent)
319 		return rp->status;
320 
321 	if (!rp->status && sent->read_all == 0x01) {
322 		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
323 		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
324 	}
325 
326 	return rp->status;
327 }
328 
329 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
330 					struct sk_buff *skb)
331 {
332 	struct hci_rp_delete_stored_link_key *rp = data;
333 	u16 num_keys;
334 
335 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
336 
337 	if (rp->status)
338 		return rp->status;
339 
340 	num_keys = le16_to_cpu(rp->num_keys);
341 
342 	if (num_keys <= hdev->stored_num_keys)
343 		hdev->stored_num_keys -= num_keys;
344 	else
345 		hdev->stored_num_keys = 0;
346 
347 	return rp->status;
348 }
349 
350 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
351 				  struct sk_buff *skb)
352 {
353 	struct hci_ev_status *rp = data;
354 	void *sent;
355 
356 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
357 
358 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
359 	if (!sent)
360 		return rp->status;
361 
362 	hci_dev_lock(hdev);
363 
364 	if (hci_dev_test_flag(hdev, HCI_MGMT))
365 		mgmt_set_local_name_complete(hdev, sent, rp->status);
366 	else if (!rp->status)
367 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
368 
369 	hci_dev_unlock(hdev);
370 
371 	return rp->status;
372 }
373 
374 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
375 				 struct sk_buff *skb)
376 {
377 	struct hci_rp_read_local_name *rp = data;
378 
379 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
380 
381 	if (rp->status)
382 		return rp->status;
383 
384 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
385 	    hci_dev_test_flag(hdev, HCI_CONFIG))
386 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
387 
388 	return rp->status;
389 }
390 
391 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
392 				   struct sk_buff *skb)
393 {
394 	struct hci_ev_status *rp = data;
395 	void *sent;
396 
397 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
398 
399 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
400 	if (!sent)
401 		return rp->status;
402 
403 	hci_dev_lock(hdev);
404 
405 	if (!rp->status) {
406 		__u8 param = *((__u8 *) sent);
407 
408 		if (param == AUTH_ENABLED)
409 			set_bit(HCI_AUTH, &hdev->flags);
410 		else
411 			clear_bit(HCI_AUTH, &hdev->flags);
412 	}
413 
414 	if (hci_dev_test_flag(hdev, HCI_MGMT))
415 		mgmt_auth_enable_complete(hdev, rp->status);
416 
417 	hci_dev_unlock(hdev);
418 
419 	return rp->status;
420 }
421 
422 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
423 				    struct sk_buff *skb)
424 {
425 	struct hci_ev_status *rp = data;
426 	__u8 param;
427 	void *sent;
428 
429 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
430 
431 	if (rp->status)
432 		return rp->status;
433 
434 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
435 	if (!sent)
436 		return rp->status;
437 
438 	param = *((__u8 *) sent);
439 
440 	if (param)
441 		set_bit(HCI_ENCRYPT, &hdev->flags);
442 	else
443 		clear_bit(HCI_ENCRYPT, &hdev->flags);
444 
445 	return rp->status;
446 }
447 
448 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
449 				   struct sk_buff *skb)
450 {
451 	struct hci_ev_status *rp = data;
452 	__u8 param;
453 	void *sent;
454 
455 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
456 
457 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
458 	if (!sent)
459 		return rp->status;
460 
461 	param = *((__u8 *) sent);
462 
463 	hci_dev_lock(hdev);
464 
465 	if (rp->status) {
466 		hdev->discov_timeout = 0;
467 		goto done;
468 	}
469 
470 	if (param & SCAN_INQUIRY)
471 		set_bit(HCI_ISCAN, &hdev->flags);
472 	else
473 		clear_bit(HCI_ISCAN, &hdev->flags);
474 
475 	if (param & SCAN_PAGE)
476 		set_bit(HCI_PSCAN, &hdev->flags);
477 	else
478 		clear_bit(HCI_PSCAN, &hdev->flags);
479 
480 done:
481 	hci_dev_unlock(hdev);
482 
483 	return rp->status;
484 }
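
/* The Write Scan Enable parameter read back from the sent command above
 * is a bitmask: bit 0 (SCAN_INQUIRY, 0x01) maps to HCI_ISCAN and bit 1
 * (SCAN_PAGE, 0x02) to HCI_PSCAN. A sent value of 0x03 therefore leaves
 * the controller both discoverable and connectable; 0x00 disables both
 * scans.
 */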
485 
486 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
487 				  struct sk_buff *skb)
488 {
489 	struct hci_ev_status *rp = data;
490 	struct hci_cp_set_event_filter *cp;
491 	void *sent;
492 
493 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
494 
495 	if (rp->status)
496 		return rp->status;
497 
498 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
499 	if (!sent)
500 		return rp->status;
501 
502 	cp = (struct hci_cp_set_event_filter *)sent;
503 
504 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
505 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
506 	else
507 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
508 
509 	return rp->status;
510 }
511 
512 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
513 				   struct sk_buff *skb)
514 {
515 	struct hci_rp_read_class_of_dev *rp = data;
516 
517 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
518 
519 	if (rp->status)
520 		return rp->status;
521 
522 	memcpy(hdev->dev_class, rp->dev_class, 3);
523 
524 	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
525 		   hdev->dev_class[1], hdev->dev_class[0]);
526 
527 	return rp->status;
528 }
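
/* dev_class holds the 24-bit Class of Device least-significant octet
 * first, as received on the wire; the debug print above reverses the
 * octets so the value reads in the conventional order, e.g. 0x5a020c
 * for dev_class = { 0x0c, 0x02, 0x5a }.
 */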
529 
530 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
531 				    struct sk_buff *skb)
532 {
533 	struct hci_ev_status *rp = data;
534 	void *sent;
535 
536 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
537 
538 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
539 	if (!sent)
540 		return rp->status;
541 
542 	hci_dev_lock(hdev);
543 
544 	if (!rp->status)
545 		memcpy(hdev->dev_class, sent, 3);
546 
547 	if (hci_dev_test_flag(hdev, HCI_MGMT))
548 		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
549 
550 	hci_dev_unlock(hdev);
551 
552 	return rp->status;
553 }
554 
555 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
556 				    struct sk_buff *skb)
557 {
558 	struct hci_rp_read_voice_setting *rp = data;
559 	__u16 setting;
560 
561 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
562 
563 	if (rp->status)
564 		return rp->status;
565 
566 	setting = __le16_to_cpu(rp->voice_setting);
567 
568 	if (hdev->voice_setting == setting)
569 		return rp->status;
570 
571 	hdev->voice_setting = setting;
572 
573 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
574 
575 	if (hdev->notify)
576 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
577 
578 	return rp->status;
579 }
580 
581 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
582 				     struct sk_buff *skb)
583 {
584 	struct hci_ev_status *rp = data;
585 	__u16 setting;
586 	void *sent;
587 
588 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
589 
590 	if (rp->status)
591 		return rp->status;
592 
593 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
594 	if (!sent)
595 		return rp->status;
596 
597 	setting = get_unaligned_le16(sent);
598 
599 	if (hdev->voice_setting == setting)
600 		return rp->status;
601 
602 	hdev->voice_setting = setting;
603 
604 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
605 
606 	if (hdev->notify)
607 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
608 
609 	return rp->status;
610 }
611 
612 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
613 					struct sk_buff *skb)
614 {
615 	struct hci_rp_read_num_supported_iac *rp = data;
616 
617 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
618 
619 	if (rp->status)
620 		return rp->status;
621 
622 	hdev->num_iac = rp->num_iac;
623 
624 	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
625 
626 	return rp->status;
627 }
628 
629 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
630 				struct sk_buff *skb)
631 {
632 	struct hci_ev_status *rp = data;
633 	struct hci_cp_write_ssp_mode *sent;
634 
635 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
636 
637 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
638 	if (!sent)
639 		return rp->status;
640 
641 	hci_dev_lock(hdev);
642 
643 	if (!rp->status) {
644 		if (sent->mode)
645 			hdev->features[1][0] |= LMP_HOST_SSP;
646 		else
647 			hdev->features[1][0] &= ~LMP_HOST_SSP;
648 	}
649 
650 	if (!rp->status) {
651 		if (sent->mode)
652 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
653 		else
654 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
655 	}
656 
657 	hci_dev_unlock(hdev);
658 
659 	return rp->status;
660 }
661 
662 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
663 				  struct sk_buff *skb)
664 {
665 	struct hci_ev_status *rp = data;
666 	struct hci_cp_write_sc_support *sent;
667 
668 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
669 
670 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
671 	if (!sent)
672 		return rp->status;
673 
674 	hci_dev_lock(hdev);
675 
676 	if (!rp->status) {
677 		if (sent->support)
678 			hdev->features[1][0] |= LMP_HOST_SC;
679 		else
680 			hdev->features[1][0] &= ~LMP_HOST_SC;
681 	}
682 
683 	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
684 		if (sent->support)
685 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
686 		else
687 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
688 	}
689 
690 	hci_dev_unlock(hdev);
691 
692 	return rp->status;
693 }
694 
695 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
696 				    struct sk_buff *skb)
697 {
698 	struct hci_rp_read_local_version *rp = data;
699 
700 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
701 
702 	if (rp->status)
703 		return rp->status;
704 
705 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
706 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
707 		hdev->hci_ver = rp->hci_ver;
708 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
709 		hdev->lmp_ver = rp->lmp_ver;
710 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
711 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
712 	}
713 
714 	return rp->status;
715 }
716 
717 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
718 				   struct sk_buff *skb)
719 {
720 	struct hci_rp_read_enc_key_size *rp = data;
721 	struct hci_conn *conn;
722 	u16 handle;
723 	u8 status = rp->status;
724 
725 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
726 
727 	handle = le16_to_cpu(rp->handle);
728 
729 	hci_dev_lock(hdev);
730 
731 	conn = hci_conn_hash_lookup_handle(hdev, handle);
732 	if (!conn) {
733 		status = 0xFF;
734 		goto done;
735 	}
736 
737 	/* While unexpected, the read_enc_key_size command may fail. The most
738 	 * secure approach is to then assume the key size is 0 to force a
739 	 * disconnection.
740 	 */
741 	if (status) {
742 		bt_dev_err(hdev, "failed to read key size for handle %u",
743 			   handle);
744 		conn->enc_key_size = 0;
745 	} else {
746 		conn->enc_key_size = rp->key_size;
747 		status = 0;
748 	}
749 
750 	hci_encrypt_cfm(conn, 0);
751 
752 done:
753 	hci_dev_unlock(hdev);
754 
755 	return status;
756 }
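
/* Note that hci_encrypt_cfm() runs on both the success and the failure
 * path above: it resumes the pending encryption-change processing, and
 * with enc_key_size forced to 0 the subsequent security checks in the
 * upper layers are expected to reject the link, producing the
 * disconnect mentioned in the comment above.
 */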
757 
758 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
759 				     struct sk_buff *skb)
760 {
761 	struct hci_rp_read_local_commands *rp = data;
762 
763 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
764 
765 	if (rp->status)
766 		return rp->status;
767 
768 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
769 	    hci_dev_test_flag(hdev, HCI_CONFIG))
770 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
771 
772 	return rp->status;
773 }
774 
775 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
776 					   struct sk_buff *skb)
777 {
778 	struct hci_rp_read_auth_payload_to *rp = data;
779 	struct hci_conn *conn;
780 
781 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
782 
783 	if (rp->status)
784 		return rp->status;
785 
786 	hci_dev_lock(hdev);
787 
788 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
789 	if (conn)
790 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
791 
792 	hci_dev_unlock(hdev);
793 
794 	return rp->status;
795 }
796 
797 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
798 					    struct sk_buff *skb)
799 {
800 	struct hci_rp_write_auth_payload_to *rp = data;
801 	struct hci_conn *conn;
802 	void *sent;
803 
804 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
805 
806 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
807 	if (!sent)
808 		return rp->status;
809 
810 	hci_dev_lock(hdev);
811 
812 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
813 	if (!conn) {
814 		rp->status = 0xff;
815 		goto unlock;
816 	}
817 
818 	if (!rp->status)
819 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
820 
821 	hci_encrypt_cfm(conn, 0);
822 
823 unlock:
824 	hci_dev_unlock(hdev);
825 
826 	return rp->status;
827 }
828 
829 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
830 				     struct sk_buff *skb)
831 {
832 	struct hci_rp_read_local_features *rp = data;
833 
834 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
835 
836 	if (rp->status)
837 		return rp->status;
838 
839 	memcpy(hdev->features, rp->features, 8);
840 
841 	/* Adjust default settings according to features
842 	 * supported by the device. */
843 
844 	if (hdev->features[0][0] & LMP_3SLOT)
845 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
846 
847 	if (hdev->features[0][0] & LMP_5SLOT)
848 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
849 
850 	if (hdev->features[0][1] & LMP_HV2) {
851 		hdev->pkt_type  |= (HCI_HV2);
852 		hdev->esco_type |= (ESCO_HV2);
853 	}
854 
855 	if (hdev->features[0][1] & LMP_HV3) {
856 		hdev->pkt_type  |= (HCI_HV3);
857 		hdev->esco_type |= (ESCO_HV3);
858 	}
859 
860 	if (lmp_esco_capable(hdev))
861 		hdev->esco_type |= (ESCO_EV3);
862 
863 	if (hdev->features[0][4] & LMP_EV4)
864 		hdev->esco_type |= (ESCO_EV4);
865 
866 	if (hdev->features[0][4] & LMP_EV5)
867 		hdev->esco_type |= (ESCO_EV5);
868 
869 	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
870 		hdev->esco_type |= (ESCO_2EV3);
871 
872 	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
873 		hdev->esco_type |= (ESCO_3EV3);
874 
875 	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
876 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
877 
878 	return rp->status;
879 }
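
/* Worked example of the feature-to-packet-type mapping above: a
 * controller reporting LMP_3SLOT, LMP_5SLOT and EDR 2M/3M eSCO support
 * ends up with DM3/DH3/DM5/DH5 added to hdev->pkt_type (on top of the
 * DM1/DH1 baseline assumed to be set at hci_dev allocation time) and
 * 2-EV3/3-EV3 added to hdev->esco_type.
 */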
880 
881 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
882 					 struct sk_buff *skb)
883 {
884 	struct hci_rp_read_local_ext_features *rp = data;
885 
886 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
887 
888 	if (rp->status)
889 		return rp->status;
890 
891 	if (hdev->max_page < rp->max_page) {
892 		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
893 			     &hdev->quirks))
894 			bt_dev_warn(hdev, "broken local ext features page 2");
895 		else
896 			hdev->max_page = rp->max_page;
897 	}
898 
899 	if (rp->page < HCI_MAX_PAGES)
900 		memcpy(hdev->features[rp->page], rp->features, 8);
901 
902 	return rp->status;
903 }
904 
905 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
906 					struct sk_buff *skb)
907 {
908 	struct hci_rp_read_flow_control_mode *rp = data;
909 
910 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
911 
912 	if (rp->status)
913 		return rp->status;
914 
915 	hdev->flow_ctl_mode = rp->mode;
916 
917 	return rp->status;
918 }
919 
920 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
921 				  struct sk_buff *skb)
922 {
923 	struct hci_rp_read_buffer_size *rp = data;
924 
925 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
926 
927 	if (rp->status)
928 		return rp->status;
929 
930 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
931 	hdev->sco_mtu  = rp->sco_mtu;
932 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
933 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
934 
935 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
936 		hdev->sco_mtu  = 64;
937 		hdev->sco_pkts = 8;
938 	}
939 
940 	hdev->acl_cnt = hdev->acl_pkts;
941 	hdev->sco_cnt = hdev->sco_pkts;
942 
943 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
944 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
945 
946 	return rp->status;
947 }
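
/* acl_cnt/sco_cnt behave as flow-control credits: the TX path consumes
 * one per queued packet and the Number of Completed Packets event
 * handler gives them back. The HCI_QUIRK_FIXUP_BUFFER_SIZE fallback
 * (64-byte SCO MTU, 8 packets) papers over controllers that report
 * unusable SCO buffer values.
 */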
948 
949 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
950 			      struct sk_buff *skb)
951 {
952 	struct hci_rp_read_bd_addr *rp = data;
953 
954 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
955 
956 	if (rp->status)
957 		return rp->status;
958 
959 	if (test_bit(HCI_INIT, &hdev->flags))
960 		bacpy(&hdev->bdaddr, &rp->bdaddr);
961 
962 	if (hci_dev_test_flag(hdev, HCI_SETUP))
963 		bacpy(&hdev->setup_addr, &rp->bdaddr);
964 
965 	return rp->status;
966 }
967 
968 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
969 					 struct sk_buff *skb)
970 {
971 	struct hci_rp_read_local_pairing_opts *rp = data;
972 
973 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
974 
975 	if (rp->status)
976 		return rp->status;
977 
978 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
979 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
980 		hdev->pairing_opts = rp->pairing_opts;
981 		hdev->max_enc_key_size = rp->max_key_size;
982 	}
983 
984 	return rp->status;
985 }
986 
987 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
988 					 struct sk_buff *skb)
989 {
990 	struct hci_rp_read_page_scan_activity *rp = data;
991 
992 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
993 
994 	if (rp->status)
995 		return rp->status;
996 
997 	if (test_bit(HCI_INIT, &hdev->flags)) {
998 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
999 		hdev->page_scan_window = __le16_to_cpu(rp->window);
1000 	}
1001 
1002 	return rp->status;
1003 }
1004 
1005 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1006 					  struct sk_buff *skb)
1007 {
1008 	struct hci_ev_status *rp = data;
1009 	struct hci_cp_write_page_scan_activity *sent;
1010 
1011 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1012 
1013 	if (rp->status)
1014 		return rp->status;
1015 
1016 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1017 	if (!sent)
1018 		return rp->status;
1019 
1020 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1021 	hdev->page_scan_window = __le16_to_cpu(sent->window);
1022 
1023 	return rp->status;
1024 }
1025 
1026 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1027 				     struct sk_buff *skb)
1028 {
1029 	struct hci_rp_read_page_scan_type *rp = data;
1030 
1031 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1032 
1033 	if (rp->status)
1034 		return rp->status;
1035 
1036 	if (test_bit(HCI_INIT, &hdev->flags))
1037 		hdev->page_scan_type = rp->type;
1038 
1039 	return rp->status;
1040 }
1041 
1042 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1043 				      struct sk_buff *skb)
1044 {
1045 	struct hci_ev_status *rp = data;
1046 	u8 *type;
1047 
1048 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1049 
1050 	if (rp->status)
1051 		return rp->status;
1052 
1053 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1054 	if (type)
1055 		hdev->page_scan_type = *type;
1056 
1057 	return rp->status;
1058 }
1059 
1060 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1061 				      struct sk_buff *skb)
1062 {
1063 	struct hci_rp_read_data_block_size *rp = data;
1064 
1065 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1066 
1067 	if (rp->status)
1068 		return rp->status;
1069 
1070 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1071 	hdev->block_len = __le16_to_cpu(rp->block_len);
1072 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1073 
1074 	hdev->block_cnt = hdev->num_blocks;
1075 
1076 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1077 	       hdev->block_cnt, hdev->block_len);
1078 
1079 	return rp->status;
1080 }
1081 
1082 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1083 			    struct sk_buff *skb)
1084 {
1085 	struct hci_rp_read_clock *rp = data;
1086 	struct hci_cp_read_clock *cp;
1087 	struct hci_conn *conn;
1088 
1089 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1090 
1091 	if (rp->status)
1092 		return rp->status;
1093 
1094 	hci_dev_lock(hdev);
1095 
1096 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1097 	if (!cp)
1098 		goto unlock;
1099 
1100 	if (cp->which == 0x00) {
1101 		hdev->clock = le32_to_cpu(rp->clock);
1102 		goto unlock;
1103 	}
1104 
1105 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1106 	if (conn) {
1107 		conn->clock = le32_to_cpu(rp->clock);
1108 		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1109 	}
1110 
1111 unlock:
1112 	hci_dev_unlock(hdev);
1113 	return rp->status;
1114 }
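
/* Per the Read Clock command definition, cp->which selects the clock:
 * 0x00 reads the local Bluetooth clock (stored in hdev->clock), while
 * 0x01 reads the piconet clock for the given connection handle, which
 * is why only that path consults the connection hash.
 */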
1115 
1116 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1117 				     struct sk_buff *skb)
1118 {
1119 	struct hci_rp_read_local_amp_info *rp = data;
1120 
1121 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1122 
1123 	if (rp->status)
1124 		return rp->status;
1125 
1126 	hdev->amp_status = rp->amp_status;
1127 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1128 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1129 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1130 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1131 	hdev->amp_type = rp->amp_type;
1132 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1133 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1134 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1135 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1136 
1137 	return rp->status;
1138 }
1139 
1140 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1141 				       struct sk_buff *skb)
1142 {
1143 	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1144 
1145 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1146 
1147 	if (rp->status)
1148 		return rp->status;
1149 
1150 	hdev->inq_tx_power = rp->tx_power;
1151 
1152 	return rp->status;
1153 }
1154 
1155 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1156 					     struct sk_buff *skb)
1157 {
1158 	struct hci_rp_read_def_err_data_reporting *rp = data;
1159 
1160 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1161 
1162 	if (rp->status)
1163 		return rp->status;
1164 
1165 	hdev->err_data_reporting = rp->err_data_reporting;
1166 
1167 	return rp->status;
1168 }
1169 
1170 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1171 					      struct sk_buff *skb)
1172 {
1173 	struct hci_ev_status *rp = data;
1174 	struct hci_cp_write_def_err_data_reporting *cp;
1175 
1176 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1177 
1178 	if (rp->status)
1179 		return rp->status;
1180 
1181 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1182 	if (!cp)
1183 		return rp->status;
1184 
1185 	hdev->err_data_reporting = cp->err_data_reporting;
1186 
1187 	return rp->status;
1188 }
1189 
1190 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1191 				struct sk_buff *skb)
1192 {
1193 	struct hci_rp_pin_code_reply *rp = data;
1194 	struct hci_cp_pin_code_reply *cp;
1195 	struct hci_conn *conn;
1196 
1197 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1198 
1199 	hci_dev_lock(hdev);
1200 
1201 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1202 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1203 
1204 	if (rp->status)
1205 		goto unlock;
1206 
1207 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1208 	if (!cp)
1209 		goto unlock;
1210 
1211 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1212 	if (conn)
1213 		conn->pin_length = cp->pin_len;
1214 
1215 unlock:
1216 	hci_dev_unlock(hdev);
1217 	return rp->status;
1218 }
1219 
1220 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1221 				    struct sk_buff *skb)
1222 {
1223 	struct hci_rp_pin_code_neg_reply *rp = data;
1224 
1225 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1226 
1227 	hci_dev_lock(hdev);
1228 
1229 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1230 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1231 						 rp->status);
1232 
1233 	hci_dev_unlock(hdev);
1234 
1235 	return rp->status;
1236 }
1237 
1238 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1239 				     struct sk_buff *skb)
1240 {
1241 	struct hci_rp_le_read_buffer_size *rp = data;
1242 
1243 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1244 
1245 	if (rp->status)
1246 		return rp->status;
1247 
1248 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1249 	hdev->le_pkts = rp->le_max_pkt;
1250 
1251 	hdev->le_cnt = hdev->le_pkts;
1252 
1253 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1254 
1255 	return rp->status;
1256 }
1257 
1258 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1259 					struct sk_buff *skb)
1260 {
1261 	struct hci_rp_le_read_local_features *rp = data;
1262 
1263 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1264 
1265 	if (rp->status)
1266 		return rp->status;
1267 
1268 	memcpy(hdev->le_features, rp->features, 8);
1269 
1270 	return rp->status;
1271 }
1272 
1273 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1274 				      struct sk_buff *skb)
1275 {
1276 	struct hci_rp_le_read_adv_tx_power *rp = data;
1277 
1278 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1279 
1280 	if (rp->status)
1281 		return rp->status;
1282 
1283 	hdev->adv_tx_power = rp->tx_power;
1284 
1285 	return rp->status;
1286 }
1287 
1288 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1289 				    struct sk_buff *skb)
1290 {
1291 	struct hci_rp_user_confirm_reply *rp = data;
1292 
1293 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1294 
1295 	hci_dev_lock(hdev);
1296 
1297 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1298 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1299 						 rp->status);
1300 
1301 	hci_dev_unlock(hdev);
1302 
1303 	return rp->status;
1304 }
1305 
1306 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1307 					struct sk_buff *skb)
1308 {
1309 	struct hci_rp_user_confirm_reply *rp = data;
1310 
1311 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1312 
1313 	hci_dev_lock(hdev);
1314 
1315 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1316 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1317 						     ACL_LINK, 0, rp->status);
1318 
1319 	hci_dev_unlock(hdev);
1320 
1321 	return rp->status;
1322 }
1323 
1324 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1325 				    struct sk_buff *skb)
1326 {
1327 	struct hci_rp_user_confirm_reply *rp = data;
1328 
1329 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1330 
1331 	hci_dev_lock(hdev);
1332 
1333 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1334 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1335 						 0, rp->status);
1336 
1337 	hci_dev_unlock(hdev);
1338 
1339 	return rp->status;
1340 }
1341 
1342 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1343 					struct sk_buff *skb)
1344 {
1345 	struct hci_rp_user_confirm_reply *rp = data;
1346 
1347 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1348 
1349 	hci_dev_lock(hdev);
1350 
1351 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1352 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1353 						     ACL_LINK, 0, rp->status);
1354 
1355 	hci_dev_unlock(hdev);
1356 
1357 	return rp->status;
1358 }
1359 
1360 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1361 				     struct sk_buff *skb)
1362 {
1363 	struct hci_rp_read_local_oob_data *rp = data;
1364 
1365 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1366 
1367 	return rp->status;
1368 }
1369 
1370 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1371 					 struct sk_buff *skb)
1372 {
1373 	struct hci_rp_read_local_oob_ext_data *rp = data;
1374 
1375 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1376 
1377 	return rp->status;
1378 }
1379 
1380 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1381 				    struct sk_buff *skb)
1382 {
1383 	struct hci_ev_status *rp = data;
1384 	bdaddr_t *sent;
1385 
1386 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1387 
1388 	if (rp->status)
1389 		return rp->status;
1390 
1391 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1392 	if (!sent)
1393 		return rp->status;
1394 
1395 	hci_dev_lock(hdev);
1396 
1397 	bacpy(&hdev->random_addr, sent);
1398 
1399 	if (!bacmp(&hdev->rpa, sent)) {
1400 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1401 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1402 				   secs_to_jiffies(hdev->rpa_timeout));
1403 	}
1404 
1405 	hci_dev_unlock(hdev);
1406 
1407 	return rp->status;
1408 }
1409 
1410 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1411 				    struct sk_buff *skb)
1412 {
1413 	struct hci_ev_status *rp = data;
1414 	struct hci_cp_le_set_default_phy *cp;
1415 
1416 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1417 
1418 	if (rp->status)
1419 		return rp->status;
1420 
1421 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1422 	if (!cp)
1423 		return rp->status;
1424 
1425 	hci_dev_lock(hdev);
1426 
1427 	hdev->le_tx_def_phys = cp->tx_phys;
1428 	hdev->le_rx_def_phys = cp->rx_phys;
1429 
1430 	hci_dev_unlock(hdev);
1431 
1432 	return rp->status;
1433 }
1434 
1435 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1436 					    struct sk_buff *skb)
1437 {
1438 	struct hci_ev_status *rp = data;
1439 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1440 	struct adv_info *adv;
1441 
1442 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1443 
1444 	if (rp->status)
1445 		return rp->status;
1446 
1447 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1448 	/* Update the adv instance only if its handle is non-zero, since
1449 	 * handle 0x00 shall keep using HCI_OP_LE_SET_RANDOM_ADDR, which
1450 	 * covers both extended and non-extended advertising.
1451 	 */
1452 	if (!cp || !cp->handle)
1453 		return rp->status;
1454 
1455 	hci_dev_lock(hdev);
1456 
1457 	adv = hci_find_adv_instance(hdev, cp->handle);
1458 	if (adv) {
1459 		bacpy(&adv->random_addr, &cp->bdaddr);
1460 		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1461 			adv->rpa_expired = false;
1462 			queue_delayed_work(hdev->workqueue,
1463 					   &adv->rpa_expired_cb,
1464 					   secs_to_jiffies(hdev->rpa_timeout));
1465 		}
1466 	}
1467 
1468 	hci_dev_unlock(hdev);
1469 
1470 	return rp->status;
1471 }
1472 
1473 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1474 				   struct sk_buff *skb)
1475 {
1476 	struct hci_ev_status *rp = data;
1477 	u8 *instance;
1478 	int err;
1479 
1480 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1481 
1482 	if (rp->status)
1483 		return rp->status;
1484 
1485 	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1486 	if (!instance)
1487 		return rp->status;
1488 
1489 	hci_dev_lock(hdev);
1490 
1491 	err = hci_remove_adv_instance(hdev, *instance);
1492 	if (!err)
1493 		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1494 					 *instance);
1495 
1496 	hci_dev_unlock(hdev);
1497 
1498 	return rp->status;
1499 }
1500 
1501 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1502 				   struct sk_buff *skb)
1503 {
1504 	struct hci_ev_status *rp = data;
1505 	struct adv_info *adv, *n;
1506 	int err;
1507 
1508 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1509 
1510 	if (rp->status)
1511 		return rp->status;
1512 
1513 	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1514 		return rp->status;
1515 
1516 	hci_dev_lock(hdev);
1517 
1518 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1519 		u8 instance = adv->instance;
1520 
1521 		err = hci_remove_adv_instance(hdev, instance);
1522 		if (!err)
1523 			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1524 						 hdev, instance);
1525 	}
1526 
1527 	hci_dev_unlock(hdev);
1528 
1529 	return rp->status;
1530 }
1531 
1532 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1533 					struct sk_buff *skb)
1534 {
1535 	struct hci_rp_le_read_transmit_power *rp = data;
1536 
1537 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1538 
1539 	if (rp->status)
1540 		return rp->status;
1541 
1542 	hdev->min_le_tx_power = rp->min_le_tx_power;
1543 	hdev->max_le_tx_power = rp->max_le_tx_power;
1544 
1545 	return rp->status;
1546 }
1547 
1548 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1549 				     struct sk_buff *skb)
1550 {
1551 	struct hci_ev_status *rp = data;
1552 	struct hci_cp_le_set_privacy_mode *cp;
1553 	struct hci_conn_params *params;
1554 
1555 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1556 
1557 	if (rp->status)
1558 		return rp->status;
1559 
1560 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1561 	if (!cp)
1562 		return rp->status;
1563 
1564 	hci_dev_lock(hdev);
1565 
1566 	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1567 	if (params)
1568 		WRITE_ONCE(params->privacy_mode, cp->mode);
1569 
1570 	hci_dev_unlock(hdev);
1571 
1572 	return rp->status;
1573 }
1574 
1575 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1576 				   struct sk_buff *skb)
1577 {
1578 	struct hci_ev_status *rp = data;
1579 	__u8 *sent;
1580 
1581 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1582 
1583 	if (rp->status)
1584 		return rp->status;
1585 
1586 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1587 	if (!sent)
1588 		return rp->status;
1589 
1590 	hci_dev_lock(hdev);
1591 
1592 	/* If we're doing connection initiation as a peripheral, set a
1593 	 * timeout in case something goes wrong.
1594 	 */
1595 	if (*sent) {
1596 		struct hci_conn *conn;
1597 
1598 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1599 
1600 		conn = hci_lookup_le_connect(hdev);
1601 		if (conn)
1602 			queue_delayed_work(hdev->workqueue,
1603 					   &conn->le_conn_timeout,
1604 					   conn->conn_timeout);
1605 	} else {
1606 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1607 	}
1608 
1609 	hci_dev_unlock(hdev);
1610 
1611 	return rp->status;
1612 }
1613 
1614 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1615 				       struct sk_buff *skb)
1616 {
1617 	struct hci_cp_le_set_ext_adv_enable *cp;
1618 	struct hci_cp_ext_adv_set *set;
1619 	struct adv_info *adv = NULL, *n;
1620 	struct hci_ev_status *rp = data;
1621 
1622 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1623 
1624 	if (rp->status)
1625 		return rp->status;
1626 
1627 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1628 	if (!cp)
1629 		return rp->status;
1630 
1631 	set = (void *)cp->data;
1632 
1633 	hci_dev_lock(hdev);
1634 
1635 	if (cp->num_of_sets)
1636 		adv = hci_find_adv_instance(hdev, set->handle);
1637 
1638 	if (cp->enable) {
1639 		struct hci_conn *conn;
1640 
1641 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1642 
1643 		if (adv && !adv->periodic)
1644 			adv->enabled = true;
1645 
1646 		conn = hci_lookup_le_connect(hdev);
1647 		if (conn)
1648 			queue_delayed_work(hdev->workqueue,
1649 					   &conn->le_conn_timeout,
1650 					   conn->conn_timeout);
1651 	} else {
1652 		if (cp->num_of_sets) {
1653 			if (adv)
1654 				adv->enabled = false;
1655 
1656 			/* If just one instance was disabled, check if any other
1657 			 * instance is still enabled before clearing HCI_LE_ADV.
1658 			 */
1659 			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1660 						 list) {
1661 				if (adv->enabled)
1662 					goto unlock;
1663 			}
1664 		} else {
1665 			/* All instances shall be considered disabled */
1666 			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1667 						 list)
1668 				adv->enabled = false;
1669 		}
1670 
1671 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1672 	}
1673 
1674 unlock:
1675 	hci_dev_unlock(hdev);
1676 	return rp->status;
1677 }
1678 
1679 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1680 				   struct sk_buff *skb)
1681 {
1682 	struct hci_cp_le_set_scan_param *cp;
1683 	struct hci_ev_status *rp = data;
1684 
1685 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1686 
1687 	if (rp->status)
1688 		return rp->status;
1689 
1690 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1691 	if (!cp)
1692 		return rp->status;
1693 
1694 	hci_dev_lock(hdev);
1695 
1696 	hdev->le_scan_type = cp->type;
1697 
1698 	hci_dev_unlock(hdev);
1699 
1700 	return rp->status;
1701 }
1702 
1703 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1704 				       struct sk_buff *skb)
1705 {
1706 	struct hci_cp_le_set_ext_scan_params *cp;
1707 	struct hci_ev_status *rp = data;
1708 	struct hci_cp_le_scan_phy_params *phy_param;
1709 
1710 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1711 
1712 	if (rp->status)
1713 		return rp->status;
1714 
1715 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1716 	if (!cp)
1717 		return rp->status;
1718 
1719 	phy_param = (void *)cp->data;
1720 
1721 	hci_dev_lock(hdev);
1722 
1723 	hdev->le_scan_type = phy_param->type;
1724 
1725 	hci_dev_unlock(hdev);
1726 
1727 	return rp->status;
1728 }
1729 
1730 static bool has_pending_adv_report(struct hci_dev *hdev)
1731 {
1732 	struct discovery_state *d = &hdev->discovery;
1733 
1734 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1735 }
1736 
1737 static void clear_pending_adv_report(struct hci_dev *hdev)
1738 {
1739 	struct discovery_state *d = &hdev->discovery;
1740 
1741 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1742 	d->last_adv_data_len = 0;
1743 }
1744 
1745 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1746 				     u8 bdaddr_type, s8 rssi, u32 flags,
1747 				     u8 *data, u8 len)
1748 {
1749 	struct discovery_state *d = &hdev->discovery;
1750 
1751 	if (len > max_adv_len(hdev))
1752 		return;
1753 
1754 	bacpy(&d->last_adv_addr, bdaddr);
1755 	d->last_adv_addr_type = bdaddr_type;
1756 	d->last_adv_rssi = rssi;
1757 	d->last_adv_flags = flags;
1758 	memcpy(d->last_adv_data, data, len);
1759 	d->last_adv_data_len = len;
1760 }
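
/* During active scanning an advertising report is parked here rather
 * than sent to userspace immediately, so that it can be merged with the
 * scan response that normally follows it (the merge itself happens in
 * the report processing outside this excerpt). Any still-pending report
 * is flushed by the LE_SCAN_DISABLE branch of
 * le_set_scan_enable_complete() below.
 */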
1761 
1762 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1763 {
1764 	hci_dev_lock(hdev);
1765 
1766 	switch (enable) {
1767 	case LE_SCAN_ENABLE:
1768 		hci_dev_set_flag(hdev, HCI_LE_SCAN);
1769 		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1770 			clear_pending_adv_report(hdev);
1771 		if (hci_dev_test_flag(hdev, HCI_MESH))
1772 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1773 		break;
1774 
1775 	case LE_SCAN_DISABLE:
1776 		/* We do this here instead of when setting DISCOVERY_STOPPED
1777 		 * since the latter would potentially require waiting for
1778 		 * inquiry to stop too.
1779 		 */
1780 		if (has_pending_adv_report(hdev)) {
1781 			struct discovery_state *d = &hdev->discovery;
1782 
1783 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1784 					  d->last_adv_addr_type, NULL,
1785 					  d->last_adv_rssi, d->last_adv_flags,
1786 					  d->last_adv_data,
1787 					  d->last_adv_data_len, NULL, 0, 0);
1788 		}
1789 
1790 		/* Cancel this timer so that we don't try to disable scanning
1791 		 * when it's already disabled.
1792 		 */
1793 		cancel_delayed_work(&hdev->le_scan_disable);
1794 
1795 		hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1796 
1797 		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1798 		 * interrupted scanning due to a connect request. Therefore,
1799 		 * mark discovery as stopped.
1800 		 */
1801 		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1802 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1803 		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1804 			 hdev->discovery.state == DISCOVERY_FINDING)
1805 			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1806 
1807 		break;
1808 
1809 	default:
1810 		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1811 			   enable);
1812 		break;
1813 	}
1814 
1815 	hci_dev_unlock(hdev);
1816 }
1817 
1818 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1819 				    struct sk_buff *skb)
1820 {
1821 	struct hci_cp_le_set_scan_enable *cp;
1822 	struct hci_ev_status *rp = data;
1823 
1824 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1825 
1826 	if (rp->status)
1827 		return rp->status;
1828 
1829 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1830 	if (!cp)
1831 		return rp->status;
1832 
1833 	le_set_scan_enable_complete(hdev, cp->enable);
1834 
1835 	return rp->status;
1836 }
1837 
1838 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1839 					struct sk_buff *skb)
1840 {
1841 	struct hci_cp_le_set_ext_scan_enable *cp;
1842 	struct hci_ev_status *rp = data;
1843 
1844 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1845 
1846 	if (rp->status)
1847 		return rp->status;
1848 
1849 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1850 	if (!cp)
1851 		return rp->status;
1852 
1853 	le_set_scan_enable_complete(hdev, cp->enable);
1854 
1855 	return rp->status;
1856 }
1857 
1858 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1859 				      struct sk_buff *skb)
1860 {
1861 	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1862 
1863 	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1864 		   rp->num_of_sets);
1865 
1866 	if (rp->status)
1867 		return rp->status;
1868 
1869 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1870 
1871 	return rp->status;
1872 }
1873 
1874 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1875 					  struct sk_buff *skb)
1876 {
1877 	struct hci_rp_le_read_accept_list_size *rp = data;
1878 
1879 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1880 
1881 	if (rp->status)
1882 		return rp->status;
1883 
1884 	hdev->le_accept_list_size = rp->size;
1885 
1886 	return rp->status;
1887 }
1888 
1889 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1890 				      struct sk_buff *skb)
1891 {
1892 	struct hci_ev_status *rp = data;
1893 
1894 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1895 
1896 	if (rp->status)
1897 		return rp->status;
1898 
1899 	hci_dev_lock(hdev);
1900 	hci_bdaddr_list_clear(&hdev->le_accept_list);
1901 	hci_dev_unlock(hdev);
1902 
1903 	return rp->status;
1904 }
1905 
1906 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1907 				       struct sk_buff *skb)
1908 {
1909 	struct hci_cp_le_add_to_accept_list *sent;
1910 	struct hci_ev_status *rp = data;
1911 
1912 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1913 
1914 	if (rp->status)
1915 		return rp->status;
1916 
1917 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1918 	if (!sent)
1919 		return rp->status;
1920 
1921 	hci_dev_lock(hdev);
1922 	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1923 			    sent->bdaddr_type);
1924 	hci_dev_unlock(hdev);
1925 
1926 	return rp->status;
1927 }
1928 
1929 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1930 					 struct sk_buff *skb)
1931 {
1932 	struct hci_cp_le_del_from_accept_list *sent;
1933 	struct hci_ev_status *rp = data;
1934 
1935 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1936 
1937 	if (rp->status)
1938 		return rp->status;
1939 
1940 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1941 	if (!sent)
1942 		return rp->status;
1943 
1944 	hci_dev_lock(hdev);
1945 	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1946 			    sent->bdaddr_type);
1947 	hci_dev_unlock(hdev);
1948 
1949 	return rp->status;
1950 }
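
/* The clear/add/del handlers above keep hdev->le_accept_list as a
 * host-side mirror of the controller's filter accept list; this is the
 * same list hci_cc_reset() drops, so host state stays consistent with
 * the hardware without extra queries.
 */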
1951 
1952 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1953 					  struct sk_buff *skb)
1954 {
1955 	struct hci_rp_le_read_supported_states *rp = data;
1956 
1957 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1958 
1959 	if (rp->status)
1960 		return rp->status;
1961 
1962 	memcpy(hdev->le_states, rp->le_states, 8);
1963 
1964 	return rp->status;
1965 }
1966 
1967 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1968 				      struct sk_buff *skb)
1969 {
1970 	struct hci_rp_le_read_def_data_len *rp = data;
1971 
1972 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1973 
1974 	if (rp->status)
1975 		return rp->status;
1976 
1977 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1978 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1979 
1980 	return rp->status;
1981 }
1982 
1983 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1984 				       struct sk_buff *skb)
1985 {
1986 	struct hci_cp_le_write_def_data_len *sent;
1987 	struct hci_ev_status *rp = data;
1988 
1989 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1990 
1991 	if (rp->status)
1992 		return rp->status;
1993 
1994 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1995 	if (!sent)
1996 		return rp->status;
1997 
1998 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1999 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2000 
2001 	return rp->status;
2002 }
2003 
2004 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2005 				       struct sk_buff *skb)
2006 {
2007 	struct hci_cp_le_add_to_resolv_list *sent;
2008 	struct hci_ev_status *rp = data;
2009 
2010 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2011 
2012 	if (rp->status)
2013 		return rp->status;
2014 
2015 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2016 	if (!sent)
2017 		return rp->status;
2018 
2019 	hci_dev_lock(hdev);
2020 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2021 				sent->bdaddr_type, sent->peer_irk,
2022 				sent->local_irk);
2023 	hci_dev_unlock(hdev);
2024 
2025 	return rp->status;
2026 }
2027 
2028 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2029 					 struct sk_buff *skb)
2030 {
2031 	struct hci_cp_le_del_from_resolv_list *sent;
2032 	struct hci_ev_status *rp = data;
2033 
2034 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2035 
2036 	if (rp->status)
2037 		return rp->status;
2038 
2039 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2040 	if (!sent)
2041 		return rp->status;
2042 
2043 	hci_dev_lock(hdev);
2044 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2045 			    sent->bdaddr_type);
2046 	hci_dev_unlock(hdev);
2047 
2048 	return rp->status;
2049 }
2050 
2051 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2052 				      struct sk_buff *skb)
2053 {
2054 	struct hci_ev_status *rp = data;
2055 
2056 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2057 
2058 	if (rp->status)
2059 		return rp->status;
2060 
2061 	hci_dev_lock(hdev);
2062 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2063 	hci_dev_unlock(hdev);
2064 
2065 	return rp->status;
2066 }
2067 
2068 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2069 					  struct sk_buff *skb)
2070 {
2071 	struct hci_rp_le_read_resolv_list_size *rp = data;
2072 
2073 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2074 
2075 	if (rp->status)
2076 		return rp->status;
2077 
2078 	hdev->le_resolv_list_size = rp->size;
2079 
2080 	return rp->status;
2081 }
2082 
2083 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2084 					       struct sk_buff *skb)
2085 {
2086 	struct hci_ev_status *rp = data;
2087 	__u8 *sent;
2088 
2089 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2090 
2091 	if (rp->status)
2092 		return rp->status;
2093 
2094 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2095 	if (!sent)
2096 		return rp->status;
2097 
2098 	hci_dev_lock(hdev);
2099 
2100 	if (*sent)
2101 		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2102 	else
2103 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2104 
2105 	hci_dev_unlock(hdev);
2106 
2107 	return rp->status;
2108 }
2109 
2110 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2111 				      struct sk_buff *skb)
2112 {
2113 	struct hci_rp_le_read_max_data_len *rp = data;
2114 
2115 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2116 
2117 	if (rp->status)
2118 		return rp->status;
2119 
2120 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2121 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2122 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2123 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2124 
2125 	return rp->status;
2126 }
2127 
2128 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2129 					 struct sk_buff *skb)
2130 {
2131 	struct hci_cp_write_le_host_supported *sent;
2132 	struct hci_ev_status *rp = data;
2133 
2134 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2135 
2136 	if (rp->status)
2137 		return rp->status;
2138 
2139 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2140 	if (!sent)
2141 		return rp->status;
2142 
2143 	hci_dev_lock(hdev);
2144 
2145 	if (sent->le) {
2146 		hdev->features[1][0] |= LMP_HOST_LE;
2147 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2148 	} else {
2149 		hdev->features[1][0] &= ~LMP_HOST_LE;
2150 		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2151 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2152 	}
2153 
2154 	if (sent->simul)
2155 		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2156 	else
2157 		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2158 
2159 	hci_dev_unlock(hdev);
2160 
2161 	return rp->status;
2162 }
2163 
2164 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2165 			       struct sk_buff *skb)
2166 {
2167 	struct hci_cp_le_set_adv_param *cp;
2168 	struct hci_ev_status *rp = data;
2169 
2170 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2171 
2172 	if (rp->status)
2173 		return rp->status;
2174 
2175 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2176 	if (!cp)
2177 		return rp->status;
2178 
2179 	hci_dev_lock(hdev);
2180 	hdev->adv_addr_type = cp->own_address_type;
2181 	hci_dev_unlock(hdev);
2182 
2183 	return rp->status;
2184 }
2185 
2186 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2187 				   struct sk_buff *skb)
2188 {
2189 	struct hci_rp_le_set_ext_adv_params *rp = data;
2190 	struct hci_cp_le_set_ext_adv_params *cp;
2191 	struct adv_info *adv_instance;
2192 
2193 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2194 
2195 	if (rp->status)
2196 		return rp->status;
2197 
2198 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2199 	if (!cp)
2200 		return rp->status;
2201 
2202 	hci_dev_lock(hdev);
2203 	hdev->adv_addr_type = cp->own_addr_type;
2204 	if (!cp->handle) {
2205 		/* Store in hdev for instance 0 */
2206 		hdev->adv_tx_power = rp->tx_power;
2207 	} else {
2208 		adv_instance = hci_find_adv_instance(hdev, cp->handle);
2209 		if (adv_instance)
2210 			adv_instance->tx_power = rp->tx_power;
2211 	}
2212 	/* Update adv data as tx power is known now */
2213 	/* Update adv data now that the tx power is known */
2214 
2215 	hci_dev_unlock(hdev);
2216 
2217 	return rp->status;
2218 }
2219 
2220 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2221 			   struct sk_buff *skb)
2222 {
2223 	struct hci_rp_read_rssi *rp = data;
2224 	struct hci_conn *conn;
2225 
2226 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2227 
2228 	if (rp->status)
2229 		return rp->status;
2230 
2231 	hci_dev_lock(hdev);
2232 
2233 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2234 	if (conn)
2235 		conn->rssi = rp->rssi;
2236 
2237 	hci_dev_unlock(hdev);
2238 
2239 	return rp->status;
2240 }
2241 
2242 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2243 			       struct sk_buff *skb)
2244 {
2245 	struct hci_cp_read_tx_power *sent;
2246 	struct hci_rp_read_tx_power *rp = data;
2247 	struct hci_conn *conn;
2248 
2249 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2250 
2251 	if (rp->status)
2252 		return rp->status;
2253 
2254 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2255 	if (!sent)
2256 		return rp->status;
2257 
2258 	hci_dev_lock(hdev);
2259 
2260 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2261 	if (!conn)
2262 		goto unlock;
2263 
2264 	switch (sent->type) {
2265 	case 0x00:
2266 		conn->tx_power = rp->tx_power;
2267 		break;
2268 	case 0x01:
2269 		conn->max_tx_power = rp->tx_power;
2270 		break;
2271 	}
2272 
2273 unlock:
2274 	hci_dev_unlock(hdev);
2275 	return rp->status;
2276 }
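
/* Illustrative sketch: Read_Transmit_Power_Level takes a type selector
 * matching the switch above. Example values only:
 *
 *	struct hci_cp_read_tx_power cp;
 *
 *	cp.handle = cpu_to_le16(conn->handle);
 *	cp.type = 0x00;		// 0x00 = current level, 0x01 = maximum
 *	hci_send_cmd(hdev, HCI_OP_READ_TX_POWER, sizeof(cp), &cp);
 */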
2277 
2278 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2279 				      struct sk_buff *skb)
2280 {
2281 	struct hci_ev_status *rp = data;
2282 	u8 *mode;
2283 
2284 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2285 
2286 	if (rp->status)
2287 		return rp->status;
2288 
2289 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2290 	if (mode)
2291 		hdev->ssp_debug_mode = *mode;
2292 
2293 	return rp->status;
2294 }
2295 
2296 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2297 {
2298 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2299 
2300 	if (status) {
2301 		hci_conn_check_pending(hdev);
2302 		return;
2303 	}
2304 
2305 	set_bit(HCI_INQUIRY, &hdev->flags);
2306 }
2307 
2308 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2309 {
2310 	struct hci_cp_create_conn *cp;
2311 	struct hci_conn *conn;
2312 
2313 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2314 
2315 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2316 	if (!cp)
2317 		return;
2318 
2319 	hci_dev_lock(hdev);
2320 
2321 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2322 
2323 	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2324 
2325 	if (status) {
2326 		if (conn && conn->state == BT_CONNECT) {
2327 			if (status != 0x0c || conn->attempt > 2) {
2328 				conn->state = BT_CLOSED;
2329 				hci_connect_cfm(conn, status);
2330 				hci_conn_del(conn);
2331 			} else
2332 				conn->state = BT_CONNECT2;
2333 		}
2334 	} else {
2335 		if (!conn) {
2336 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2337 					    HCI_ROLE_MASTER);
2338 			if (!conn)
2339 				bt_dev_err(hdev, "no memory for new connection");
2340 		}
2341 	}
2342 
2343 	hci_dev_unlock(hdev);
2344 }
2345 
2346 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2347 {
2348 	struct hci_cp_add_sco *cp;
2349 	struct hci_conn *acl;
2350 	struct hci_link *link;
2351 	__u16 handle;
2352 
2353 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2354 
2355 	if (!status)
2356 		return;
2357 
2358 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2359 	if (!cp)
2360 		return;
2361 
2362 	handle = __le16_to_cpu(cp->handle);
2363 
2364 	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2365 
2366 	hci_dev_lock(hdev);
2367 
2368 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2369 	if (acl) {
2370 		link = list_first_entry_or_null(&acl->link_list,
2371 						struct hci_link, list);
2372 		if (link && link->conn) {
2373 			link->conn->state = BT_CLOSED;
2374 
2375 			hci_connect_cfm(link->conn, status);
2376 			hci_conn_del(link->conn);
2377 		}
2378 	}
2379 
2380 	hci_dev_unlock(hdev);
2381 }
2382 
2383 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2384 {
2385 	struct hci_cp_auth_requested *cp;
2386 	struct hci_conn *conn;
2387 
2388 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2389 
2390 	if (!status)
2391 		return;
2392 
2393 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2394 	if (!cp)
2395 		return;
2396 
2397 	hci_dev_lock(hdev);
2398 
2399 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2400 	if (conn) {
2401 		if (conn->state == BT_CONFIG) {
2402 			hci_connect_cfm(conn, status);
2403 			hci_conn_drop(conn);
2404 		}
2405 	}
2406 
2407 	hci_dev_unlock(hdev);
2408 }
2409 
2410 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2411 {
2412 	struct hci_cp_set_conn_encrypt *cp;
2413 	struct hci_conn *conn;
2414 
2415 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2416 
2417 	if (!status)
2418 		return;
2419 
2420 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2421 	if (!cp)
2422 		return;
2423 
2424 	hci_dev_lock(hdev);
2425 
2426 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2427 	if (conn) {
2428 		if (conn->state == BT_CONFIG) {
2429 			hci_connect_cfm(conn, status);
2430 			hci_conn_drop(conn);
2431 		}
2432 	}
2433 
2434 	hci_dev_unlock(hdev);
2435 }
2436 
2437 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2438 				    struct hci_conn *conn)
2439 {
2440 	if (conn->state != BT_CONFIG || !conn->out)
2441 		return 0;
2442 
2443 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2444 		return 0;
2445 
2446 	/* Only request authentication for SSP connections or non-SSP
2447 	 * devices with sec_level MEDIUM, HIGH or FIPS, or if MITM
2448 	 * protection is requested.
2449 	 */
2450 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2451 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2452 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2453 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2454 		return 0;
2455 
2456 	return 1;
2457 }
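
/* Summary of the predicate above, derived directly from the code:
 * authentication is requested for an outgoing link in BT_CONFIG whose
 * pending security level is not SDP, when any of the following holds:
 *
 *	hci_conn_ssp_enabled(conn)	// SSP is enabled on the link
 *	conn->auth_type & 0x01		// MITM protection was requested
 *	pending_sec_level is MEDIUM, HIGH or FIPS
 */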
2458 
2459 static int hci_resolve_name(struct hci_dev *hdev,
2460 				   struct inquiry_entry *e)
2461 {
2462 	struct hci_cp_remote_name_req cp;
2463 
2464 	memset(&cp, 0, sizeof(cp));
2465 
2466 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2467 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2468 	cp.pscan_mode = e->data.pscan_mode;
2469 	cp.clock_offset = e->data.clock_offset;
2470 
2471 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2472 }
2473 
2474 static bool hci_resolve_next_name(struct hci_dev *hdev)
2475 {
2476 	struct discovery_state *discov = &hdev->discovery;
2477 	struct inquiry_entry *e;
2478 
2479 	if (list_empty(&discov->resolve))
2480 		return false;
2481 
2482 	/* We should stop if we already spent too much time resolving names. */
2483 	/* Stop if we have already spent too much time resolving names. */
2484 		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2485 		return false;
2486 	}
2487 
2488 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2489 	if (!e)
2490 		return false;
2491 
2492 	if (hci_resolve_name(hdev, e) == 0) {
2493 		e->name_state = NAME_PENDING;
2494 		return true;
2495 	}
2496 
2497 	return false;
2498 }
2499 
2500 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2501 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2502 {
2503 	struct discovery_state *discov = &hdev->discovery;
2504 	struct inquiry_entry *e;
2505 
2506 	/* Update the mgmt connected state if necessary. Be careful,
2507 	 * however, with conn objects that exist but are not (yet)
2508 	 * connected: only those in BT_CONFIG or BT_CONNECTED states
2509 	 * can be considered connected.
2510 	 */
2511 	if (conn &&
2512 	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2513 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2514 		mgmt_device_connected(hdev, conn, name, name_len);
2515 
2516 	if (discov->state == DISCOVERY_STOPPED)
2517 		return;
2518 
2519 	if (discov->state == DISCOVERY_STOPPING)
2520 		goto discov_complete;
2521 
2522 	if (discov->state != DISCOVERY_RESOLVING)
2523 		return;
2524 
2525 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2526 	/* If the device was not found in the list of devices whose names
2527 	 * are pending resolution, there is no need to resolve the next
2528 	 * name here, as that will be done upon receiving another Remote
2529 	 * Name Request Complete event. */
2530 	if (!e)
2531 		return;
2532 
2533 	list_del(&e->list);
2534 
2535 	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2536 	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2537 			 name, name_len);
2538 
2539 	if (hci_resolve_next_name(hdev))
2540 		return;
2541 
2542 discov_complete:
2543 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2544 }
2545 
2546 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2547 {
2548 	struct hci_cp_remote_name_req *cp;
2549 	struct hci_conn *conn;
2550 
2551 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2552 
2553 	/* If successful, wait for the name req complete event before
2554 	 * checking whether authentication is needed. */
2555 	if (!status)
2556 		return;
2557 
2558 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2559 	if (!cp)
2560 		return;
2561 
2562 	hci_dev_lock(hdev);
2563 
2564 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2565 
2566 	if (hci_dev_test_flag(hdev, HCI_MGMT))
2567 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2568 
2569 	if (!conn)
2570 		goto unlock;
2571 
2572 	if (!hci_outgoing_auth_needed(hdev, conn))
2573 		goto unlock;
2574 
2575 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2576 		struct hci_cp_auth_requested auth_cp;
2577 
2578 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2579 
2580 		auth_cp.handle = __cpu_to_le16(conn->handle);
2581 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2582 			     sizeof(auth_cp), &auth_cp);
2583 	}
2584 
2585 unlock:
2586 	hci_dev_unlock(hdev);
2587 }
2588 
2589 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2590 {
2591 	struct hci_cp_read_remote_features *cp;
2592 	struct hci_conn *conn;
2593 
2594 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2595 
2596 	if (!status)
2597 		return;
2598 
2599 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2600 	if (!cp)
2601 		return;
2602 
2603 	hci_dev_lock(hdev);
2604 
2605 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2606 	if (conn) {
2607 		if (conn->state == BT_CONFIG) {
2608 			hci_connect_cfm(conn, status);
2609 			hci_conn_drop(conn);
2610 		}
2611 	}
2612 
2613 	hci_dev_unlock(hdev);
2614 }
2615 
2616 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2617 {
2618 	struct hci_cp_read_remote_ext_features *cp;
2619 	struct hci_conn *conn;
2620 
2621 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2622 
2623 	if (!status)
2624 		return;
2625 
2626 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2627 	if (!cp)
2628 		return;
2629 
2630 	hci_dev_lock(hdev);
2631 
2632 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2633 	if (conn) {
2634 		if (conn->state == BT_CONFIG) {
2635 			hci_connect_cfm(conn, status);
2636 			hci_conn_drop(conn);
2637 		}
2638 	}
2639 
2640 	hci_dev_unlock(hdev);
2641 }
2642 
2643 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2644 				       __u8 status)
2645 {
2646 	struct hci_conn *acl;
2647 	struct hci_link *link;
2648 
2649 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2650 
2651 	hci_dev_lock(hdev);
2652 
2653 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2654 	if (acl) {
2655 		link = list_first_entry_or_null(&acl->link_list,
2656 						struct hci_link, list);
2657 		if (link && link->conn) {
2658 			link->conn->state = BT_CLOSED;
2659 
2660 			hci_connect_cfm(link->conn, status);
2661 			hci_conn_del(link->conn);
2662 		}
2663 	}
2664 
2665 	hci_dev_unlock(hdev);
2666 }
2667 
2668 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2669 {
2670 	struct hci_cp_setup_sync_conn *cp;
2671 
2672 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2673 
2674 	if (!status)
2675 		return;
2676 
2677 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2678 	if (!cp)
2679 		return;
2680 
2681 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2682 }
2683 
2684 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2685 {
2686 	struct hci_cp_enhanced_setup_sync_conn *cp;
2687 
2688 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2689 
2690 	if (!status)
2691 		return;
2692 
2693 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2694 	if (!cp)
2695 		return;
2696 
2697 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2698 }
2699 
2700 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2701 {
2702 	struct hci_cp_sniff_mode *cp;
2703 	struct hci_conn *conn;
2704 
2705 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2706 
2707 	if (!status)
2708 		return;
2709 
2710 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2711 	if (!cp)
2712 		return;
2713 
2714 	hci_dev_lock(hdev);
2715 
2716 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2717 	if (conn) {
2718 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2719 
2720 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2721 			hci_sco_setup(conn, status);
2722 	}
2723 
2724 	hci_dev_unlock(hdev);
2725 }
2726 
2727 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2728 {
2729 	struct hci_cp_exit_sniff_mode *cp;
2730 	struct hci_conn *conn;
2731 
2732 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2733 
2734 	if (!status)
2735 		return;
2736 
2737 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2738 	if (!cp)
2739 		return;
2740 
2741 	hci_dev_lock(hdev);
2742 
2743 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2744 	if (conn) {
2745 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2746 
2747 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2748 			hci_sco_setup(conn, status);
2749 	}
2750 
2751 	hci_dev_unlock(hdev);
2752 }
2753 
2754 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2755 {
2756 	struct hci_cp_disconnect *cp;
2757 	struct hci_conn_params *params;
2758 	struct hci_conn *conn;
2759 	bool mgmt_conn;
2760 
2761 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2762 
2763 	/* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and the device
2764 	 * is not suspended; otherwise clean up the connection immediately.
2765 	 */
2766 	if (!status && !hdev->suspended)
2767 		return;
2768 
2769 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2770 	if (!cp)
2771 		return;
2772 
2773 	hci_dev_lock(hdev);
2774 
2775 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2776 	if (!conn)
2777 		goto unlock;
2778 
2779 	if (status) {
2780 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2781 				       conn->dst_type, status);
2782 
2783 		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2784 			hdev->cur_adv_instance = conn->adv_instance;
2785 			hci_enable_advertising(hdev);
2786 		}
2787 
2788 		/* Inform sockets conn is gone before we delete it */
2789 		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2790 
2791 		goto done;
2792 	}
2793 
2794 	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2795 
2796 	if (conn->type == ACL_LINK) {
2797 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2798 			hci_remove_link_key(hdev, &conn->dst);
2799 	}
2800 
2801 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2802 	if (params) {
2803 		switch (params->auto_connect) {
2804 		case HCI_AUTO_CONN_LINK_LOSS:
2805 			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2806 				break;
2807 			fallthrough;
2808 
2809 		case HCI_AUTO_CONN_DIRECT:
2810 		case HCI_AUTO_CONN_ALWAYS:
2811 			hci_pend_le_list_del_init(params);
2812 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2813 			break;
2814 
2815 		default:
2816 			break;
2817 		}
2818 	}
2819 
2820 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2821 				 cp->reason, mgmt_conn);
2822 
2823 	hci_disconn_cfm(conn, cp->reason);
2824 
2825 done:
2826 	/* If the disconnection failed for any reason, the upper layer
2827 	 * does not retry the disconnect in the current implementation.
2828 	 * Hence, we need to do some basic cleanup here and re-enable
2829 	 * advertising if necessary.
2830 	 */
2831 	hci_conn_del(conn);
2832 unlock:
2833 	hci_dev_unlock(hdev);
2834 }
2835 
2836 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2837 {
2838 	/* When controller based address resolution is in use, the new
2839 	 * address types 0x02 and 0x03 are used. These types need to be
2840 	 * converted back into either public or random address type.
2841 	 */
2842 	switch (type) {
2843 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2844 		if (resolved)
2845 			*resolved = true;
2846 		return ADDR_LE_DEV_PUBLIC;
2847 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2848 		if (resolved)
2849 			*resolved = true;
2850 		return ADDR_LE_DEV_RANDOM;
2851 	}
2852 
2853 	if (resolved)
2854 		*resolved = false;
2855 	return type;
2856 }
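
/* Illustrative usage of the helper above; the results follow directly
 * from the switch:
 *
 *	bool resolved;
 *	u8 type = ev_bdaddr_type(hdev, ADDR_LE_DEV_PUBLIC_RESOLVED,
 *				 &resolved);
 *	// type == ADDR_LE_DEV_PUBLIC, resolved == true
 */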
2857 
2858 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2859 			      u8 peer_addr_type, u8 own_address_type,
2860 			      u8 filter_policy)
2861 {
2862 	struct hci_conn *conn;
2863 
2864 	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2865 				       peer_addr_type);
2866 	if (!conn)
2867 		return;
2868 
2869 	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2870 
2871 	/* Store the initiator and responder address information which
2872 	 * is needed for SMP. These values will not change during the
2873 	 * lifetime of the connection.
2874 	 */
2875 	conn->init_addr_type = own_address_type;
2876 	if (own_address_type == ADDR_LE_DEV_RANDOM)
2877 		bacpy(&conn->init_addr, &hdev->random_addr);
2878 	else
2879 		bacpy(&conn->init_addr, &hdev->bdaddr);
2880 
2881 	conn->resp_addr_type = peer_addr_type;
2882 	bacpy(&conn->resp_addr, peer_addr);
2883 }
2884 
2885 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2886 {
2887 	struct hci_cp_le_create_conn *cp;
2888 
2889 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2890 
2891 	/* All connection failure handling is taken care of by the
2892 	 * hci_conn_failed function which is triggered by the HCI
2893 	 * request completion callbacks used for connecting.
2894 	 */
2895 	if (status)
2896 		return;
2897 
2898 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2899 	if (!cp)
2900 		return;
2901 
2902 	hci_dev_lock(hdev);
2903 
2904 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2905 			  cp->own_address_type, cp->filter_policy);
2906 
2907 	hci_dev_unlock(hdev);
2908 }
2909 
2910 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2911 {
2912 	struct hci_cp_le_ext_create_conn *cp;
2913 
2914 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2915 
2916 	/* All connection failure handling is taken care of by the
2917 	 * hci_conn_failed function which is triggered by the HCI
2918 	 * request completion callbacks used for connecting.
2919 	 */
2920 	if (status)
2921 		return;
2922 
2923 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2924 	if (!cp)
2925 		return;
2926 
2927 	hci_dev_lock(hdev);
2928 
2929 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2930 			  cp->own_addr_type, cp->filter_policy);
2931 
2932 	hci_dev_unlock(hdev);
2933 }
2934 
2935 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2936 {
2937 	struct hci_cp_le_read_remote_features *cp;
2938 	struct hci_conn *conn;
2939 
2940 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2941 
2942 	if (!status)
2943 		return;
2944 
2945 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2946 	if (!cp)
2947 		return;
2948 
2949 	hci_dev_lock(hdev);
2950 
2951 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2952 	if (conn) {
2953 		if (conn->state == BT_CONFIG) {
2954 			hci_connect_cfm(conn, status);
2955 			hci_conn_drop(conn);
2956 		}
2957 	}
2958 
2959 	hci_dev_unlock(hdev);
2960 }
2961 
2962 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2963 {
2964 	struct hci_cp_le_start_enc *cp;
2965 	struct hci_conn *conn;
2966 
2967 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2968 
2969 	if (!status)
2970 		return;
2971 
2972 	hci_dev_lock(hdev);
2973 
2974 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2975 	if (!cp)
2976 		goto unlock;
2977 
2978 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2979 	if (!conn)
2980 		goto unlock;
2981 
2982 	if (conn->state != BT_CONNECTED)
2983 		goto unlock;
2984 
2985 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2986 	hci_conn_drop(conn);
2987 
2988 unlock:
2989 	hci_dev_unlock(hdev);
2990 }
2991 
2992 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2993 {
2994 	struct hci_cp_switch_role *cp;
2995 	struct hci_conn *conn;
2996 
2997 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2998 
2999 	if (!status)
3000 		return;
3001 
3002 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3003 	if (!cp)
3004 		return;
3005 
3006 	hci_dev_lock(hdev);
3007 
3008 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3009 	if (conn)
3010 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3011 
3012 	hci_dev_unlock(hdev);
3013 }
3014 
3015 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3016 				     struct sk_buff *skb)
3017 {
3018 	struct hci_ev_status *ev = data;
3019 	struct discovery_state *discov = &hdev->discovery;
3020 	struct inquiry_entry *e;
3021 
3022 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3023 
3024 	hci_conn_check_pending(hdev);
3025 
3026 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3027 		return;
3028 
3029 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3030 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
3031 
3032 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3033 		return;
3034 
3035 	hci_dev_lock(hdev);
3036 
3037 	if (discov->state != DISCOVERY_FINDING)
3038 		goto unlock;
3039 
3040 	if (list_empty(&discov->resolve)) {
3041 		/* When BR/EDR inquiry is active and no LE scanning is in
3042 		 * progress, then change discovery state to indicate completion.
3043 		 *
3044 		 * When running LE scanning and BR/EDR inquiry simultaneously
3045 		 * and the LE scan already finished, then change the discovery
3046 		 * state to indicate completion.
3047 		 */
3048 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3049 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3050 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3051 		goto unlock;
3052 	}
3053 
3054 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3055 	if (e && hci_resolve_name(hdev, e) == 0) {
3056 		e->name_state = NAME_PENDING;
3057 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3058 		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3059 	} else {
3060 		/* When BR/EDR inquiry is active and no LE scanning is in
3061 		 * progress, then change discovery state to indicate completion.
3062 		 *
3063 		 * When running LE scanning and BR/EDR inquiry simultaneously
3064 		 * and the LE scan already finished, then change the discovery
3065 		 * state to indicate completion.
3066 		 */
3067 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3068 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3069 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3070 	}
3071 
3072 unlock:
3073 	hci_dev_unlock(hdev);
3074 }
3075 
3076 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3077 				   struct sk_buff *skb)
3078 {
3079 	struct hci_ev_inquiry_result *ev = edata;
3080 	struct inquiry_data data;
3081 	int i;
3082 
3083 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3084 			     flex_array_size(ev, info, ev->num)))
3085 		return;
3086 
3087 	bt_dev_dbg(hdev, "num %d", ev->num);
3088 
3089 	if (!ev->num)
3090 		return;
3091 
3092 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3093 		return;
3094 
3095 	hci_dev_lock(hdev);
3096 
3097 	for (i = 0; i < ev->num; i++) {
3098 		struct inquiry_info *info = &ev->info[i];
3099 		u32 flags;
3100 
3101 		bacpy(&data.bdaddr, &info->bdaddr);
3102 		data.pscan_rep_mode	= info->pscan_rep_mode;
3103 		data.pscan_period_mode	= info->pscan_period_mode;
3104 		data.pscan_mode		= info->pscan_mode;
3105 		memcpy(data.dev_class, info->dev_class, 3);
3106 		data.clock_offset	= info->clock_offset;
3107 		data.rssi		= HCI_RSSI_INVALID;
3108 		data.ssp_mode		= 0x00;
3109 
3110 		flags = hci_inquiry_cache_update(hdev, &data, false);
3111 
3112 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3113 				  info->dev_class, HCI_RSSI_INVALID,
3114 				  flags, NULL, 0, NULL, 0, 0);
3115 	}
3116 
3117 	hci_dev_unlock(hdev);
3118 }
3119 
3120 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3121 				  struct sk_buff *skb)
3122 {
3123 	struct hci_ev_conn_complete *ev = data;
3124 	struct hci_conn *conn;
3125 	u8 status = ev->status;
3126 
3127 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3128 
3129 	hci_dev_lock(hdev);
3130 
3131 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3132 	if (!conn) {
3133 		/* In case of error status and there is no connection pending
3134 		 * just unlock as there is nothing to cleanup.
3135 		 */
3136 		if (ev->status)
3137 			goto unlock;
3138 
3139 		/* Connection may not exist if auto-connected. Check the bredr
3140 		 * allowlist to see if this device is allowed to auto connect.
3141 		 * If the link is an ACL type, create the connection
3142 		 * automatically.
3143 		 *
3144 		 * Auto-connect will only occur if the event filter is
3145 		 * programmed with a given address. Right now, the event
3146 		 * filter is only used during suspend.
3147 		 */
3148 		if (ev->link_type == ACL_LINK &&
3149 		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3150 						      &ev->bdaddr,
3151 						      BDADDR_BREDR)) {
3152 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3153 					    HCI_ROLE_SLAVE);
3154 			if (!conn) {
3155 				bt_dev_err(hdev, "no memory for new conn");
3156 				goto unlock;
3157 			}
3158 		} else {
3159 			if (ev->link_type != SCO_LINK)
3160 				goto unlock;
3161 
3162 			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3163 						       &ev->bdaddr);
3164 			if (!conn)
3165 				goto unlock;
3166 
3167 			conn->type = SCO_LINK;
3168 		}
3169 	}
3170 
3171 	/* The HCI_Connection_Complete event is only sent once per connection.
3172 	 * Processing it more than once per connection can corrupt kernel memory.
3173 	 *
3174 	 * As the connection handle is set here for the first time, it indicates
3175 	 * whether the connection is already set up.
3176 	 */
3177 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3178 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3179 		goto unlock;
3180 	}
3181 
3182 	if (!status) {
3183 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3184 		if (status)
3185 			goto done;
3186 
3187 		if (conn->type == ACL_LINK) {
3188 			conn->state = BT_CONFIG;
3189 			hci_conn_hold(conn);
3190 
3191 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3192 			    !hci_find_link_key(hdev, &ev->bdaddr))
3193 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3194 			else
3195 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3196 		} else
3197 			conn->state = BT_CONNECTED;
3198 
3199 		hci_debugfs_create_conn(conn);
3200 		hci_conn_add_sysfs(conn);
3201 
3202 		if (test_bit(HCI_AUTH, &hdev->flags))
3203 			set_bit(HCI_CONN_AUTH, &conn->flags);
3204 
3205 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3206 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3207 
3208 		/* Get remote features */
3209 		if (conn->type == ACL_LINK) {
3210 			struct hci_cp_read_remote_features cp;
3211 			cp.handle = ev->handle;
3212 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3213 				     sizeof(cp), &cp);
3214 
3215 			hci_update_scan(hdev);
3216 		}
3217 
3218 		/* Set packet type for incoming connection */
3219 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3220 			struct hci_cp_change_conn_ptype cp;
3221 			cp.handle = ev->handle;
3222 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3223 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3224 				     &cp);
3225 		}
3226 	}
3227 
3228 	if (conn->type == ACL_LINK)
3229 		hci_sco_setup(conn, ev->status);
3230 
3231 done:
3232 	if (status) {
3233 		hci_conn_failed(conn, status);
3234 	} else if (ev->link_type == SCO_LINK) {
3235 		switch (conn->setting & SCO_AIRMODE_MASK) {
3236 		case SCO_AIRMODE_CVSD:
3237 			if (hdev->notify)
3238 				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3239 			break;
3240 		}
3241 
3242 		hci_connect_cfm(conn, status);
3243 	}
3244 
3245 unlock:
3246 	hci_dev_unlock(hdev);
3247 
3248 	hci_conn_check_pending(hdev);
3249 }
3250 
3251 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3252 {
3253 	struct hci_cp_reject_conn_req cp;
3254 
3255 	bacpy(&cp.bdaddr, bdaddr);
3256 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3257 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3258 }
3259 
3260 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3261 				 struct sk_buff *skb)
3262 {
3263 	struct hci_ev_conn_request *ev = data;
3264 	int mask = hdev->link_mode;
3265 	struct inquiry_entry *ie;
3266 	struct hci_conn *conn;
3267 	__u8 flags = 0;
3268 
3269 	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3270 
3271 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3272 				      &flags);
3273 
3274 	if (!(mask & HCI_LM_ACCEPT)) {
3275 		hci_reject_conn(hdev, &ev->bdaddr);
3276 		return;
3277 	}
3278 
3279 	hci_dev_lock(hdev);
3280 
3281 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3282 				   BDADDR_BREDR)) {
3283 		hci_reject_conn(hdev, &ev->bdaddr);
3284 		goto unlock;
3285 	}
3286 
3287 	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3288 	 * connection. These features are only touched through mgmt so
3289 	 * only do the checks if HCI_MGMT is set.
3290 	 */
3291 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3292 	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3293 	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3294 					       BDADDR_BREDR)) {
3295 		hci_reject_conn(hdev, &ev->bdaddr);
3296 		goto unlock;
3297 	}
3298 
3299 	/* Connection accepted */
3300 
3301 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3302 	if (ie)
3303 		memcpy(ie->data.dev_class, ev->dev_class, 3);
3304 
3305 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3306 			&ev->bdaddr);
3307 	if (!conn) {
3308 		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3309 				    HCI_ROLE_SLAVE);
3310 		if (!conn) {
3311 			bt_dev_err(hdev, "no memory for new connection");
3312 			goto unlock;
3313 		}
3314 	}
3315 
3316 	memcpy(conn->dev_class, ev->dev_class, 3);
3317 
3318 	hci_dev_unlock(hdev);
3319 
3320 	if (ev->link_type == ACL_LINK ||
3321 	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3322 		struct hci_cp_accept_conn_req cp;
3323 		conn->state = BT_CONNECT;
3324 
3325 		bacpy(&cp.bdaddr, &ev->bdaddr);
3326 
3327 		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3328 			cp.role = 0x00; /* Become central */
3329 		else
3330 			cp.role = 0x01; /* Remain peripheral */
3331 
3332 		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3333 	} else if (!(flags & HCI_PROTO_DEFER)) {
3334 		struct hci_cp_accept_sync_conn_req cp;
3335 		conn->state = BT_CONNECT;
3336 
3337 		bacpy(&cp.bdaddr, &ev->bdaddr);
3338 		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3339 
3340 		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3341 		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3342 		cp.max_latency    = cpu_to_le16(0xffff);
3343 		cp.content_format = cpu_to_le16(hdev->voice_setting);
3344 		cp.retrans_effort = 0xff;
3345 
3346 		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3347 			     &cp);
3348 	} else {
3349 		conn->state = BT_CONNECT2;
3350 		hci_connect_cfm(conn, 0);
3351 	}
3352 
3353 	return;
3354 unlock:
3355 	hci_dev_unlock(hdev);
3356 }
3357 
3358 static u8 hci_to_mgmt_reason(u8 err)
3359 {
3360 	switch (err) {
3361 	case HCI_ERROR_CONNECTION_TIMEOUT:
3362 		return MGMT_DEV_DISCONN_TIMEOUT;
3363 	case HCI_ERROR_REMOTE_USER_TERM:
3364 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3365 	case HCI_ERROR_REMOTE_POWER_OFF:
3366 		return MGMT_DEV_DISCONN_REMOTE;
3367 	case HCI_ERROR_LOCAL_HOST_TERM:
3368 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3369 	default:
3370 		return MGMT_DEV_DISCONN_UNKNOWN;
3371 	}
3372 }
3373 
3374 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3375 				     struct sk_buff *skb)
3376 {
3377 	struct hci_ev_disconn_complete *ev = data;
3378 	u8 reason;
3379 	struct hci_conn_params *params;
3380 	struct hci_conn *conn;
3381 	bool mgmt_connected;
3382 
3383 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3384 
3385 	hci_dev_lock(hdev);
3386 
3387 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3388 	if (!conn)
3389 		goto unlock;
3390 
3391 	if (ev->status) {
3392 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3393 				       conn->dst_type, ev->status);
3394 		goto unlock;
3395 	}
3396 
3397 	conn->state = BT_CLOSED;
3398 
3399 	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3400 
3401 	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3402 		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3403 	else
3404 		reason = hci_to_mgmt_reason(ev->reason);
3405 
3406 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3407 				reason, mgmt_connected);
3408 
3409 	if (conn->type == ACL_LINK) {
3410 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3411 			hci_remove_link_key(hdev, &conn->dst);
3412 
3413 		hci_update_scan(hdev);
3414 	}
3415 
3416 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3417 	if (params) {
3418 		switch (params->auto_connect) {
3419 		case HCI_AUTO_CONN_LINK_LOSS:
3420 			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3421 				break;
3422 			fallthrough;
3423 
3424 		case HCI_AUTO_CONN_DIRECT:
3425 		case HCI_AUTO_CONN_ALWAYS:
3426 			hci_pend_le_list_del_init(params);
3427 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
3428 			hci_update_passive_scan(hdev);
3429 			break;
3430 
3431 		default:
3432 			break;
3433 		}
3434 	}
3435 
3436 	hci_disconn_cfm(conn, ev->reason);
3437 
3438 	/* Re-enable advertising if necessary, since it might
3439 	 * have been disabled by the connection. From the
3440 	 * HCI_LE_Set_Advertise_Enable command description in
3441 	 * the core specification (v4.0):
3442 	 * "The Controller shall continue advertising until the Host
3443 	 * issues an LE_Set_Advertise_Enable command with
3444 	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3445 	 * or until a connection is created or until the Advertising
3446 	 * is timed out due to Directed Advertising."
3447 	 */
3448 	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3449 		hdev->cur_adv_instance = conn->adv_instance;
3450 		hci_enable_advertising(hdev);
3451 	}
3452 
3453 	hci_conn_del(conn);
3454 
3455 unlock:
3456 	hci_dev_unlock(hdev);
3457 }
3458 
3459 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3460 				  struct sk_buff *skb)
3461 {
3462 	struct hci_ev_auth_complete *ev = data;
3463 	struct hci_conn *conn;
3464 
3465 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3466 
3467 	hci_dev_lock(hdev);
3468 
3469 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3470 	if (!conn)
3471 		goto unlock;
3472 
3473 	if (!ev->status) {
3474 		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3475 
3476 		if (!hci_conn_ssp_enabled(conn) &&
3477 		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3478 			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3479 		} else {
3480 			set_bit(HCI_CONN_AUTH, &conn->flags);
3481 			conn->sec_level = conn->pending_sec_level;
3482 		}
3483 	} else {
3484 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3485 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3486 
3487 		mgmt_auth_failed(conn, ev->status);
3488 	}
3489 
3490 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3491 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3492 
3493 	if (conn->state == BT_CONFIG) {
3494 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3495 			struct hci_cp_set_conn_encrypt cp;
3496 			cp.handle  = ev->handle;
3497 			cp.encrypt = 0x01;
3498 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3499 				     &cp);
3500 		} else {
3501 			conn->state = BT_CONNECTED;
3502 			hci_connect_cfm(conn, ev->status);
3503 			hci_conn_drop(conn);
3504 		}
3505 	} else {
3506 		hci_auth_cfm(conn, ev->status);
3507 
3508 		hci_conn_hold(conn);
3509 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3510 		hci_conn_drop(conn);
3511 	}
3512 
3513 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3514 		if (!ev->status) {
3515 			struct hci_cp_set_conn_encrypt cp;
3516 			cp.handle  = ev->handle;
3517 			cp.encrypt = 0x01;
3518 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3519 				     &cp);
3520 		} else {
3521 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3522 			hci_encrypt_cfm(conn, ev->status);
3523 		}
3524 	}
3525 
3526 unlock:
3527 	hci_dev_unlock(hdev);
3528 }
3529 
3530 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3531 				struct sk_buff *skb)
3532 {
3533 	struct hci_ev_remote_name *ev = data;
3534 	struct hci_conn *conn;
3535 
3536 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3537 
3538 	hci_conn_check_pending(hdev);
3539 
3540 	hci_dev_lock(hdev);
3541 
3542 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3543 
3544 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3545 		goto check_auth;
3546 
3547 	if (ev->status == 0)
3548 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3549 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3550 	else
3551 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3552 
3553 check_auth:
3554 	if (!conn)
3555 		goto unlock;
3556 
3557 	if (!hci_outgoing_auth_needed(hdev, conn))
3558 		goto unlock;
3559 
3560 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3561 		struct hci_cp_auth_requested cp;
3562 
3563 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3564 
3565 		cp.handle = __cpu_to_le16(conn->handle);
3566 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3567 	}
3568 
3569 unlock:
3570 	hci_dev_unlock(hdev);
3571 }
3572 
3573 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3574 				   struct sk_buff *skb)
3575 {
3576 	struct hci_ev_encrypt_change *ev = data;
3577 	struct hci_conn *conn;
3578 
3579 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3580 
3581 	hci_dev_lock(hdev);
3582 
3583 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3584 	if (!conn)
3585 		goto unlock;
3586 
3587 	if (!ev->status) {
3588 		if (ev->encrypt) {
3589 			/* Encryption implies authentication */
3590 			set_bit(HCI_CONN_AUTH, &conn->flags);
3591 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3592 			conn->sec_level = conn->pending_sec_level;
3593 
3594 			/* P-256 authentication key implies FIPS */
3595 			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3596 				set_bit(HCI_CONN_FIPS, &conn->flags);
3597 
3598 			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3599 			    conn->type == LE_LINK)
3600 				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3601 		} else {
3602 			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3603 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3604 		}
3605 	}
3606 
3607 	/* We should disregard the current RPA and generate a new one
3608 	 * whenever the encryption procedure fails.
3609 	 */
3610 	if (ev->status && conn->type == LE_LINK) {
3611 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3612 		hci_adv_instances_set_rpa_expired(hdev, true);
3613 	}
3614 
3615 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3616 
3617 	/* Check link security requirements are met */
3618 	if (!hci_conn_check_link_mode(conn))
3619 		ev->status = HCI_ERROR_AUTH_FAILURE;
3620 
3621 	if (ev->status && conn->state == BT_CONNECTED) {
3622 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3623 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3624 
3625 		/* Notify upper layers so they can cleanup before
3626 		 * disconnecting.
3627 		 */
3628 		hci_encrypt_cfm(conn, ev->status);
3629 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3630 		hci_conn_drop(conn);
3631 		goto unlock;
3632 	}
3633 
3634 	/* Try reading the encryption key size for encrypted ACL links */
3635 	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3636 		struct hci_cp_read_enc_key_size cp;
3637 
3638 		/* Only send HCI_Read_Encryption_Key_Size if the
3639 		 * controller really supports it. If it doesn't, assume
3640 		 * the default size (16).
3641 		 */
3642 		if (!(hdev->commands[20] & 0x10)) {
3643 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3644 			goto notify;
3645 		}
3646 
3647 		cp.handle = cpu_to_le16(conn->handle);
3648 		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3649 				 sizeof(cp), &cp)) {
3650 			bt_dev_err(hdev, "sending read key size failed");
3651 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3652 			goto notify;
3653 		}
3654 
3655 		goto unlock;
3656 	}
3657 
3658 	/* Set the default Authenticated Payload Timeout after
3659 	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3660 	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3661 	 * sent when the link is active and encryption is enabled. The conn
3662 	 * type can be either LE or ACL, the controller must support LMP
3663 	 * Ping, and AES-CCM encryption must be in use as well.
3664 	 */
3665 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3666 	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3667 	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3668 	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3669 		struct hci_cp_write_auth_payload_to cp;
3670 
3671 		cp.handle = cpu_to_le16(conn->handle);
3672 		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3673 		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3674 				 sizeof(cp), &cp)) {
3675 			bt_dev_err(hdev, "write auth payload timeout failed");
3676 			goto notify;
3677 		}
3678 
3679 		goto unlock;
3680 	}
3681 
3682 notify:
3683 	hci_encrypt_cfm(conn, ev->status);
3684 
3685 unlock:
3686 	hci_dev_unlock(hdev);
3687 }
3688 
3689 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3690 					     struct sk_buff *skb)
3691 {
3692 	struct hci_ev_change_link_key_complete *ev = data;
3693 	struct hci_conn *conn;
3694 
3695 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3696 
3697 	hci_dev_lock(hdev);
3698 
3699 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3700 	if (conn) {
3701 		if (!ev->status)
3702 			set_bit(HCI_CONN_SECURE, &conn->flags);
3703 
3704 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3705 
3706 		hci_key_change_cfm(conn, ev->status);
3707 	}
3708 
3709 	hci_dev_unlock(hdev);
3710 }
3711 
3712 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3713 				    struct sk_buff *skb)
3714 {
3715 	struct hci_ev_remote_features *ev = data;
3716 	struct hci_conn *conn;
3717 
3718 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3719 
3720 	hci_dev_lock(hdev);
3721 
3722 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3723 	if (!conn)
3724 		goto unlock;
3725 
3726 	if (!ev->status)
3727 		memcpy(conn->features[0], ev->features, 8);
3728 
3729 	if (conn->state != BT_CONFIG)
3730 		goto unlock;
3731 
3732 	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3733 	    lmp_ext_feat_capable(conn)) {
3734 		struct hci_cp_read_remote_ext_features cp;
3735 		cp.handle = ev->handle;
3736 		cp.page = 0x01;
3737 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3738 			     sizeof(cp), &cp);
3739 		goto unlock;
3740 	}
3741 
3742 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3743 		struct hci_cp_remote_name_req cp;
3744 		memset(&cp, 0, sizeof(cp));
3745 		bacpy(&cp.bdaddr, &conn->dst);
3746 		cp.pscan_rep_mode = 0x02;
3747 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3748 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3749 		mgmt_device_connected(hdev, conn, NULL, 0);
3750 
3751 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3752 		conn->state = BT_CONNECTED;
3753 		hci_connect_cfm(conn, ev->status);
3754 		hci_conn_drop(conn);
3755 	}
3756 
3757 unlock:
3758 	hci_dev_unlock(hdev);
3759 }
3760 
3761 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3762 {
3763 	cancel_delayed_work(&hdev->cmd_timer);
3764 
3765 	rcu_read_lock();
3766 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3767 		if (ncmd) {
3768 			cancel_delayed_work(&hdev->ncmd_timer);
3769 			atomic_set(&hdev->cmd_cnt, 1);
3770 		} else {
3771 			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3772 				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3773 						   HCI_NCMD_TIMEOUT);
3774 		}
3775 	}
3776 	rcu_read_unlock();
3777 }
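
/* Note: ncmd is the Num_HCI_Command_Packets field of the event header;
 * zero means the controller cannot accept further commands yet, so the
 * ncmd watchdog is armed instead of re-opening the command quota. A
 * typical call site (sketch, per struct hci_ev_cmd_complete):
 *
 *	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
 */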
3778 
3779 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3780 					struct sk_buff *skb)
3781 {
3782 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3783 
3784 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3785 
3786 	if (rp->status)
3787 		return rp->status;
3788 
3789 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3790 	hdev->le_pkts  = rp->acl_max_pkt;
3791 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3792 	hdev->iso_pkts = rp->iso_max_pkt;
3793 
3794 	hdev->le_cnt  = hdev->le_pkts;
3795 	hdev->iso_cnt = hdev->iso_pkts;
3796 
3797 	bt_dev_dbg(hdev, "le mtu %d:%d iso mtu %d:%d", hdev->le_mtu,
3798 		   hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3799 
3800 	return rp->status;
3801 }
3802 
3803 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3804 {
3805 	struct hci_conn *conn, *tmp;
3806 
3807 	lockdep_assert_held(&hdev->lock);
3808 
3809 	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3810 		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3811 		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3812 			continue;
3813 
3814 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3815 			hci_conn_failed(conn, status);
3816 	}
3817 }
3818 
3819 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3820 				   struct sk_buff *skb)
3821 {
3822 	struct hci_rp_le_set_cig_params *rp = data;
3823 	struct hci_cp_le_set_cig_params *cp;
3824 	struct hci_conn *conn;
3825 	u8 status = rp->status;
3826 	bool pending = false;
3827 	int i;
3828 
3829 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3830 
3831 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3832 	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3833 			    rp->cig_id != cp->cig_id)) {
3834 		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3835 		status = HCI_ERROR_UNSPECIFIED;
3836 	}
3837 
3838 	hci_dev_lock(hdev);
3839 
3840 	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3841 	 *
3842 	 * If the Status return parameter is non-zero, then the state of the CIG
3843 	 * and its CIS configurations shall not be changed by the command. If
3844 	 * the CIG did not already exist, it shall not be created.
3845 	 */
3846 	if (status) {
3847 		/* Keep current configuration, fail only the unbound CIS */
3848 		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3849 		goto unlock;
3850 	}
3851 
3852 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3853 	 *
3854 	 * If the Status return parameter is zero, then the Controller shall
3855 	 * set the Connection_Handle arrayed return parameter to the connection
3856 	 * handle(s) corresponding to the CIS configurations specified in
3857 	 * the CIS_IDs command parameter, in the same order.
3858 	 */
3859 	for (i = 0; i < rp->num_handles; ++i) {
3860 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3861 						cp->cis[i].cis_id);
3862 		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3863 			continue;
3864 
3865 		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3866 			continue;
3867 
3868 		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3869 			continue;
3870 
3871 		if (conn->state == BT_CONNECT)
3872 			pending = true;
3873 	}
3874 
3875 unlock:
3876 	if (pending)
3877 		hci_le_create_cis_pending(hdev);
3878 
3879 	hci_dev_unlock(hdev);
3880 
3881 	return rp->status;
3882 }
3883 
3884 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3885 				   struct sk_buff *skb)
3886 {
3887 	struct hci_rp_le_setup_iso_path *rp = data;
3888 	struct hci_cp_le_setup_iso_path *cp;
3889 	struct hci_conn *conn;
3890 
3891 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3892 
3893 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3894 	if (!cp)
3895 		return rp->status;
3896 
3897 	hci_dev_lock(hdev);
3898 
3899 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3900 	if (!conn)
3901 		goto unlock;
3902 
3903 	if (rp->status) {
3904 		hci_connect_cfm(conn, rp->status);
3905 		hci_conn_del(conn);
3906 		goto unlock;
3907 	}
3908 
3909 	switch (cp->direction) {
3910 	/* Input (Host to Controller) */
3911 	case 0x00:
3912 		/* Only confirm connection if output only */
3913 		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3914 			hci_connect_cfm(conn, rp->status);
3915 		break;
3916 	/* Output (Controller to Host) */
3917 	case 0x01:
3918 		/* Confirm connection since conn->iso_qos is always configured
3919 		 * last.
3920 		 */
3921 		hci_connect_cfm(conn, rp->status);
3922 		break;
3923 	}
3924 
3925 unlock:
3926 	hci_dev_unlock(hdev);
3927 	return rp->status;
3928 }
3929 
3930 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3931 {
3932 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3933 }
3934 
3935 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3936 				   struct sk_buff *skb)
3937 {
3938 	struct hci_ev_status *rp = data;
3939 	struct hci_cp_le_set_per_adv_params *cp;
3940 
3941 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3942 
3943 	if (rp->status)
3944 		return rp->status;
3945 
3946 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3947 	if (!cp)
3948 		return rp->status;
3949 
3950 	/* TODO: set the conn state */
3951 	return rp->status;
3952 }
3953 
3954 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3955 				       struct sk_buff *skb)
3956 {
3957 	struct hci_ev_status *rp = data;
3958 	struct hci_cp_le_set_per_adv_enable *cp;
3959 	struct adv_info *adv = NULL, *n;
3960 	u8 per_adv_cnt = 0;
3961 
3962 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3963 
3964 	if (rp->status)
3965 		return rp->status;
3966 
3967 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3968 	if (!cp)
3969 		return rp->status;
3970 
3971 	hci_dev_lock(hdev);
3972 
3973 	adv = hci_find_adv_instance(hdev, cp->handle);
3974 
3975 	if (cp->enable) {
3976 		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3977 
3978 		if (adv)
3979 			adv->enabled = true;
3980 	} else {
3981 		/* If just one instance was disabled, check if there are
3982 		 * any other instances enabled before clearing HCI_LE_PER_ADV.
3983 		 * The current periodic adv instance will be marked as
3984 		 * disabled once extended advertising is also disabled.
3985 		 */
3986 		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3987 					 list) {
3988 			if (adv->periodic && adv->enabled)
3989 				per_adv_cnt++;
3990 		}
3991 
3992 		if (per_adv_cnt > 1)
3993 			goto unlock;
3994 
3995 		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3996 	}
3997 
3998 unlock:
3999 	hci_dev_unlock(hdev);
4000 
4001 	return rp->status;
4002 }
4003 
4004 #define HCI_CC_VL(_op, _func, _min, _max) \
4005 { \
4006 	.op = _op, \
4007 	.func = _func, \
4008 	.min_len = _min, \
4009 	.max_len = _max, \
4010 }
4011 
4012 #define HCI_CC(_op, _func, _len) \
4013 	HCI_CC_VL(_op, _func, _len, _len)
4014 
4015 #define HCI_CC_STATUS(_op, _func) \
4016 	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
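
/* Illustrative expansion: HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset)
 * becomes
 *
 *	{
 *		.op = HCI_OP_RESET,
 *		.func = hci_cc_reset,
 *		.min_len = sizeof(struct hci_ev_status),
 *		.max_len = sizeof(struct hci_ev_status),
 *	}
 *
 * Fixed-size replies use HCI_CC()/HCI_CC_STATUS(); variable-length
 * replies use HCI_CC_VL() with distinct bounds.
 */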
4017 
4018 static const struct hci_cc {
4019 	u16  op;
4020 	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4021 	u16  min_len;
4022 	u16  max_len;
4023 } hci_cc_table[] = {
4024 	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4025 	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4026 	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4027 	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4028 		      hci_cc_remote_name_req_cancel),
4029 	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4030 	       sizeof(struct hci_rp_role_discovery)),
4031 	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4032 	       sizeof(struct hci_rp_read_link_policy)),
4033 	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4034 	       sizeof(struct hci_rp_write_link_policy)),
4035 	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4036 	       sizeof(struct hci_rp_read_def_link_policy)),
4037 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4038 		      hci_cc_write_def_link_policy),
4039 	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4040 	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4041 	       sizeof(struct hci_rp_read_stored_link_key)),
4042 	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4043 	       sizeof(struct hci_rp_delete_stored_link_key)),
4044 	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4045 	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4046 	       sizeof(struct hci_rp_read_local_name)),
4047 	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4048 	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4049 	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4050 	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4051 	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4052 	       sizeof(struct hci_rp_read_class_of_dev)),
4053 	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4054 	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4055 	       sizeof(struct hci_rp_read_voice_setting)),
4056 	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4057 	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4058 	       sizeof(struct hci_rp_read_num_supported_iac)),
4059 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4060 	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4061 	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4062 	       sizeof(struct hci_rp_read_auth_payload_to)),
4063 	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4064 	       sizeof(struct hci_rp_write_auth_payload_to)),
4065 	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4066 	       sizeof(struct hci_rp_read_local_version)),
4067 	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4068 	       sizeof(struct hci_rp_read_local_commands)),
4069 	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4070 	       sizeof(struct hci_rp_read_local_features)),
4071 	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4072 	       sizeof(struct hci_rp_read_local_ext_features)),
4073 	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4074 	       sizeof(struct hci_rp_read_buffer_size)),
4075 	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4076 	       sizeof(struct hci_rp_read_bd_addr)),
4077 	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4078 	       sizeof(struct hci_rp_read_local_pairing_opts)),
4079 	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4080 	       sizeof(struct hci_rp_read_page_scan_activity)),
4081 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4082 		      hci_cc_write_page_scan_activity),
4083 	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4084 	       sizeof(struct hci_rp_read_page_scan_type)),
4085 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4086 	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4087 	       sizeof(struct hci_rp_read_data_block_size)),
4088 	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4089 	       sizeof(struct hci_rp_read_flow_control_mode)),
4090 	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4091 	       sizeof(struct hci_rp_read_local_amp_info)),
4092 	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4093 	       sizeof(struct hci_rp_read_clock)),
4094 	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4095 	       sizeof(struct hci_rp_read_enc_key_size)),
4096 	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4097 	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4098 	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4099 	       hci_cc_read_def_err_data_reporting,
4100 	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4101 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4102 		      hci_cc_write_def_err_data_reporting),
4103 	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4104 	       sizeof(struct hci_rp_pin_code_reply)),
4105 	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4106 	       sizeof(struct hci_rp_pin_code_neg_reply)),
4107 	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4108 	       sizeof(struct hci_rp_read_local_oob_data)),
4109 	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4110 	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4111 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4112 	       sizeof(struct hci_rp_le_read_buffer_size)),
4113 	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4114 	       sizeof(struct hci_rp_le_read_local_features)),
4115 	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4116 	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4117 	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4118 	       sizeof(struct hci_rp_user_confirm_reply)),
4119 	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4120 	       sizeof(struct hci_rp_user_confirm_reply)),
4121 	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4122 	       sizeof(struct hci_rp_user_confirm_reply)),
4123 	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4124 	       sizeof(struct hci_rp_user_confirm_reply)),
4125 	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4126 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4127 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4128 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4129 	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4130 	       hci_cc_le_read_accept_list_size,
4131 	       sizeof(struct hci_rp_le_read_accept_list_size)),
4132 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4133 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4134 		      hci_cc_le_add_to_accept_list),
4135 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4136 		      hci_cc_le_del_from_accept_list),
4137 	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4138 	       sizeof(struct hci_rp_le_read_supported_states)),
4139 	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4140 	       sizeof(struct hci_rp_le_read_def_data_len)),
4141 	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4142 		      hci_cc_le_write_def_data_len),
4143 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4144 		      hci_cc_le_add_to_resolv_list),
4145 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4146 		      hci_cc_le_del_from_resolv_list),
4147 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4148 		      hci_cc_le_clear_resolv_list),
4149 	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4150 	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4151 	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4152 		      hci_cc_le_set_addr_resolution_enable),
4153 	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4154 	       sizeof(struct hci_rp_le_read_max_data_len)),
4155 	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4156 		      hci_cc_write_le_host_supported),
4157 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4158 	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4159 	       sizeof(struct hci_rp_read_rssi)),
4160 	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4161 	       sizeof(struct hci_rp_read_tx_power)),
4162 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4163 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4164 		      hci_cc_le_set_ext_scan_param),
4165 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4166 		      hci_cc_le_set_ext_scan_enable),
4167 	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4168 	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4169 	       hci_cc_le_read_num_adv_sets,
4170 	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4171 	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4172 	       sizeof(struct hci_rp_le_set_ext_adv_params)),
4173 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4174 		      hci_cc_le_set_ext_adv_enable),
4175 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4176 		      hci_cc_le_set_adv_set_random_addr),
4177 	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4178 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4179 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4180 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4181 		      hci_cc_le_set_per_adv_enable),
4182 	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4183 	       sizeof(struct hci_rp_le_read_transmit_power)),
4184 	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4185 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4186 	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4187 	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4188 		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4189 	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4190 	       sizeof(struct hci_rp_le_setup_iso_path)),
4191 };
4192 
4193 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4194 		      struct sk_buff *skb)
4195 {
4196 	void *data;
4197 
4198 	if (skb->len < cc->min_len) {
4199 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4200 			   cc->op, skb->len, cc->min_len);
4201 		return HCI_ERROR_UNSPECIFIED;
4202 	}
4203 
4204 	/* Just warn if the length is over max_len, since it may still be
4205 	 * possible to partially parse the cc, so leave it to the callback
4206 	 * to decide whether that is acceptable.
4207 	 */
4208 	if (skb->len > cc->max_len)
4209 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4210 			    cc->op, skb->len, cc->max_len);
4211 
4212 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4213 	if (!data)
4214 		return HCI_ERROR_UNSPECIFIED;
4215 
4216 	return cc->func(hdev, data, skb);
4217 }
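
/* Example of the validation above: HCI_OP_LE_SET_CIG_PARAMS is declared
 * with HCI_CC_VL and max_len = HCI_MAX_EVENT_SIZE, so a reply carrying
 * the flexible handles[] array passes as long as at least the fixed
 * struct hci_rp_le_set_cig_params header is present; anything shorter
 * than min_len is rejected with HCI_ERROR_UNSPECIFIED before the
 * callback runs.
 */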
4218 
4219 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4220 				 struct sk_buff *skb, u16 *opcode, u8 *status,
4221 				 hci_req_complete_t *req_complete,
4222 				 hci_req_complete_skb_t *req_complete_skb)
4223 {
4224 	struct hci_ev_cmd_complete *ev = data;
4225 	int i;
4226 
4227 	*opcode = __le16_to_cpu(ev->opcode);
4228 
4229 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4230 
4231 	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4232 		if (hci_cc_table[i].op == *opcode) {
4233 			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4234 			break;
4235 		}
4236 	}
4237 
4238 	if (i == ARRAY_SIZE(hci_cc_table)) {
4239 		/* Unknown opcode, assume byte 0 contains the status, so
4240 		 * that e.g. __hci_cmd_sync() properly returns errors
4241 		 * for vendor specific commands sent by HCI drivers.
4242 		 * If a vendor doesn't actually follow this convention we may
4243 		 * need to introduce a vendor CC table in order to properly set
4244 		 * the status.
4245 		 */
4246 		*status = skb->data[0];
4247 	}
4248 
4249 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4250 
4251 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4252 			     req_complete_skb);
4253 
4254 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4255 		bt_dev_err(hdev,
4256 			   "unexpected event for opcode 0x%4.4x", *opcode);
4257 		return;
4258 	}
4259 
4260 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4261 		queue_work(hdev->workqueue, &hdev->cmd_work);
4262 }
4263 
4264 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4265 {
4266 	struct hci_cp_le_create_cis *cp;
4267 	bool pending = false;
4268 	int i;
4269 
4270 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4271 
4272 	if (!status)
4273 		return;
4274 
4275 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4276 	if (!cp)
4277 		return;
4278 
4279 	hci_dev_lock(hdev);
4280 
4281 	/* Remove connection if command failed */
4282 	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4283 		struct hci_conn *conn;
4284 		u16 handle;
4285 
4286 		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4287 
4288 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4289 		if (conn) {
4290 			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4291 					       &conn->flags))
4292 				pending = true;
4293 			conn->state = BT_CLOSED;
4294 			hci_connect_cfm(conn, status);
4295 			hci_conn_del(conn);
4296 		}
4297 	}
4298 
4299 	if (pending)
4300 		hci_le_create_cis_pending(hdev);
4301 
4302 	hci_dev_unlock(hdev);
4303 }
4304 
4305 #define HCI_CS(_op, _func) \
4306 { \
4307 	.op = _op, \
4308 	.func = _func, \
4309 }
4310 
4311 static const struct hci_cs {
4312 	u16  op;
4313 	void (*func)(struct hci_dev *hdev, __u8 status);
4314 } hci_cs_table[] = {
4315 	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4316 	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4317 	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4318 	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4319 	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4320 	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4321 	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4322 	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4323 	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4324 	       hci_cs_read_remote_ext_features),
4325 	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4326 	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4327 	       hci_cs_enhanced_setup_sync_conn),
4328 	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4329 	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4330 	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4331 	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4332 	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4333 	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4334 	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4335 	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4336 	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4337 };
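
/* Unlike the Command Complete handlers above, Command Status handlers
 * receive only the status byte: HCI_Command_Status carries no return
 * parameters, so no length validation table is needed and each func is
 * invoked directly from hci_cmd_status_evt() below.
 */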
4338 
4339 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4340 			       struct sk_buff *skb, u16 *opcode, u8 *status,
4341 			       hci_req_complete_t *req_complete,
4342 			       hci_req_complete_skb_t *req_complete_skb)
4343 {
4344 	struct hci_ev_cmd_status *ev = data;
4345 	int i;
4346 
4347 	*opcode = __le16_to_cpu(ev->opcode);
4348 	*status = ev->status;
4349 
4350 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4351 
4352 	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4353 		if (hci_cs_table[i].op == *opcode) {
4354 			hci_cs_table[i].func(hdev, ev->status);
4355 			break;
4356 		}
4357 	}
4358 
4359 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4360 
4361 	/* Indicate request completion if the command failed. Also, if
4362 	 * we're not waiting for a special event and we get a success
4363 	 * command status we should try to flag the request as completed
4364 	 * (since for this kind of command there will not be a command
4365 	 * complete event).
4366 	 */
4367 	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4368 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4369 				     req_complete_skb);
4370 		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4371 			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4372 				   *opcode);
4373 			return;
4374 		}
4375 	}
4376 
4377 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4378 		queue_work(hdev->workqueue, &hdev->cmd_work);
4379 }
4380 
4381 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4382 				   struct sk_buff *skb)
4383 {
4384 	struct hci_ev_hardware_error *ev = data;
4385 
4386 	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4387 
4388 	hdev->hw_error_code = ev->code;
4389 
4390 	queue_work(hdev->req_workqueue, &hdev->error_reset);
4391 }
4392 
4393 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4394 				struct sk_buff *skb)
4395 {
4396 	struct hci_ev_role_change *ev = data;
4397 	struct hci_conn *conn;
4398 
4399 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4400 
4401 	hci_dev_lock(hdev);
4402 
4403 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4404 	if (conn) {
4405 		if (!ev->status)
4406 			conn->role = ev->role;
4407 
4408 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4409 
4410 		hci_role_switch_cfm(conn, ev->status, ev->role);
4411 	}
4412 
4413 	hci_dev_unlock(hdev);
4414 }
4415 
4416 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4417 				  struct sk_buff *skb)
4418 {
4419 	struct hci_ev_num_comp_pkts *ev = data;
4420 	int i;
4421 
4422 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4423 			     flex_array_size(ev, handles, ev->num)))
4424 		return;
4425 
4426 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4427 		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4428 		return;
4429 	}
4430 
4431 	bt_dev_dbg(hdev, "num %d", ev->num);
4432 
4433 	for (i = 0; i < ev->num; i++) {
4434 		struct hci_comp_pkts_info *info = &ev->handles[i];
4435 		struct hci_conn *conn;
4436 		__u16  handle, count;
4437 
4438 		handle = __le16_to_cpu(info->handle);
4439 		count  = __le16_to_cpu(info->count);
4440 
4441 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4442 		if (!conn)
4443 			continue;
4444 
4445 		conn->sent -= count;
4446 
4447 		switch (conn->type) {
4448 		case ACL_LINK:
4449 			hdev->acl_cnt += count;
4450 			if (hdev->acl_cnt > hdev->acl_pkts)
4451 				hdev->acl_cnt = hdev->acl_pkts;
4452 			break;
4453 
4454 		case LE_LINK:
4455 			if (hdev->le_pkts) {
4456 				hdev->le_cnt += count;
4457 				if (hdev->le_cnt > hdev->le_pkts)
4458 					hdev->le_cnt = hdev->le_pkts;
4459 			} else {
4460 				hdev->acl_cnt += count;
4461 				if (hdev->acl_cnt > hdev->acl_pkts)
4462 					hdev->acl_cnt = hdev->acl_pkts;
4463 			}
4464 			break;
4465 
4466 		case SCO_LINK:
4467 			hdev->sco_cnt += count;
4468 			if (hdev->sco_cnt > hdev->sco_pkts)
4469 				hdev->sco_cnt = hdev->sco_pkts;
4470 			break;
4471 
4472 		case ISO_LINK:
4473 			if (hdev->iso_pkts) {
4474 				hdev->iso_cnt += count;
4475 				if (hdev->iso_cnt > hdev->iso_pkts)
4476 					hdev->iso_cnt = hdev->iso_pkts;
4477 			} else if (hdev->le_pkts) {
4478 				hdev->le_cnt += count;
4479 				if (hdev->le_cnt > hdev->le_pkts)
4480 					hdev->le_cnt = hdev->le_pkts;
4481 			} else {
4482 				hdev->acl_cnt += count;
4483 				if (hdev->acl_cnt > hdev->acl_pkts)
4484 					hdev->acl_cnt = hdev->acl_pkts;
4485 			}
4486 			break;
4487 
4488 		default:
4489 			bt_dev_err(hdev, "unknown type %d conn %p",
4490 				   conn->type, conn);
4491 			break;
4492 		}
4493 	}
4494 
4495 	queue_work(hdev->workqueue, &hdev->tx_work);
4496 }
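
/* Worked example of the accounting above: with hdev->acl_pkts = 8 and
 * hdev->acl_cnt = 5, an event reporting count = 2 for an ACL handle
 * lowers conn->sent by 2 and raises acl_cnt to 7. A misbehaving
 * controller reporting count = 10 would only restore acl_cnt to the
 * acl_pkts ceiling of 8 instead of leaking extra credits.
 */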
4497 
4498 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4499 						 __u16 handle)
4500 {
4501 	struct hci_chan *chan;
4502 
4503 	switch (hdev->dev_type) {
4504 	case HCI_PRIMARY:
4505 		return hci_conn_hash_lookup_handle(hdev, handle);
4506 	case HCI_AMP:
4507 		chan = hci_chan_lookup_handle(hdev, handle);
4508 		if (chan)
4509 			return chan->conn;
4510 		break;
4511 	default:
4512 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4513 		break;
4514 	}
4515 
4516 	return NULL;
4517 }
4518 
4519 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4520 				    struct sk_buff *skb)
4521 {
4522 	struct hci_ev_num_comp_blocks *ev = data;
4523 	int i;
4524 
4525 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4526 			     flex_array_size(ev, handles, ev->num_hndl)))
4527 		return;
4528 
4529 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4530 		bt_dev_err(hdev, "wrong event for mode %d",
4531 			   hdev->flow_ctl_mode);
4532 		return;
4533 	}
4534 
4535 	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4536 		   ev->num_hndl);
4537 
4538 	for (i = 0; i < ev->num_hndl; i++) {
4539 		struct hci_comp_blocks_info *info = &ev->handles[i];
4540 		struct hci_conn *conn = NULL;
4541 		__u16  handle, block_count;
4542 
4543 		handle = __le16_to_cpu(info->handle);
4544 		block_count = __le16_to_cpu(info->blocks);
4545 
4546 		conn = __hci_conn_lookup_handle(hdev, handle);
4547 		if (!conn)
4548 			continue;
4549 
4550 		conn->sent -= block_count;
4551 
4552 		switch (conn->type) {
4553 		case ACL_LINK:
4554 		case AMP_LINK:
4555 			hdev->block_cnt += block_count;
4556 			if (hdev->block_cnt > hdev->num_blocks)
4557 				hdev->block_cnt = hdev->num_blocks;
4558 			break;
4559 
4560 		default:
4561 			bt_dev_err(hdev, "unknown type %d conn %p",
4562 				   conn->type, conn);
4563 			break;
4564 		}
4565 	}
4566 
4567 	queue_work(hdev->workqueue, &hdev->tx_work);
4568 }
4569 
4570 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4571 				struct sk_buff *skb)
4572 {
4573 	struct hci_ev_mode_change *ev = data;
4574 	struct hci_conn *conn;
4575 
4576 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4577 
4578 	hci_dev_lock(hdev);
4579 
4580 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4581 	if (conn) {
4582 		conn->mode = ev->mode;
4583 
4584 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4585 					&conn->flags)) {
4586 			if (conn->mode == HCI_CM_ACTIVE)
4587 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4588 			else
4589 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4590 		}
4591 
4592 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4593 			hci_sco_setup(conn, ev->status);
4594 	}
4595 
4596 	hci_dev_unlock(hdev);
4597 }
4598 
4599 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4600 				     struct sk_buff *skb)
4601 {
4602 	struct hci_ev_pin_code_req *ev = data;
4603 	struct hci_conn *conn;
4604 
4605 	bt_dev_dbg(hdev, "");
4606 
4607 	hci_dev_lock(hdev);
4608 
4609 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4610 	if (!conn)
4611 		goto unlock;
4612 
4613 	if (conn->state == BT_CONNECTED) {
4614 		hci_conn_hold(conn);
4615 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4616 		hci_conn_drop(conn);
4617 	}
4618 
4619 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4620 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4621 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4622 			     sizeof(ev->bdaddr), &ev->bdaddr);
4623 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4624 		u8 secure;
4625 
4626 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4627 			secure = 1;
4628 		else
4629 			secure = 0;
4630 
4631 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4632 	}
4633 
4634 unlock:
4635 	hci_dev_unlock(hdev);
4636 }
4637 
4638 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4639 {
4640 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4641 		return;
4642 
4643 	conn->pin_length = pin_len;
4644 	conn->key_type = key_type;
4645 
4646 	switch (key_type) {
4647 	case HCI_LK_LOCAL_UNIT:
4648 	case HCI_LK_REMOTE_UNIT:
4649 	case HCI_LK_DEBUG_COMBINATION:
4650 		return;
4651 	case HCI_LK_COMBINATION:
4652 		if (pin_len == 16)
4653 			conn->pending_sec_level = BT_SECURITY_HIGH;
4654 		else
4655 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4656 		break;
4657 	case HCI_LK_UNAUTH_COMBINATION_P192:
4658 	case HCI_LK_UNAUTH_COMBINATION_P256:
4659 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4660 		break;
4661 	case HCI_LK_AUTH_COMBINATION_P192:
4662 		conn->pending_sec_level = BT_SECURITY_HIGH;
4663 		break;
4664 	case HCI_LK_AUTH_COMBINATION_P256:
4665 		conn->pending_sec_level = BT_SECURITY_FIPS;
4666 		break;
4667 	}
4668 }
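
/* Summary of the mapping above (link key type -> pending_sec_level):
 *
 *	HCI_LK_COMBINATION + 16 digit PIN	-> BT_SECURITY_HIGH
 *	HCI_LK_COMBINATION + shorter PIN	-> BT_SECURITY_MEDIUM
 *	HCI_LK_UNAUTH_COMBINATION_P192/P256	-> BT_SECURITY_MEDIUM
 *	HCI_LK_AUTH_COMBINATION_P192		-> BT_SECURITY_HIGH
 *	HCI_LK_AUTH_COMBINATION_P256		-> BT_SECURITY_FIPS
 *
 * Unit keys and debug keys leave the pending level untouched, and
 * changed combination keys are ignored entirely.
 */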
4669 
4670 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4671 				     struct sk_buff *skb)
4672 {
4673 	struct hci_ev_link_key_req *ev = data;
4674 	struct hci_cp_link_key_reply cp;
4675 	struct hci_conn *conn;
4676 	struct link_key *key;
4677 
4678 	bt_dev_dbg(hdev, "");
4679 
4680 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4681 		return;
4682 
4683 	hci_dev_lock(hdev);
4684 
4685 	key = hci_find_link_key(hdev, &ev->bdaddr);
4686 	if (!key) {
4687 		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4688 		goto not_found;
4689 	}
4690 
4691 	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4692 
4693 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4694 	if (conn) {
4695 		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4696 
4697 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4698 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4699 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4700 			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4701 			goto not_found;
4702 		}
4703 
4704 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4705 		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4706 		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4707 			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4708 			goto not_found;
4709 		}
4710 
4711 		conn_set_key(conn, key->type, key->pin_len);
4712 	}
4713 
4714 	bacpy(&cp.bdaddr, &ev->bdaddr);
4715 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4716 
4717 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4718 
4719 	hci_dev_unlock(hdev);
4720 
4721 	return;
4722 
4723 not_found:
4724 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4725 	hci_dev_unlock(hdev);
4726 }
4727 
4728 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4729 				    struct sk_buff *skb)
4730 {
4731 	struct hci_ev_link_key_notify *ev = data;
4732 	struct hci_conn *conn;
4733 	struct link_key *key;
4734 	bool persistent;
4735 	u8 pin_len = 0;
4736 
4737 	bt_dev_dbg(hdev, "");
4738 
4739 	hci_dev_lock(hdev);
4740 
4741 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4742 	if (!conn)
4743 		goto unlock;
4744 
4745 	hci_conn_hold(conn);
4746 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4747 	hci_conn_drop(conn);
4748 
4749 	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4750 	conn_set_key(conn, ev->key_type, conn->pin_length);
4751 
4752 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4753 		goto unlock;
4754 
4755 	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4756 			        ev->key_type, pin_len, &persistent);
4757 	if (!key)
4758 		goto unlock;
4759 
4760 	/* Update connection information since adding the key will have
4761 	 * fixed up the type in the case of changed combination keys.
4762 	 */
4763 	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4764 		conn_set_key(conn, key->type, key->pin_len);
4765 
4766 	mgmt_new_link_key(hdev, key, persistent);
4767 
4768 	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4769 	 * is set. If it's not set, simply remove the key from the kernel
4770 	 * list (we've still notified user space about it but with
4771 	 * store_hint being 0).
4772 	 */
4773 	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4774 	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4775 		list_del_rcu(&key->list);
4776 		kfree_rcu(key, rcu);
4777 		goto unlock;
4778 	}
4779 
4780 	if (persistent)
4781 		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4782 	else
4783 		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4784 
4785 unlock:
4786 	hci_dev_unlock(hdev);
4787 }
4788 
4789 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4790 				 struct sk_buff *skb)
4791 {
4792 	struct hci_ev_clock_offset *ev = data;
4793 	struct hci_conn *conn;
4794 
4795 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4796 
4797 	hci_dev_lock(hdev);
4798 
4799 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4800 	if (conn && !ev->status) {
4801 		struct inquiry_entry *ie;
4802 
4803 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4804 		if (ie) {
4805 			ie->data.clock_offset = ev->clock_offset;
4806 			ie->timestamp = jiffies;
4807 		}
4808 	}
4809 
4810 	hci_dev_unlock(hdev);
4811 }
4812 
4813 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4814 				    struct sk_buff *skb)
4815 {
4816 	struct hci_ev_pkt_type_change *ev = data;
4817 	struct hci_conn *conn;
4818 
4819 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4820 
4821 	hci_dev_lock(hdev);
4822 
4823 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4824 	if (conn && !ev->status)
4825 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4826 
4827 	hci_dev_unlock(hdev);
4828 }
4829 
4830 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4831 				   struct sk_buff *skb)
4832 {
4833 	struct hci_ev_pscan_rep_mode *ev = data;
4834 	struct inquiry_entry *ie;
4835 
4836 	bt_dev_dbg(hdev, "");
4837 
4838 	hci_dev_lock(hdev);
4839 
4840 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4841 	if (ie) {
4842 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4843 		ie->timestamp = jiffies;
4844 	}
4845 
4846 	hci_dev_unlock(hdev);
4847 }
4848 
4849 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4850 					     struct sk_buff *skb)
4851 {
4852 	struct hci_ev_inquiry_result_rssi *ev = edata;
4853 	struct inquiry_data data;
4854 	int i;
4855 
4856 	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4857 
4858 	if (!ev->num)
4859 		return;
4860 
4861 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4862 		return;
4863 
4864 	hci_dev_lock(hdev);
4865 
4866 	if (skb->len == array_size(ev->num,
4867 				   sizeof(struct inquiry_info_rssi_pscan))) {
4868 		struct inquiry_info_rssi_pscan *info;
4869 
4870 		for (i = 0; i < ev->num; i++) {
4871 			u32 flags;
4872 
4873 			info = hci_ev_skb_pull(hdev, skb,
4874 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4875 					       sizeof(*info));
4876 			if (!info) {
4877 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4878 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4879 				goto unlock;
4880 			}
4881 
4882 			bacpy(&data.bdaddr, &info->bdaddr);
4883 			data.pscan_rep_mode	= info->pscan_rep_mode;
4884 			data.pscan_period_mode	= info->pscan_period_mode;
4885 			data.pscan_mode		= info->pscan_mode;
4886 			memcpy(data.dev_class, info->dev_class, 3);
4887 			data.clock_offset	= info->clock_offset;
4888 			data.rssi		= info->rssi;
4889 			data.ssp_mode		= 0x00;
4890 
4891 			flags = hci_inquiry_cache_update(hdev, &data, false);
4892 
4893 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4894 					  info->dev_class, info->rssi,
4895 					  flags, NULL, 0, NULL, 0, 0);
4896 		}
4897 	} else if (skb->len == array_size(ev->num,
4898 					  sizeof(struct inquiry_info_rssi))) {
4899 		struct inquiry_info_rssi *info;
4900 
4901 		for (i = 0; i < ev->num; i++) {
4902 			u32 flags;
4903 
4904 			info = hci_ev_skb_pull(hdev, skb,
4905 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4906 					       sizeof(*info));
4907 			if (!info) {
4908 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4909 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4910 				goto unlock;
4911 			}
4912 
4913 			bacpy(&data.bdaddr, &info->bdaddr);
4914 			data.pscan_rep_mode	= info->pscan_rep_mode;
4915 			data.pscan_period_mode	= info->pscan_period_mode;
4916 			data.pscan_mode		= 0x00;
4917 			memcpy(data.dev_class, info->dev_class, 3);
4918 			data.clock_offset	= info->clock_offset;
4919 			data.rssi		= info->rssi;
4920 			data.ssp_mode		= 0x00;
4921 
4922 			flags = hci_inquiry_cache_update(hdev, &data, false);
4923 
4924 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4925 					  info->dev_class, info->rssi,
4926 					  flags, NULL, 0, NULL, 0, 0);
4927 		}
4928 	} else {
4929 		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4930 			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4931 	}
4932 unlock:
4933 	hci_dev_unlock(hdev);
4934 }
4935 
4936 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4937 					struct sk_buff *skb)
4938 {
4939 	struct hci_ev_remote_ext_features *ev = data;
4940 	struct hci_conn *conn;
4941 
4942 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4943 
4944 	hci_dev_lock(hdev);
4945 
4946 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4947 	if (!conn)
4948 		goto unlock;
4949 
4950 	if (ev->page < HCI_MAX_PAGES)
4951 		memcpy(conn->features[ev->page], ev->features, 8);
4952 
4953 	if (!ev->status && ev->page == 0x01) {
4954 		struct inquiry_entry *ie;
4955 
4956 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4957 		if (ie)
4958 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4959 
4960 		if (ev->features[0] & LMP_HOST_SSP) {
4961 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4962 		} else {
4963 			/* The Bluetooth specification mandates that Extended
4964 			 * Inquiry Results are only used when Secure Simple
4965 			 * Pairing is enabled, but some devices violate this.
4966 			 *
4967 			 * To make these devices work, the internal SSP
4968 			 * enabled flag needs to be cleared if the remote host
4969 			 * features do not indicate SSP support.
4970 			 */
4971 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4972 		}
4973 
4974 		if (ev->features[0] & LMP_HOST_SC)
4975 			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4976 	}
4977 
4978 	if (conn->state != BT_CONFIG)
4979 		goto unlock;
4980 
4981 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4982 		struct hci_cp_remote_name_req cp;
4983 		memset(&cp, 0, sizeof(cp));
4984 		bacpy(&cp.bdaddr, &conn->dst);
4985 		cp.pscan_rep_mode = 0x02;
4986 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4987 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4988 		mgmt_device_connected(hdev, conn, NULL, 0);
4989 
4990 	if (!hci_outgoing_auth_needed(hdev, conn)) {
4991 		conn->state = BT_CONNECTED;
4992 		hci_connect_cfm(conn, ev->status);
4993 		hci_conn_drop(conn);
4994 	}
4995 
4996 unlock:
4997 	hci_dev_unlock(hdev);
4998 }
4999 
5000 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5001 				       struct sk_buff *skb)
5002 {
5003 	struct hci_ev_sync_conn_complete *ev = data;
5004 	struct hci_conn *conn;
5005 	u8 status = ev->status;
5006 
5007 	switch (ev->link_type) {
5008 	case SCO_LINK:
5009 	case ESCO_LINK:
5010 		break;
5011 	default:
5012 		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5013 		 * for HCI_Synchronous_Connection_Complete is limited to
5014 		 * either SCO or eSCO
5015 		 */
5016 		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5017 		return;
5018 	}
5019 
5020 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
5021 
5022 	hci_dev_lock(hdev);
5023 
5024 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5025 	if (!conn) {
5026 		if (ev->link_type == ESCO_LINK)
5027 			goto unlock;
5028 
5029 		/* When the link type in the event indicates SCO connection
5030 		 * and lookup of the connection object fails, then check
5031 		 * if an eSCO connection object exists.
5032 		 *
5033 		 * The core limits the synchronous connections to either
5034 		 * SCO or eSCO. The eSCO connection is preferred and is
5035 		 * attempted first; until it is successfully established,
5036 		 * the link type will be hinted as eSCO.
5037 		 */
5038 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5039 		if (!conn)
5040 			goto unlock;
5041 	}
5042 
5043 	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5044 	 * Processing it more than once per connection can corrupt kernel memory.
5045 	 *
5046 	 * As the connection handle is set here for the first time, it indicates
5047 	 * whether the connection is already set up.
5048 	 */
5049 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5050 		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5051 		goto unlock;
5052 	}
5053 
5054 	switch (status) {
5055 	case 0x00:
5056 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
5057 		if (status) {
5058 			conn->state = BT_CLOSED;
5059 			break;
5060 		}
5061 
5062 		conn->state  = BT_CONNECTED;
5063 		conn->type   = ev->link_type;
5064 
5065 		hci_debugfs_create_conn(conn);
5066 		hci_conn_add_sysfs(conn);
5067 		break;
5068 
5069 	case 0x10:	/* Connection Accept Timeout */
5070 	case 0x0d:	/* Connection Rejected due to Limited Resources */
5071 	case 0x11:	/* Unsupported Feature or Parameter Value */
5072 	case 0x1c:	/* SCO interval rejected */
5073 	case 0x1a:	/* Unsupported Remote Feature */
5074 	case 0x1e:	/* Invalid LMP Parameters */
5075 	case 0x1f:	/* Unspecified error */
5076 	case 0x20:	/* Unsupported LMP Parameter value */
5077 		if (conn->out) {
5078 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5079 					(hdev->esco_type & EDR_ESCO_MASK);
5080 			if (hci_setup_sync(conn, conn->parent->handle))
5081 				goto unlock;
5082 		}
5083 		fallthrough;
5084 
5085 	default:
5086 		conn->state = BT_CLOSED;
5087 		break;
5088 	}
5089 
5090 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5091 	/* Notify only in case of SCO over the HCI transport data path
5092 	 * (value zero); a non-zero value means a non-HCI transport path.
5093 	 */
5094 	if (conn->codec.data_path == 0 && hdev->notify) {
5095 		switch (ev->air_mode) {
5096 		case 0x02:
5097 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5098 			break;
5099 		case 0x03:
5100 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5101 			break;
5102 		}
5103 	}
5104 
5105 	hci_connect_cfm(conn, status);
5106 	if (status)
5107 		hci_conn_del(conn);
5108 
5109 unlock:
5110 	hci_dev_unlock(hdev);
5111 }
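
/* Note on the retry above: an outgoing eSCO attempt failing with one of
 * the listed status codes (e.g. 0x1a, Unsupported Remote Feature) is
 * retried via hci_setup_sync() with conn->pkt_type rebuilt from
 * hdev->esco_type; only when no retry can be set up does the handler
 * fall through and mark the connection BT_CLOSED.
 */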
5112 
5113 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5114 {
5115 	size_t parsed = 0;
5116 
5117 	while (parsed < eir_len) {
5118 		u8 field_len = eir[0];
5119 
5120 		if (field_len == 0)
5121 			return parsed;
5122 
5123 		parsed += field_len + 1;
5124 		eir += field_len + 1;
5125 	}
5126 
5127 	return eir_len;
5128 }
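
/* Example of the walk above: for EIR data
 *
 *	02 01 06 05 09 'T' 'e' 's' 't' 00 ...
 *
 * the 3-byte flags field and the 6-byte complete-name field are
 * consumed, then the zero length octet terminates the walk and 9 is
 * returned, trimming the padding at the end of the fixed 240-byte
 * EIR block.
 */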
5129 
5130 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5131 					    struct sk_buff *skb)
5132 {
5133 	struct hci_ev_ext_inquiry_result *ev = edata;
5134 	struct inquiry_data data;
5135 	size_t eir_len;
5136 	int i;
5137 
5138 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5139 			     flex_array_size(ev, info, ev->num)))
5140 		return;
5141 
5142 	bt_dev_dbg(hdev, "num %d", ev->num);
5143 
5144 	if (!ev->num)
5145 		return;
5146 
5147 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5148 		return;
5149 
5150 	hci_dev_lock(hdev);
5151 
5152 	for (i = 0; i < ev->num; i++) {
5153 		struct extended_inquiry_info *info = &ev->info[i];
5154 		u32 flags;
5155 		bool name_known;
5156 
5157 		bacpy(&data.bdaddr, &info->bdaddr);
5158 		data.pscan_rep_mode	= info->pscan_rep_mode;
5159 		data.pscan_period_mode	= info->pscan_period_mode;
5160 		data.pscan_mode		= 0x00;
5161 		memcpy(data.dev_class, info->dev_class, 3);
5162 		data.clock_offset	= info->clock_offset;
5163 		data.rssi		= info->rssi;
5164 		data.ssp_mode		= 0x01;
5165 
5166 		if (hci_dev_test_flag(hdev, HCI_MGMT))
5167 			name_known = eir_get_data(info->data,
5168 						  sizeof(info->data),
5169 						  EIR_NAME_COMPLETE, NULL);
5170 		else
5171 			name_known = true;
5172 
5173 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5174 
5175 		eir_len = eir_get_length(info->data, sizeof(info->data));
5176 
5177 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5178 				  info->dev_class, info->rssi,
5179 				  flags, info->data, eir_len, NULL, 0, 0);
5180 	}
5181 
5182 	hci_dev_unlock(hdev);
5183 }
5184 
5185 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5186 					 struct sk_buff *skb)
5187 {
5188 	struct hci_ev_key_refresh_complete *ev = data;
5189 	struct hci_conn *conn;
5190 
5191 	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5192 		   __le16_to_cpu(ev->handle));
5193 
5194 	hci_dev_lock(hdev);
5195 
5196 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5197 	if (!conn)
5198 		goto unlock;
5199 
5200 	/* For BR/EDR the necessary steps are taken through the
5201 	 * auth_complete event.
5202 	 */
5203 	if (conn->type != LE_LINK)
5204 		goto unlock;
5205 
5206 	if (!ev->status)
5207 		conn->sec_level = conn->pending_sec_level;
5208 
5209 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5210 
5211 	if (ev->status && conn->state == BT_CONNECTED) {
5212 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5213 		hci_conn_drop(conn);
5214 		goto unlock;
5215 	}
5216 
5217 	if (conn->state == BT_CONFIG) {
5218 		if (!ev->status)
5219 			conn->state = BT_CONNECTED;
5220 
5221 		hci_connect_cfm(conn, ev->status);
5222 		hci_conn_drop(conn);
5223 	} else {
5224 		hci_auth_cfm(conn, ev->status);
5225 
5226 		hci_conn_hold(conn);
5227 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5228 		hci_conn_drop(conn);
5229 	}
5230 
5231 unlock:
5232 	hci_dev_unlock(hdev);
5233 }
5234 
5235 static u8 hci_get_auth_req(struct hci_conn *conn)
5236 {
5237 	/* If remote requests no-bonding follow that lead */
5238 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5239 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5240 		return conn->remote_auth | (conn->auth_type & 0x01);
5241 
5242 	/* If both remote and local have enough IO capabilities, require
5243 	 * MITM protection
5244 	 */
5245 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5246 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5247 		return conn->remote_auth | 0x01;
5248 
5249 	/* No MITM protection possible so ignore remote requirement */
5250 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5251 }
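
/* Example of the policy above: local DisplayYesNo requesting dedicated
 * bonding with MITM (auth_type 0x03) against a remote that asked for
 * general bonding without MITM (remote_auth 0x04) yields 0x05, since
 * both sides have the IO capabilities for MITM protection.
 */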
5252 
5253 static u8 bredr_oob_data_present(struct hci_conn *conn)
5254 {
5255 	struct hci_dev *hdev = conn->hdev;
5256 	struct oob_data *data;
5257 
5258 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5259 	if (!data)
5260 		return 0x00;
5261 
5262 	if (bredr_sc_enabled(hdev)) {
5263 		/* When Secure Connections is enabled, then just
5264 		 * return the present value stored with the OOB
5265 		 * data. The stored value contains the right present
5266 		 * information. However, it can only be trusted when
5267 		 * not in Secure Connections Only mode.
5268 		 */
5269 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5270 			return data->present;
5271 
5272 		/* When Secure Connections Only mode is enabled, then
5273 		 * the P-256 values are required. If they are not
5274 		 * available, then do not declare that OOB data is
5275 		 * present.
5276 		 */
5277 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5278 		    !memcmp(data->hash256, ZERO_KEY, 16))
5279 			return 0x00;
5280 
5281 		return 0x02;
5282 	}
5283 
5284 	/* When Secure Connections is not enabled or actually
5285 	 * not supported by the hardware, then check whether the
5286 	 * P-192 data values are present.
5287 	 */
5288 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5289 	    !memcmp(data->hash192, ZERO_KEY, 16))
5290 		return 0x00;
5291 
5292 	return 0x01;
5293 }
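
/* The return values above follow the OOB_Data_Present parameter of the
 * IO Capability Request Reply command: 0x00 no OOB data, 0x01 P-192
 * values present, 0x02 P-256 values present; the stored data->present
 * byte (returned when Secure Connections is enabled but not enforced)
 * may additionally report 0x03 for both.
 */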
5294 
5295 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5296 				    struct sk_buff *skb)
5297 {
5298 	struct hci_ev_io_capa_request *ev = data;
5299 	struct hci_conn *conn;
5300 
5301 	bt_dev_dbg(hdev, "");
5302 
5303 	hci_dev_lock(hdev);
5304 
5305 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5306 	if (!conn)
5307 		goto unlock;
5308 
5309 	hci_conn_hold(conn);
5310 
5311 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5312 		goto unlock;
5313 
5314 	/* Allow pairing if we're pairable, if we're the initiators
5315 	 * of the pairing, or if the remote is not requesting bonding.
5316 	 */
5317 	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5318 	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5319 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5320 		struct hci_cp_io_capability_reply cp;
5321 
5322 		bacpy(&cp.bdaddr, &ev->bdaddr);
5323 		/* Change the IO capability from KeyboardDisplay to
5324 		 * DisplayYesNo, as the former is not defined by the BT spec. */
5325 		cp.capability = (conn->io_capability == 0x04) ?
5326 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5327 
5328 		/* If we are initiators, there is no remote information yet */
5329 		if (conn->remote_auth == 0xff) {
5330 			/* Request MITM protection if our IO caps allow it
5331 			 * except for the no-bonding case.
5332 			 */
5333 			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5334 			    conn->auth_type != HCI_AT_NO_BONDING)
5335 				conn->auth_type |= 0x01;
5336 		} else {
5337 			conn->auth_type = hci_get_auth_req(conn);
5338 		}
5339 
5340 		/* If we're not bondable, force one of the non-bondable
5341 		 * authentication requirement values.
5342 		 */
5343 		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5344 			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5345 
5346 		cp.authentication = conn->auth_type;
5347 		cp.oob_data = bredr_oob_data_present(conn);
5348 
5349 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5350 			     sizeof(cp), &cp);
5351 	} else {
5352 		struct hci_cp_io_capability_neg_reply cp;
5353 
5354 		bacpy(&cp.bdaddr, &ev->bdaddr);
5355 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5356 
5357 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5358 			     sizeof(cp), &cp);
5359 	}
5360 
5361 unlock:
5362 	hci_dev_unlock(hdev);
5363 }
5364 
5365 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5366 				  struct sk_buff *skb)
5367 {
5368 	struct hci_ev_io_capa_reply *ev = data;
5369 	struct hci_conn *conn;
5370 
5371 	bt_dev_dbg(hdev, "");
5372 
5373 	hci_dev_lock(hdev);
5374 
5375 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5376 	if (!conn)
5377 		goto unlock;
5378 
5379 	conn->remote_cap = ev->capability;
5380 	conn->remote_auth = ev->authentication;
5381 
5382 unlock:
5383 	hci_dev_unlock(hdev);
5384 }
5385 
5386 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5387 					 struct sk_buff *skb)
5388 {
5389 	struct hci_ev_user_confirm_req *ev = data;
5390 	int loc_mitm, rem_mitm, confirm_hint = 0;
5391 	struct hci_conn *conn;
5392 
5393 	bt_dev_dbg(hdev, "");
5394 
5395 	hci_dev_lock(hdev);
5396 
5397 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5398 		goto unlock;
5399 
5400 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5401 	if (!conn)
5402 		goto unlock;
5403 
5404 	loc_mitm = (conn->auth_type & 0x01);
5405 	rem_mitm = (conn->remote_auth & 0x01);
5406 
5407 	/* If we require MITM but the remote device can't provide that
5408 	 * (it has NoInputNoOutput) then reject the confirmation
5409 	 * request. We check the security level here since it doesn't
5410 	 * necessarily match conn->auth_type.
5411 	 */
5412 	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5413 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5414 		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5415 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5416 			     sizeof(ev->bdaddr), &ev->bdaddr);
5417 		goto unlock;
5418 	}
5419 
5420 	/* If no side requires MITM protection, auto-accept */
5421 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5422 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5423 
5424 		/* If we're not the initiators, request authorization to
5425 		 * proceed from user space (mgmt_user_confirm with
5426 		 * confirm_hint set to 1). The exception is if neither
5427 		 * side had MITM or if the local IO capability is
5428 		 * NoInputNoOutput, in which case we do auto-accept.
5429 		 */
5430 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5431 		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5432 		    (loc_mitm || rem_mitm)) {
5433 			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5434 			confirm_hint = 1;
5435 			goto confirm;
5436 		}
5437 
5438 		/* If a link key already exists in the local host, leave the
5439 		 * decision to user space since the remote device could be
5440 		 * legitimate or malicious.
5441 		 */
5442 		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5443 			bt_dev_dbg(hdev, "Local host already has link key");
5444 			confirm_hint = 1;
5445 			goto confirm;
5446 		}
5447 
5448 		BT_DBG("Auto-accept of user confirmation with %ums delay",
5449 		       hdev->auto_accept_delay);
5450 
5451 		if (hdev->auto_accept_delay > 0) {
5452 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5453 			queue_delayed_work(conn->hdev->workqueue,
5454 					   &conn->auto_accept_work, delay);
5455 			goto unlock;
5456 		}
5457 
5458 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5459 			     sizeof(ev->bdaddr), &ev->bdaddr);
5460 		goto unlock;
5461 	}
5462 
5463 confirm:
5464 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5465 				  le32_to_cpu(ev->passkey), confirm_hint);
5466 
5467 unlock:
5468 	hci_dev_unlock(hdev);
5469 }
5470 
5471 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5472 					 struct sk_buff *skb)
5473 {
5474 	struct hci_ev_user_passkey_req *ev = data;
5475 
5476 	bt_dev_dbg(hdev, "");
5477 
5478 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5479 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5480 }
5481 
5482 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5483 					struct sk_buff *skb)
5484 {
5485 	struct hci_ev_user_passkey_notify *ev = data;
5486 	struct hci_conn *conn;
5487 
5488 	bt_dev_dbg(hdev, "");
5489 
5490 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5491 	if (!conn)
5492 		return;
5493 
5494 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5495 	conn->passkey_entered = 0;
5496 
5497 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5498 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5499 					 conn->dst_type, conn->passkey_notify,
5500 					 conn->passkey_entered);
5501 }
5502 
5503 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5504 				    struct sk_buff *skb)
5505 {
5506 	struct hci_ev_keypress_notify *ev = data;
5507 	struct hci_conn *conn;
5508 
5509 	bt_dev_dbg(hdev, "");
5510 
5511 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5512 	if (!conn)
5513 		return;
5514 
5515 	switch (ev->type) {
5516 	case HCI_KEYPRESS_STARTED:
5517 		conn->passkey_entered = 0;
5518 		return;
5519 
5520 	case HCI_KEYPRESS_ENTERED:
5521 		conn->passkey_entered++;
5522 		break;
5523 
5524 	case HCI_KEYPRESS_ERASED:
5525 		conn->passkey_entered--;
5526 		break;
5527 
5528 	case HCI_KEYPRESS_CLEARED:
5529 		conn->passkey_entered = 0;
5530 		break;
5531 
5532 	case HCI_KEYPRESS_COMPLETED:
5533 		return;
5534 	}
5535 
5536 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5537 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5538 					 conn->dst_type, conn->passkey_notify,
5539 					 conn->passkey_entered);
5540 }
5541 
5542 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5543 					 struct sk_buff *skb)
5544 {
5545 	struct hci_ev_simple_pair_complete *ev = data;
5546 	struct hci_conn *conn;
5547 
5548 	bt_dev_dbg(hdev, "");
5549 
5550 	hci_dev_lock(hdev);
5551 
5552 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5553 	if (!conn)
5554 		goto unlock;
5555 
5556 	/* Reset the authentication requirement to unknown */
5557 	conn->remote_auth = 0xff;
5558 
5559 	/* To avoid duplicate auth_failed events to user space we check
5560 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
5561 	 * initiated the authentication. A traditional auth_complete
5562 	 * event is always produced as initiator and is also mapped
5563 	 * to the mgmt_auth_failed event. */
5564 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5565 		mgmt_auth_failed(conn, ev->status);
5566 
5567 	hci_conn_drop(conn);
5568 
5569 unlock:
5570 	hci_dev_unlock(hdev);
5571 }
5572 
5573 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5574 					 struct sk_buff *skb)
5575 {
5576 	struct hci_ev_remote_host_features *ev = data;
5577 	struct inquiry_entry *ie;
5578 	struct hci_conn *conn;
5579 
5580 	bt_dev_dbg(hdev, "");
5581 
5582 	hci_dev_lock(hdev);
5583 
5584 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5585 	if (conn)
5586 		memcpy(conn->features[1], ev->features, 8);
5587 
5588 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5589 	if (ie)
5590 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5591 
5592 	hci_dev_unlock(hdev);
5593 }
5594 
5595 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5596 					    struct sk_buff *skb)
5597 {
5598 	struct hci_ev_remote_oob_data_request *ev = edata;
5599 	struct oob_data *data;
5600 
5601 	bt_dev_dbg(hdev, "");
5602 
5603 	hci_dev_lock(hdev);
5604 
5605 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5606 		goto unlock;
5607 
5608 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5609 	if (!data) {
5610 		struct hci_cp_remote_oob_data_neg_reply cp;
5611 
5612 		bacpy(&cp.bdaddr, &ev->bdaddr);
5613 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5614 			     sizeof(cp), &cp);
5615 		goto unlock;
5616 	}
5617 
5618 	if (bredr_sc_enabled(hdev)) {
5619 		struct hci_cp_remote_oob_ext_data_reply cp;
5620 
5621 		bacpy(&cp.bdaddr, &ev->bdaddr);
5622 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5623 			memset(cp.hash192, 0, sizeof(cp.hash192));
5624 			memset(cp.rand192, 0, sizeof(cp.rand192));
5625 		} else {
5626 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5627 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5628 		}
5629 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5630 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5631 
5632 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5633 			     sizeof(cp), &cp);
5634 	} else {
5635 		struct hci_cp_remote_oob_data_reply cp;
5636 
5637 		bacpy(&cp.bdaddr, &ev->bdaddr);
5638 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5639 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5640 
5641 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5642 			     sizeof(cp), &cp);
5643 	}
5644 
5645 unlock:
5646 	hci_dev_unlock(hdev);
5647 }
5648 
5649 #if IS_ENABLED(CONFIG_BT_HS)
5650 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5651 				  struct sk_buff *skb)
5652 {
5653 	struct hci_ev_channel_selected *ev = data;
5654 	struct hci_conn *hcon;
5655 
5656 	bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5657 
5658 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5659 	if (!hcon)
5660 		return;
5661 
5662 	amp_read_loc_assoc_final_data(hdev, hcon);
5663 }
5664 
5665 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5666 				      struct sk_buff *skb)
5667 {
5668 	struct hci_ev_phy_link_complete *ev = data;
5669 	struct hci_conn *hcon, *bredr_hcon;
5670 
5671 	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5672 		   ev->status);
5673 
5674 	hci_dev_lock(hdev);
5675 
5676 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5677 	if (!hcon)
5678 		goto unlock;
5679 
5680 	if (!hcon->amp_mgr)
5681 		goto unlock;
5682 
5683 	if (ev->status) {
5684 		hci_conn_del(hcon);
5685 		goto unlock;
5686 	}
5687 
5688 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5689 
5690 	hcon->state = BT_CONNECTED;
5691 	bacpy(&hcon->dst, &bredr_hcon->dst);
5692 
5693 	hci_conn_hold(hcon);
5694 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5695 	hci_conn_drop(hcon);
5696 
5697 	hci_debugfs_create_conn(hcon);
5698 	hci_conn_add_sysfs(hcon);
5699 
5700 	amp_physical_cfm(bredr_hcon, hcon);
5701 
5702 unlock:
5703 	hci_dev_unlock(hdev);
5704 }
5705 
5706 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5707 				     struct sk_buff *skb)
5708 {
5709 	struct hci_ev_logical_link_complete *ev = data;
5710 	struct hci_conn *hcon;
5711 	struct hci_chan *hchan;
5712 	struct amp_mgr *mgr;
5713 
5714 	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5715 		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5716 
5717 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5718 	if (!hcon)
5719 		return;
5720 
5721 	/* Create AMP hchan */
5722 	hchan = hci_chan_create(hcon);
5723 	if (!hchan)
5724 		return;
5725 
5726 	hchan->handle = le16_to_cpu(ev->handle);
5727 	hchan->amp = true;
5728 
5729 	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5730 
5731 	mgr = hcon->amp_mgr;
5732 	if (mgr && mgr->bredr_chan) {
5733 		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5734 
5735 		l2cap_chan_lock(bredr_chan);
5736 
5737 		bredr_chan->conn->mtu = hdev->block_mtu;
5738 		l2cap_logical_cfm(bredr_chan, hchan, 0);
5739 		hci_conn_hold(hcon);
5740 
5741 		l2cap_chan_unlock(bredr_chan);
5742 	}
5743 }
5744 
5745 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5746 					     struct sk_buff *skb)
5747 {
5748 	struct hci_ev_disconn_logical_link_complete *ev = data;
5749 	struct hci_chan *hchan;
5750 
5751 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5752 		   le16_to_cpu(ev->handle), ev->status);
5753 
5754 	if (ev->status)
5755 		return;
5756 
5757 	hci_dev_lock(hdev);
5758 
5759 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5760 	if (!hchan || !hchan->amp)
5761 		goto unlock;
5762 
5763 	amp_destroy_logical_link(hchan, ev->reason);
5764 
5765 unlock:
5766 	hci_dev_unlock(hdev);
5767 }
5768 
5769 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5770 					     struct sk_buff *skb)
5771 {
5772 	struct hci_ev_disconn_phy_link_complete *ev = data;
5773 	struct hci_conn *hcon;
5774 
5775 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5776 
5777 	if (ev->status)
5778 		return;
5779 
5780 	hci_dev_lock(hdev);
5781 
5782 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5783 	if (hcon && hcon->type == AMP_LINK) {
5784 		hcon->state = BT_CLOSED;
5785 		hci_disconn_cfm(hcon, ev->reason);
5786 		hci_conn_del(hcon);
5787 	}
5788 
5789 	hci_dev_unlock(hdev);
5790 }
5791 #endif
5792 
5793 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5794 				u8 bdaddr_type, bdaddr_t *local_rpa)
5795 {
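	/* A sketch of the bookkeeping below: conn->init_addr/resp_addr track
	 * the initiator and responder addresses of the link, so for outgoing
	 * connections the peer is the responder and for incoming ones the
	 * peer is the initiator.
	 */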
5796 	if (conn->out) {
5797 		conn->dst_type = bdaddr_type;
5798 		conn->resp_addr_type = bdaddr_type;
5799 		bacpy(&conn->resp_addr, bdaddr);
5800 
5801 		/* If the controller has set a Local RPA, then it must be
5802 		 * used instead of hdev->rpa.
5803 		 */
5804 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5805 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5806 			bacpy(&conn->init_addr, local_rpa);
5807 		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5808 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5809 			bacpy(&conn->init_addr, &conn->hdev->rpa);
5810 		} else {
5811 			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5812 						  &conn->init_addr_type);
5813 		}
5814 	} else {
5815 		conn->resp_addr_type = conn->hdev->adv_addr_type;
5816 		/* If the controller has set a Local RPA, then it must be
5817 		 * used instead of hdev->rpa.
5818 		 */
5819 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5820 			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5821 			bacpy(&conn->resp_addr, local_rpa);
5822 		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5823 			/* In case of ext adv, resp_addr will be updated in
5824 			 * Adv Terminated event.
5825 			 */
5826 			if (!ext_adv_capable(conn->hdev))
5827 				bacpy(&conn->resp_addr,
5828 				      &conn->hdev->random_addr);
5829 		} else {
5830 			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5831 		}
5832 
5833 		conn->init_addr_type = bdaddr_type;
5834 		bacpy(&conn->init_addr, bdaddr);
5835 
5836 		/* For incoming connections, set the default minimum
5837 		 * and maximum connection interval. They will be used
5838 		 * to check if the parameters are in range and if not
5839 		 * trigger the connection update procedure.
5840 		 */
5841 		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5842 		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5843 	}
5844 }
5845 
5846 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5847 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5848 				 bdaddr_t *local_rpa, u8 role, u16 handle,
5849 				 u16 interval, u16 latency,
5850 				 u16 supervision_timeout)
5851 {
5852 	struct hci_conn_params *params;
5853 	struct hci_conn *conn;
5854 	struct smp_irk *irk;
5855 	u8 addr_type;
5856 
5857 	hci_dev_lock(hdev);
5858 
5859 	/* All controllers implicitly stop advertising in the event of a
5860 	 * connection, so ensure that the state bit is cleared.
5861 	 */
5862 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5863 
5864 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5865 	if (!conn) {
5866 		/* If the status indicates an error and there is no pending
5867 		 * connection, just unlock as there is nothing to clean up.
5868 		 */
5869 		if (status)
5870 			goto unlock;
5871 
5872 		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5873 		if (!conn) {
5874 			bt_dev_err(hdev, "no memory for new connection");
5875 			goto unlock;
5876 		}
5877 
5878 		conn->dst_type = bdaddr_type;
5879 
5880 		/* If we didn't have a hci_conn object previously
5881 		 * but we're in the central role, this must be something
5882 		 * initiated using an accept list. Since accept list based
5883 		 * connections are not "first class citizens" we don't
5884 		 * have full tracking of them. Therefore, we go ahead
5885 		 * with a "best effort" approach of determining the
5886 		 * initiator address based on the HCI_PRIVACY flag.
5887 		 */
5888 		if (conn->out) {
5889 			conn->resp_addr_type = bdaddr_type;
5890 			bacpy(&conn->resp_addr, bdaddr);
5891 			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5892 				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5893 				bacpy(&conn->init_addr, &hdev->rpa);
5894 			} else {
5895 				hci_copy_identity_address(hdev,
5896 							  &conn->init_addr,
5897 							  &conn->init_addr_type);
5898 			}
5899 		}
5900 	} else {
5901 		cancel_delayed_work(&conn->le_conn_timeout);
5902 	}
5903 
5904 	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5905 	 * Processing it more than once per connection can corrupt kernel memory.
5906 	 *
5907 	 * As the connection handle is set here for the first time, it indicates
5908 	 * whether the connection is already set up.
5909 	 */
5910 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5911 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5912 		goto unlock;
5913 	}
5914 
5915 	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5916 
5917 	/* Lookup the identity address from the stored connection
5918 	 * address and address type.
5919 	 *
5920 	 * When establishing connections to an identity address, the
5921 	 * connection procedure will store the resolvable random
5922 	 * address first. Now if it can be converted back into the
5923 	 * identity address, start using the identity address from
5924 	 * now on.
5925 	 */
5926 	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5927 	if (irk) {
5928 		bacpy(&conn->dst, &irk->bdaddr);
5929 		conn->dst_type = irk->addr_type;
5930 	}
5931 
5932 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5933 
5934 	if (handle > HCI_CONN_HANDLE_MAX) {
5935 		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
5936 			   HCI_CONN_HANDLE_MAX);
5937 		status = HCI_ERROR_INVALID_PARAMETERS;
5938 	}
5939 
5940 	/* All connection failure handling is taken care of by the
5941 	 * hci_conn_failed function which is triggered by the HCI
5942 	 * request completion callbacks used for connecting.
5943 	 */
5944 	if (status)
5945 		goto unlock;
5946 
5947 	/* Drop the connection if it has been aborted */
5948 	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5949 		hci_conn_drop(conn);
5950 		goto unlock;
5951 	}
5952 
5953 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5954 		addr_type = BDADDR_LE_PUBLIC;
5955 	else
5956 		addr_type = BDADDR_LE_RANDOM;
5957 
5958 	/* Drop the connection if the device is blocked */
5959 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5960 		hci_conn_drop(conn);
5961 		goto unlock;
5962 	}
5963 
5964 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5965 		mgmt_device_connected(hdev, conn, NULL, 0);
5966 
5967 	conn->sec_level = BT_SECURITY_LOW;
5968 	conn->handle = handle;
5969 	conn->state = BT_CONFIG;
5970 
5971 	/* Store current advertising instance as connection advertising instance
5972 	 * when software rotation is in use so it can be re-enabled when
5973 	 * disconnected.
5974 	 */
5975 	if (!ext_adv_capable(hdev))
5976 		conn->adv_instance = hdev->cur_adv_instance;
5977 
5978 	conn->le_conn_interval = interval;
5979 	conn->le_conn_latency = latency;
5980 	conn->le_supv_timeout = supervision_timeout;
5981 
5982 	hci_debugfs_create_conn(conn);
5983 	hci_conn_add_sysfs(conn);
5984 
5985 	/* The remote features procedure is defined for central
5986 	 * role only. So only in case of an initiated connection
5987 	 * request the remote features.
5988 	 *
5989 	 * If the local controller supports peripheral-initiated features
5990 	 * exchange, then requesting the remote features in peripheral
5991 	 * role is possible. Otherwise just transition into the
5992 	 * connected state without requesting the remote features.
5993 	 */
5994 	if (conn->out ||
5995 	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5996 		struct hci_cp_le_read_remote_features cp;
5997 
5998 		cp.handle = __cpu_to_le16(conn->handle);
5999 
6000 		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6001 			     sizeof(cp), &cp);
6002 
6003 		hci_conn_hold(conn);
6004 	} else {
6005 		conn->state = BT_CONNECTED;
6006 		hci_connect_cfm(conn, status);
6007 	}
6008 
6009 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6010 					   conn->dst_type);
6011 	if (params) {
6012 		hci_pend_le_list_del_init(params);
6013 		if (params->conn) {
6014 			hci_conn_drop(params->conn);
6015 			hci_conn_put(params->conn);
6016 			params->conn = NULL;
6017 		}
6018 	}
6019 
6020 unlock:
6021 	hci_update_passive_scan(hdev);
6022 	hci_dev_unlock(hdev);
6023 }
6024 
6025 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6026 				     struct sk_buff *skb)
6027 {
6028 	struct hci_ev_le_conn_complete *ev = data;
6029 
6030 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6031 
6032 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6033 			     NULL, ev->role, le16_to_cpu(ev->handle),
6034 			     le16_to_cpu(ev->interval),
6035 			     le16_to_cpu(ev->latency),
6036 			     le16_to_cpu(ev->supervision_timeout));
6037 }
6038 
6039 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6040 					 struct sk_buff *skb)
6041 {
6042 	struct hci_ev_le_enh_conn_complete *ev = data;
6043 
6044 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6045 
6046 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6047 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6048 			     le16_to_cpu(ev->interval),
6049 			     le16_to_cpu(ev->latency),
6050 			     le16_to_cpu(ev->supervision_timeout));
6051 }
6052 
6053 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6054 				    struct sk_buff *skb)
6055 {
6056 	struct hci_evt_le_ext_adv_set_term *ev = data;
6057 	struct hci_conn *conn;
6058 	struct adv_info *adv, *n;
6059 
6060 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6061 
6062 	/* The Bluetooth Core 5.3 specification clearly states that this event
6063 	 * shall not be sent when the Host disables the advertising set. So in
6064 	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6065 	 *
6066 	 * When the Host disables an advertising set, all cleanup is done via
6067 	 * its command callback and does not need to be duplicated here.
6068 	 */
6069 	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6070 		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6071 		return;
6072 	}
6073 
6074 	hci_dev_lock(hdev);
6075 
6076 	adv = hci_find_adv_instance(hdev, ev->handle);
6077 
6078 	if (ev->status) {
6079 		if (!adv)
6080 			goto unlock;
6081 
6082 		/* Remove advertising as it has been terminated */
6083 		hci_remove_adv_instance(hdev, ev->handle);
6084 		mgmt_advertising_removed(NULL, hdev, ev->handle);
6085 
6086 		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6087 			if (adv->enabled)
6088 				goto unlock;
6089 		}
6090 
6091 		/* We are no longer advertising, clear HCI_LE_ADV */
6092 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
6093 		goto unlock;
6094 	}
6095 
6096 	if (adv)
6097 		adv->enabled = false;
6098 
6099 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6100 	if (conn) {
6101 		/* Store handle in the connection so the correct advertising
6102 		 * instance can be re-enabled when disconnected.
6103 		 */
6104 		conn->adv_instance = ev->handle;
6105 
6106 		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6107 		    bacmp(&conn->resp_addr, BDADDR_ANY))
6108 			goto unlock;
6109 
6110 		if (!ev->handle) {
6111 			bacpy(&conn->resp_addr, &hdev->random_addr);
6112 			goto unlock;
6113 		}
6114 
6115 		if (adv)
6116 			bacpy(&conn->resp_addr, &adv->random_addr);
6117 	}
6118 
6119 unlock:
6120 	hci_dev_unlock(hdev);
6121 }
6122 
6123 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6124 					    struct sk_buff *skb)
6125 {
6126 	struct hci_ev_le_conn_update_complete *ev = data;
6127 	struct hci_conn *conn;
6128 
6129 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6130 
6131 	if (ev->status)
6132 		return;
6133 
6134 	hci_dev_lock(hdev);
6135 
6136 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6137 	if (conn) {
6138 		conn->le_conn_interval = le16_to_cpu(ev->interval);
6139 		conn->le_conn_latency = le16_to_cpu(ev->latency);
6140 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6141 	}
6142 
6143 	hci_dev_unlock(hdev);
6144 }
6145 
6146 /* This function requires the caller holds hdev->lock */
6147 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6148 					      bdaddr_t *addr,
6149 					      u8 addr_type, bool addr_resolved,
6150 					      u8 adv_type)
6151 {
6152 	struct hci_conn *conn;
6153 	struct hci_conn_params *params;
6154 
6155 	/* If the event is not connectable don't proceed further */
6156 	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6157 		return NULL;
6158 
6159 	/* Ignore if the device is blocked or hdev is suspended */
6160 	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6161 	    hdev->suspended)
6162 		return NULL;
6163 
6164 	/* Most controllers will fail if we try to create new connections
6165 	 * while we have an existing one in peripheral role.
6166 	 */
6167 	if (hdev->conn_hash.le_num_peripheral > 0 &&
6168 	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6169 	     !(hdev->le_states[3] & 0x10)))
6170 		return NULL;
6171 
6172 	/* If we're not connectable only connect devices that we have in
6173 	 * our pend_le_conns list.
6174 	 */
6175 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6176 					   addr_type);
6177 	if (!params)
6178 		return NULL;
6179 
6180 	if (!params->explicit_connect) {
6181 		switch (params->auto_connect) {
6182 		case HCI_AUTO_CONN_DIRECT:
6183 			/* Only devices advertising with ADV_DIRECT_IND
6184 			 * trigger a connection attempt. This allows
6185 			 * incoming connections from peripheral devices.
6186 			 */
6187 			if (adv_type != LE_ADV_DIRECT_IND)
6188 				return NULL;
6189 			break;
6190 		case HCI_AUTO_CONN_ALWAYS:
6191 			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
6192 			 * trigger a connection attempt. This means
6193 			 * that incoming connections from peripheral devices are
6194 			 * accepted and also outgoing connections to peripheral
6195 			 * devices are established when found.
6196 			 */
6197 			break;
6198 		default:
6199 			return NULL;
6200 		}
6201 	}
6202 
6203 	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6204 			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6205 			      HCI_ROLE_MASTER);
6206 	if (!IS_ERR(conn)) {
6207 		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6208 		 * by the higher layer that tried to connect; if not, then
6209 		 * store the pointer since we don't really have any
6210 		 * other owner of the object besides the params that
6211 		 * triggered it. This way we can abort the connection if
6212 		 * the parameters get removed and keep the reference
6213 		 * count consistent once the connection is established.
6214 		 */
6215 
6216 		if (!params->explicit_connect)
6217 			params->conn = hci_conn_get(conn);
6218 
6219 		return conn;
6220 	}
6221 
6222 	switch (PTR_ERR(conn)) {
6223 	case -EBUSY:
6224 		/* If hci_connect() returns -EBUSY it means there is already
6225 		 * an LE connection attempt going on. Since controllers don't
6226 		 * support more than one connection attempt at a time, we
6227 		 * don't consider this an error case.
6228 		 */
6229 		break;
6230 	default:
6231 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6232 		return NULL;
6233 	}
6234 
6235 	return NULL;
6236 }
6237 
6238 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6239 			       u8 bdaddr_type, bdaddr_t *direct_addr,
6240 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6241 			       bool ext_adv, bool ctl_time, u64 instant)
6242 {
6243 	struct discovery_state *d = &hdev->discovery;
6244 	struct smp_irk *irk;
6245 	struct hci_conn *conn;
6246 	bool match, bdaddr_resolved;
6247 	u32 flags;
6248 	u8 *ptr;
6249 
6250 	switch (type) {
6251 	case LE_ADV_IND:
6252 	case LE_ADV_DIRECT_IND:
6253 	case LE_ADV_SCAN_IND:
6254 	case LE_ADV_NONCONN_IND:
6255 	case LE_ADV_SCAN_RSP:
6256 		break;
6257 	default:
6258 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6259 				       "type: 0x%02x", type);
6260 		return;
6261 	}
6262 
6263 	if (len > max_adv_len(hdev)) {
6264 		bt_dev_err_ratelimited(hdev,
6265 				       "adv larger than maximum supported");
6266 		return;
6267 	}
6268 
6269 	/* Find the end of the data in case the report contains padded zero
6270 	 * bytes at the end causing an invalid length value.
6271 	 *
6272 	 * When data is NULL, len is 0 so there is no need for extra ptr
6273 	 * check as 'ptr < data + 0' is already false in such a case.
6274 	 */
6275 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6276 		if (ptr + 1 + *ptr > data + len)
6277 			break;
6278 	}
6279 
6280 	/* Adjust for actual length. This handles the case when remote
6281 	 * device is advertising with incorrect data length.
6282 	 */
6283 	len = ptr - data;
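	/* AD structures are length-prefixed, e.g. "02 01 06" is a Flags
	 * field (length 2, type 0x01, value 0x06); a zero length octet
	 * terminates the walk above, trimming any trailing zero padding.
	 */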
6284 
6285 	/* If the direct address is present, then this report is from
6286 	 * an LE Direct Advertising Report event. In that case it is
6287 	 * important to see if the address matches the local
6288 	 * controller address.
6289 	 */
6290 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6291 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6292 						  &bdaddr_resolved);
6293 
6294 		/* Only resolvable random addresses are valid for this
6295 		 * kind of report and others can be ignored.
6296 		 */
6297 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6298 			return;
6299 
6300 		/* If the controller is not using resolvable random
6301 		 * addresses, then this report can be ignored.
6302 		 */
6303 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6304 			return;
6305 
6306 		/* If the local IRK of the controller does not match
6307 		 * with the resolvable random address provided, then
6308 		 * this report can be ignored.
6309 		 */
6310 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6311 			return;
6312 	}
6313 
6314 	/* Check if we need to convert to identity address */
6315 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6316 	if (irk) {
6317 		bdaddr = &irk->bdaddr;
6318 		bdaddr_type = irk->addr_type;
6319 	}
6320 
6321 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6322 
6323 	/* Check if we have been requested to connect to this device.
6324 	 *
6325 	 * direct_addr is set only for directed advertising reports (it is NULL
6326 	 * for advertising reports) and is already verified to be an RPA above.
6327 	 */
6328 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6329 				     type);
6330 	if (!ext_adv && conn && type == LE_ADV_IND &&
6331 	    len <= max_adv_len(hdev)) {
6332 		/* Store report for later inclusion by
6333 		 * mgmt_device_connected
6334 		 */
6335 		memcpy(conn->le_adv_data, data, len);
6336 		conn->le_adv_data_len = len;
6337 	}
6338 
6339 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6340 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6341 	else
6342 		flags = 0;
6343 
6344 	/* All scan results should be sent up for Mesh systems */
6345 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6346 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6347 				  rssi, flags, data, len, NULL, 0, instant);
6348 		return;
6349 	}
6350 
6351 	/* Passive scanning shouldn't trigger any device found events,
6352 	 * except for devices marked as CONN_REPORT for which we do send
6353 	 * device found events, or when advertisement monitoring was requested.
6354 	 */
6355 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6356 		if (type == LE_ADV_DIRECT_IND)
6357 			return;
6358 
6359 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6360 					       bdaddr, bdaddr_type) &&
6361 		    idr_is_empty(&hdev->adv_monitors_idr))
6362 			return;
6363 
6364 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6365 				  rssi, flags, data, len, NULL, 0, 0);
6366 		return;
6367 	}
6368 
6369 	/* When receiving a scan response, there is no way to
6370 	 * know if the remote device is connectable or not. However
6371 	 * since scan responses are merged with a previously seen
6372 	 * advertising report, the flags field from that report
6373 	 * will be used.
6374 	 *
6375 	 * In the unlikely case that a controller just sends a scan
6376 	 * response event that doesn't match the pending report, then
6377 	 * it is marked as a standalone SCAN_RSP.
6378 	 */
6379 	if (type == LE_ADV_SCAN_RSP)
6380 		flags = MGMT_DEV_FOUND_SCAN_RSP;
6381 
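	/* Typical active scanning flow, as handled below: an ADV_IND is
	 * stored as the pending report, the matching SCAN_RSP arrives next
	 * and both are merged into a single device found event.
	 */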
6382 	/* If there's nothing pending either store the data from this
6383 	 * event or send an immediate device found event if the data
6384 	 * should not be stored for later.
6385 	 */
6386 	if (!ext_adv && !has_pending_adv_report(hdev)) {
6387 		/* If the report will trigger a SCAN_REQ store it for
6388 		 * later merging.
6389 		 */
6390 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6391 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6392 						 rssi, flags, data, len);
6393 			return;
6394 		}
6395 
6396 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6397 				  rssi, flags, data, len, NULL, 0, 0);
6398 		return;
6399 	}
6400 
6401 	/* Check if the pending report is for the same device as the new one */
6402 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6403 		 bdaddr_type == d->last_adv_addr_type);
6404 
6405 	/* If the pending data doesn't match this report or this isn't a
6406 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6407 	 * sending of the pending data.
6408 	 */
6409 	if (type != LE_ADV_SCAN_RSP || !match) {
6410 		/* Send out whatever is in the cache, but skip duplicates */
6411 		if (!match)
6412 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6413 					  d->last_adv_addr_type, NULL,
6414 					  d->last_adv_rssi, d->last_adv_flags,
6415 					  d->last_adv_data,
6416 					  d->last_adv_data_len, NULL, 0, 0);
6417 
6418 		/* If the new report will trigger a SCAN_REQ store it for
6419 		 * later merging.
6420 		 */
6421 		if (!ext_adv && (type == LE_ADV_IND ||
6422 				 type == LE_ADV_SCAN_IND)) {
6423 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6424 						 rssi, flags, data, len);
6425 			return;
6426 		}
6427 
6428 		/* The advertising reports cannot be merged, so clear
6429 		 * the pending report and send out a device found event.
6430 		 */
6431 		clear_pending_adv_report(hdev);
6432 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6433 				  rssi, flags, data, len, NULL, 0, 0);
6434 		return;
6435 	}
6436 
6437 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6438 	 * the new event is a SCAN_RSP. We can therefore proceed with
6439 	 * sending a merged device found event.
6440 	 */
6441 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6442 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6443 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6444 	clear_pending_adv_report(hdev);
6445 }
6446 
6447 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6448 				  struct sk_buff *skb)
6449 {
6450 	struct hci_ev_le_advertising_report *ev = data;
6451 	u64 instant = jiffies;
6452 
6453 	if (!ev->num)
6454 		return;
6455 
6456 	hci_dev_lock(hdev);
6457 
6458 	while (ev->num--) {
6459 		struct hci_ev_le_advertising_info *info;
6460 		s8 rssi;
6461 
6462 		info = hci_le_ev_skb_pull(hdev, skb,
6463 					  HCI_EV_LE_ADVERTISING_REPORT,
6464 					  sizeof(*info));
6465 		if (!info)
6466 			break;
6467 
6468 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6469 					info->length + 1))
6470 			break;
6471 
6472 		if (info->length <= max_adv_len(hdev)) {
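			/* The RSSI octet trails the advertising data, which
			 * is why info->length + 1 bytes were pulled above.
			 */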
6473 			rssi = info->data[info->length];
6474 			process_adv_report(hdev, info->type, &info->bdaddr,
6475 					   info->bdaddr_type, NULL, 0, rssi,
6476 					   info->data, info->length, false,
6477 					   false, instant);
6478 		} else {
6479 			bt_dev_err(hdev, "Dropping invalid advertising data");
6480 		}
6481 	}
6482 
6483 	hci_dev_unlock(hdev);
6484 }
6485 
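/* Map an extended advertising report event type onto the legacy PDU types,
 * e.g. a legacy ADV_IND is reported with event type 0x0013 (LEGACY_PDU |
 * CONN_IND | SCAN_IND) and a non-legacy connectable report maps to
 * LE_ADV_IND.
 */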
6486 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6487 {
6488 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6489 		switch (evt_type) {
6490 		case LE_LEGACY_ADV_IND:
6491 			return LE_ADV_IND;
6492 		case LE_LEGACY_ADV_DIRECT_IND:
6493 			return LE_ADV_DIRECT_IND;
6494 		case LE_LEGACY_ADV_SCAN_IND:
6495 			return LE_ADV_SCAN_IND;
6496 		case LE_LEGACY_NONCONN_IND:
6497 			return LE_ADV_NONCONN_IND;
6498 		case LE_LEGACY_SCAN_RSP_ADV:
6499 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6500 			return LE_ADV_SCAN_RSP;
6501 		}
6502 
6503 		goto invalid;
6504 	}
6505 
6506 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6507 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6508 			return LE_ADV_DIRECT_IND;
6509 
6510 		return LE_ADV_IND;
6511 	}
6512 
6513 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6514 		return LE_ADV_SCAN_RSP;
6515 
6516 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6517 		return LE_ADV_SCAN_IND;
6518 
6519 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6520 	    evt_type & LE_EXT_ADV_DIRECT_IND)
6521 		return LE_ADV_NONCONN_IND;
6522 
6523 invalid:
6524 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6525 			       evt_type);
6526 
6527 	return LE_ADV_INVALID;
6528 }
6529 
6530 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6531 				      struct sk_buff *skb)
6532 {
6533 	struct hci_ev_le_ext_adv_report *ev = data;
6534 	u64 instant = jiffies;
6535 
6536 	if (!ev->num)
6537 		return;
6538 
6539 	hci_dev_lock(hdev);
6540 
6541 	while (ev->num--) {
6542 		struct hci_ev_le_ext_adv_info *info;
6543 		u8 legacy_evt_type;
6544 		u16 evt_type;
6545 
6546 		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6547 					  sizeof(*info));
6548 		if (!info)
6549 			break;
6550 
6551 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6552 					info->length))
6553 			break;
6554 
6555 		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6556 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6557 		if (legacy_evt_type != LE_ADV_INVALID) {
6558 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6559 					   info->bdaddr_type, NULL, 0,
6560 					   info->rssi, info->data, info->length,
6561 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6562 					   false, instant);
6563 		}
6564 	}
6565 
6566 	hci_dev_unlock(hdev);
6567 }
6568 
6569 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6570 {
6571 	struct hci_cp_le_pa_term_sync cp;
6572 
6573 	memset(&cp, 0, sizeof(cp));
6574 	cp.handle = handle;
6575 
6576 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6577 }
6578 
6579 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6580 					    struct sk_buff *skb)
6581 {
6582 	struct hci_ev_le_pa_sync_established *ev = data;
6583 	int mask = hdev->link_mode;
6584 	__u8 flags = 0;
6585 	struct hci_conn *bis;
6586 
6587 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6588 
6589 	hci_dev_lock(hdev);
6590 
6591 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6592 
6593 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6594 	if (!(mask & HCI_LM_ACCEPT)) {
6595 		hci_le_pa_term_sync(hdev, ev->handle);
6596 		goto unlock;
6597 	}
6598 
6599 	if (!(flags & HCI_PROTO_DEFER))
6600 		goto unlock;
6601 
6602 	/* Add connection to indicate the PA sync event */
6603 	bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
6604 			   HCI_ROLE_SLAVE);
6605 
6606 	if (!bis)
6607 		goto unlock;
6608 
6609 	if (ev->status)
6610 		set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags);
6611 	else
6612 		set_bit(HCI_CONN_PA_SYNC, &bis->flags);
6613 
6614 	/* Notify the iso layer of the connection */
6615 	hci_connect_cfm(bis, ev->status);
6616 
6617 unlock:
6618 	hci_dev_unlock(hdev);
6619 }
6620 
6621 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6622 				      struct sk_buff *skb)
6623 {
6624 	struct hci_ev_le_per_adv_report *ev = data;
6625 	int mask = hdev->link_mode;
6626 	__u8 flags = 0;
6627 
6628 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6629 
6630 	hci_dev_lock(hdev);
6631 
6632 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6633 	if (!(mask & HCI_LM_ACCEPT))
6634 		hci_le_pa_term_sync(hdev, ev->sync_handle);
6635 
6636 	hci_dev_unlock(hdev);
6637 }
6638 
6639 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6640 					    struct sk_buff *skb)
6641 {
6642 	struct hci_ev_le_remote_feat_complete *ev = data;
6643 	struct hci_conn *conn;
6644 
6645 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6646 
6647 	hci_dev_lock(hdev);
6648 
6649 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6650 	if (conn) {
6651 		if (!ev->status)
6652 			memcpy(conn->features[0], ev->features, 8);
6653 
6654 		if (conn->state == BT_CONFIG) {
6655 			__u8 status;
6656 
6657 			/* If the local controller supports peripheral-initiated
6658 			 * features exchange, but the remote controller does
6659 			 * not, then it is possible that the error code 0x1a
6660 			 * for unsupported remote feature gets returned.
6661 			 *
6662 			 * In this specific case, allow the connection to
6663 			 * transition into connected state and mark it as
6664 			 * successful.
6665 			 */
6666 			if (!conn->out && ev->status == 0x1a &&
6667 			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6668 				status = 0x00;
6669 			else
6670 				status = ev->status;
6671 
6672 			conn->state = BT_CONNECTED;
6673 			hci_connect_cfm(conn, status);
6674 			hci_conn_drop(conn);
6675 		}
6676 	}
6677 
6678 	hci_dev_unlock(hdev);
6679 }
6680 
6681 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6682 				   struct sk_buff *skb)
6683 {
6684 	struct hci_ev_le_ltk_req *ev = data;
6685 	struct hci_cp_le_ltk_reply cp;
6686 	struct hci_cp_le_ltk_neg_reply neg;
6687 	struct hci_conn *conn;
6688 	struct smp_ltk *ltk;
6689 
6690 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6691 
6692 	hci_dev_lock(hdev);
6693 
6694 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6695 	if (conn == NULL)
6696 		goto not_found;
6697 
6698 	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6699 	if (!ltk)
6700 		goto not_found;
6701 
6702 	if (smp_ltk_is_sc(ltk)) {
6703 		/* With SC both EDiv and Rand are set to zero */
6704 		if (ev->ediv || ev->rand)
6705 			goto not_found;
6706 	} else {
6707 		/* For non-SC keys check that EDiv and Rand match */
6708 		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6709 			goto not_found;
6710 	}
6711 
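	/* Keys negotiated with a reduced encryption key size only fill the
	 * first enc_size bytes, e.g. enc_size = 7 leaves cp.ltk bytes 7..15
	 * to be zeroed by the memset below.
	 */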
6712 	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6713 	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6714 	cp.handle = cpu_to_le16(conn->handle);
6715 
6716 	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6717 
6718 	conn->enc_key_size = ltk->enc_size;
6719 
6720 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6721 
6722 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6723 	 * temporary key used to encrypt a connection following
6724 	 * pairing. It is used during the Encrypted Session Setup to
6725 	 * distribute the keys. Later, security can be re-established
6726 	 * using a distributed LTK.
6727 	 */
6728 	if (ltk->type == SMP_STK) {
6729 		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6730 		list_del_rcu(&ltk->list);
6731 		kfree_rcu(ltk, rcu);
6732 	} else {
6733 		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6734 	}
6735 
6736 	hci_dev_unlock(hdev);
6737 
6738 	return;
6739 
6740 not_found:
6741 	neg.handle = ev->handle;
6742 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6743 	hci_dev_unlock(hdev);
6744 }
6745 
6746 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6747 				      u8 reason)
6748 {
6749 	struct hci_cp_le_conn_param_req_neg_reply cp;
6750 
6751 	cp.handle = cpu_to_le16(handle);
6752 	cp.reason = reason;
6753 
6754 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6755 		     &cp);
6756 }
6757 
6758 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6759 					     struct sk_buff *skb)
6760 {
6761 	struct hci_ev_le_remote_conn_param_req *ev = data;
6762 	struct hci_cp_le_conn_param_req_reply cp;
6763 	struct hci_conn *hcon;
6764 	u16 handle, min, max, latency, timeout;
6765 
6766 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6767 
6768 	handle = le16_to_cpu(ev->handle);
6769 	min = le16_to_cpu(ev->interval_min);
6770 	max = le16_to_cpu(ev->interval_max);
6771 	latency = le16_to_cpu(ev->latency);
6772 	timeout = le16_to_cpu(ev->timeout);
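	/* Per the Core spec, the intervals are in 1.25 ms units, latency is
	 * a number of connection events and the timeout is in 10 ms units,
	 * e.g. min = 0x0018 is 30 ms and timeout = 0x01f4 is 5 s.
	 */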
6773 
6774 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6775 	if (!hcon || hcon->state != BT_CONNECTED)
6776 		return send_conn_param_neg_reply(hdev, handle,
6777 						 HCI_ERROR_UNKNOWN_CONN_ID);
6778 
6779 	if (hci_check_conn_params(min, max, latency, timeout))
6780 		return send_conn_param_neg_reply(hdev, handle,
6781 						 HCI_ERROR_INVALID_LL_PARAMS);
6782 
6783 	if (hcon->role == HCI_ROLE_MASTER) {
6784 		struct hci_conn_params *params;
6785 		u8 store_hint;
6786 
6787 		hci_dev_lock(hdev);
6788 
6789 		params = hci_conn_params_lookup(hdev, &hcon->dst,
6790 						hcon->dst_type);
6791 		if (params) {
6792 			params->conn_min_interval = min;
6793 			params->conn_max_interval = max;
6794 			params->conn_latency = latency;
6795 			params->supervision_timeout = timeout;
6796 			store_hint = 0x01;
6797 		} else {
6798 			store_hint = 0x00;
6799 		}
6800 
6801 		hci_dev_unlock(hdev);
6802 
6803 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6804 				    store_hint, min, max, latency, timeout);
6805 	}
6806 
6807 	cp.handle = ev->handle;
6808 	cp.interval_min = ev->interval_min;
6809 	cp.interval_max = ev->interval_max;
6810 	cp.latency = ev->latency;
6811 	cp.timeout = ev->timeout;
6812 	cp.min_ce_len = 0;
6813 	cp.max_ce_len = 0;
6814 
6815 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6816 }
6817 
6818 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6819 					 struct sk_buff *skb)
6820 {
6821 	struct hci_ev_le_direct_adv_report *ev = data;
6822 	u64 instant = jiffies;
6823 	int i;
6824 
6825 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6826 				flex_array_size(ev, info, ev->num)))
6827 		return;
6828 
6829 	if (!ev->num)
6830 		return;
6831 
6832 	hci_dev_lock(hdev);
6833 
6834 	for (i = 0; i < ev->num; i++) {
6835 		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6836 
6837 		process_adv_report(hdev, info->type, &info->bdaddr,
6838 				   info->bdaddr_type, &info->direct_addr,
6839 				   info->direct_addr_type, info->rssi, NULL, 0,
6840 				   false, false, instant);
6841 	}
6842 
6843 	hci_dev_unlock(hdev);
6844 }
6845 
6846 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6847 				  struct sk_buff *skb)
6848 {
6849 	struct hci_ev_le_phy_update_complete *ev = data;
6850 	struct hci_conn *conn;
6851 
6852 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6853 
6854 	if (ev->status)
6855 		return;
6856 
6857 	hci_dev_lock(hdev);
6858 
6859 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6860 	if (!conn)
6861 		goto unlock;
6862 
6863 	conn->le_tx_phy = ev->tx_phy;
6864 	conn->le_rx_phy = ev->rx_phy;
6865 
6866 unlock:
6867 	hci_dev_unlock(hdev);
6868 }
6869 
6870 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6871 					struct sk_buff *skb)
6872 {
6873 	struct hci_evt_le_cis_established *ev = data;
6874 	struct hci_conn *conn;
6875 	struct bt_iso_qos *qos;
6876 	bool pending = false;
6877 	u16 handle = __le16_to_cpu(ev->handle);
6878 
6879 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6880 
6881 	hci_dev_lock(hdev);
6882 
6883 	conn = hci_conn_hash_lookup_handle(hdev, handle);
6884 	if (!conn) {
6885 		bt_dev_err(hdev,
6886 			   "Unable to find connection with handle 0x%4.4x",
6887 			   handle);
6888 		goto unlock;
6889 	}
6890 
6891 	if (conn->type != ISO_LINK) {
6892 		bt_dev_err(hdev,
6893 			   "Invalid connection link type handle 0x%4.4x",
6894 			   handle);
6895 		goto unlock;
6896 	}
6897 
6898 	qos = &conn->iso_qos;
6899 
6900 	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6901 
6902 	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
6903 	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
6904 	qos->ucast.out.interval = qos->ucast.in.interval;
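	/* e.g. an ISO Interval of 8 slots yields 8 * 1250 = 10000 us (10 ms) */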
6905 
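	/* The c_* event fields describe the Central-to-Peripheral direction
	 * and the p_* fields the Peripheral-to-Central direction, so which
	 * of them maps to "in" or "out" depends on our role.
	 */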
6906 	switch (conn->role) {
6907 	case HCI_ROLE_SLAVE:
6908 		/* Convert Transport Latency (us) to Latency (msec) */
6909 		qos->ucast.in.latency =
6910 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6911 					  1000);
6912 		qos->ucast.out.latency =
6913 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6914 					  1000);
6915 		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6916 		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6917 		qos->ucast.in.phy = ev->c_phy;
6918 		qos->ucast.out.phy = ev->p_phy;
6919 		break;
6920 	case HCI_ROLE_MASTER:
6921 		/* Convert Transport Latency (us) to Latency (msec) */
6922 		qos->ucast.out.latency =
6923 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6924 					  1000);
6925 		qos->ucast.in.latency =
6926 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6927 					  1000);
6928 		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6929 		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6930 		qos->ucast.out.phy = ev->c_phy;
6931 		qos->ucast.in.phy = ev->p_phy;
6932 		break;
6933 	}
6934 
6935 	if (!ev->status) {
6936 		conn->state = BT_CONNECTED;
6937 		hci_debugfs_create_conn(conn);
6938 		hci_conn_add_sysfs(conn);
6939 		hci_iso_setup_path(conn);
6940 		goto unlock;
6941 	}
6942 
6943 	conn->state = BT_CLOSED;
6944 	hci_connect_cfm(conn, ev->status);
6945 	hci_conn_del(conn);
6946 
6947 unlock:
6948 	if (pending)
6949 		hci_le_create_cis_pending(hdev);
6950 
6951 	hci_dev_unlock(hdev);
6952 }
6953 
6954 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6955 {
6956 	struct hci_cp_le_reject_cis cp;
6957 
6958 	memset(&cp, 0, sizeof(cp));
6959 	cp.handle = handle;
6960 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6961 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6962 }
6963 
6964 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6965 {
6966 	struct hci_cp_le_accept_cis cp;
6967 
6968 	memset(&cp, 0, sizeof(cp));
6969 	cp.handle = handle;
6970 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6971 }
6972 
6973 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6974 			       struct sk_buff *skb)
6975 {
6976 	struct hci_evt_le_cis_req *ev = data;
6977 	u16 acl_handle, cis_handle;
6978 	struct hci_conn *acl, *cis;
6979 	int mask;
6980 	__u8 flags = 0;
6981 
6982 	acl_handle = __le16_to_cpu(ev->acl_handle);
6983 	cis_handle = __le16_to_cpu(ev->cis_handle);
6984 
6985 	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6986 		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6987 
6988 	hci_dev_lock(hdev);
6989 
6990 	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6991 	if (!acl)
6992 		goto unlock;
6993 
6994 	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6995 	if (!(mask & HCI_LM_ACCEPT)) {
6996 		hci_le_reject_cis(hdev, ev->cis_handle);
6997 		goto unlock;
6998 	}
6999 
7000 	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
7001 	if (!cis) {
7002 		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
7003 		if (!cis) {
7004 			hci_le_reject_cis(hdev, ev->cis_handle);
7005 			goto unlock;
7006 		}
7007 		cis->handle = cis_handle;
7008 	}
7009 
7010 	cis->iso_qos.ucast.cig = ev->cig_id;
7011 	cis->iso_qos.ucast.cis = ev->cis_id;
7012 
7013 	if (!(flags & HCI_PROTO_DEFER)) {
7014 		hci_le_accept_cis(hdev, ev->cis_handle);
7015 	} else {
7016 		cis->state = BT_CONNECT2;
7017 		hci_connect_cfm(cis, 0);
7018 	}
7019 
7020 unlock:
7021 	hci_dev_unlock(hdev);
7022 }
7023 
7024 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
7025 					   struct sk_buff *skb)
7026 {
7027 	struct hci_evt_le_create_big_complete *ev = data;
7028 	struct hci_conn *conn;
7029 	__u8 i = 0;
7030 
7031 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7032 
7033 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
7034 				flex_array_size(ev, bis_handle, ev->num_bis)))
7035 		return;
7036 
7037 	hci_dev_lock(hdev);
7038 	rcu_read_lock();
7039 
7040 	/* Connect all BISes that are bound to the BIG */
7041 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
7042 		if (bacmp(&conn->dst, BDADDR_ANY) ||
7043 		    conn->type != ISO_LINK ||
7044 		    conn->iso_qos.bcast.big != ev->handle)
7045 			continue;
7046 
7047 		if (hci_conn_set_handle(conn,
7048 					__le16_to_cpu(ev->bis_handle[i++])))
7049 			continue;
7050 
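		/* The debugfs/sysfs helpers below may sleep, so the RCU read
		 * lock is dropped around them and re-acquired afterwards to
		 * keep walking the connection list.
		 */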
7051 		if (!ev->status) {
7052 			conn->state = BT_CONNECTED;
7053 			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
7054 			rcu_read_unlock();
7055 			hci_debugfs_create_conn(conn);
7056 			hci_conn_add_sysfs(conn);
7057 			hci_iso_setup_path(conn);
7058 			rcu_read_lock();
7059 			continue;
7060 		}
7061 
7062 		hci_connect_cfm(conn, ev->status);
7063 		rcu_read_unlock();
7064 		hci_conn_del(conn);
7065 		rcu_read_lock();
7066 	}
7067 
7068 	if (!ev->status && !i)
7069 		/* If no BISes have been connected for the BIG,
7070 		 * terminate. This is in case all bound connections
7071 		 * have been closed before the BIG creation
7072 		 * has completed.
7073 		 */
7074 		hci_le_terminate_big_sync(hdev, ev->handle,
7075 					  HCI_ERROR_LOCAL_HOST_TERM);
7076 
7077 	rcu_read_unlock();
7078 	hci_dev_unlock(hdev);
7079 }
7080 
7081 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7082 					    struct sk_buff *skb)
7083 {
7084 	struct hci_evt_le_big_sync_estabilished *ev = data;
7085 	struct hci_conn *bis;
7086 	struct hci_conn *pa_sync;
7087 	int i;
7088 
7089 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7090 
7091 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7092 				flex_array_size(ev, bis, ev->num_bis)))
7093 		return;
7094 
7095 	hci_dev_lock(hdev);
7096 
7097 	if (!ev->status) {
7098 		pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle);
7099 		if (pa_sync)
7100 			/* Also mark the BIG sync established event on the
7101 			 * associated PA sync hcon
7102 			 */
7103 			set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
7104 	}
7105 
7106 	for (i = 0; i < ev->num_bis; i++) {
7107 		u16 handle = le16_to_cpu(ev->bis[i]);
7108 		__le32 interval;
7109 
7110 		bis = hci_conn_hash_lookup_handle(hdev, handle);
7111 		if (!bis) {
7112 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7113 					   HCI_ROLE_SLAVE);
7114 			if (!bis)
7115 				continue;
7116 			bis->handle = handle;
7117 		}
7118 
7119 		if (ev->status != 0x42)
7120 			/* Mark PA sync as established */
7121 			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7122 
7123 		bis->iso_qos.bcast.big = ev->handle;
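		/* ev->latency is a 24-bit little-endian field; copying it
		 * into a zero-initialized __le32 lets le32_to_cpu() recover
		 * its value below.
		 */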
7124 		memset(&interval, 0, sizeof(interval));
7125 		memcpy(&interval, ev->latency, sizeof(ev->latency));
7126 		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7127 		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7128 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
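		/* e.g. an ISO Interval of 24 slots is 24 * 125 / 100 = 30 ms */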
7129 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7130 
7131 		if (!ev->status) {
7132 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7133 			hci_iso_setup_path(bis);
7134 		}
7135 	}
7136 
7137 	/* In case BIG sync failed, notify the user of each failed
7138 	 * connection after all hci connections have been added
7139 	 */
7140 	if (ev->status)
7141 		for (i = 0; i < ev->num_bis; i++) {
7142 			u16 handle = le16_to_cpu(ev->bis[i]);
7143 
7144 			bis = hci_conn_hash_lookup_handle(hdev, handle);
			if (!bis)
				continue;

7146 			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7147 			hci_connect_cfm(bis, ev->status);
7148 		}
7149 
7150 	hci_dev_unlock(hdev);
7151 }
7152 
7153 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7154 					   struct sk_buff *skb)
7155 {
7156 	struct hci_evt_le_big_info_adv_report *ev = data;
7157 	int mask = hdev->link_mode;
7158 	__u8 flags = 0;
7159 
7160 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7161 
7162 	hci_dev_lock(hdev);
7163 
7164 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7165 	if (!(mask & HCI_LM_ACCEPT))
7166 		hci_le_pa_term_sync(hdev, ev->sync_handle);
7167 
7168 	hci_dev_unlock(hdev);
7169 }
7170 
7171 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7172 [_op] = { \
7173 	.func = _func, \
7174 	.min_len = _min_len, \
7175 	.max_len = _max_len, \
7176 }
7177 
7178 #define HCI_LE_EV(_op, _func, _len) \
7179 	HCI_LE_EV_VL(_op, _func, _len, _len)
7180 
7181 #define HCI_LE_EV_STATUS(_op, _func) \
7182 	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7183 
7184 /* Entries in this table shall be placed according to the subevent opcode
7185  * they handle, so use of the macros above is recommended since they
7186  * initialize each entry at its proper index using designated initializers;
7187  * that way events without a callback function can be omitted.
7188  */
7189 static const struct hci_le_ev {
7190 	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7191 	u16  min_len;
7192 	u16  max_len;
7193 } hci_le_ev_table[U8_MAX + 1] = {
7194 	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7195 	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7196 		  sizeof(struct hci_ev_le_conn_complete)),
7197 	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7198 	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7199 		     sizeof(struct hci_ev_le_advertising_report),
7200 		     HCI_MAX_EVENT_SIZE),
7201 	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7202 	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7203 		  hci_le_conn_update_complete_evt,
7204 		  sizeof(struct hci_ev_le_conn_update_complete)),
7205 	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7206 	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7207 		  hci_le_remote_feat_complete_evt,
7208 		  sizeof(struct hci_ev_le_remote_feat_complete)),
7209 	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7210 	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7211 		  sizeof(struct hci_ev_le_ltk_req)),
7212 	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7213 	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7214 		  hci_le_remote_conn_param_req_evt,
7215 		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7216 	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7217 	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7218 		  hci_le_enh_conn_complete_evt,
7219 		  sizeof(struct hci_ev_le_enh_conn_complete)),
7220 	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7221 	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7222 		     sizeof(struct hci_ev_le_direct_adv_report),
7223 		     HCI_MAX_EVENT_SIZE),
7224 	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7225 	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7226 		  sizeof(struct hci_ev_le_phy_update_complete)),
7227 	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7228 	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7229 		     sizeof(struct hci_ev_le_ext_adv_report),
7230 		     HCI_MAX_EVENT_SIZE),
7231 	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7232 	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7233 		  hci_le_pa_sync_estabilished_evt,
7234 		  sizeof(struct hci_ev_le_pa_sync_established)),
7235 	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7236 	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7237 		     hci_le_per_adv_report_evt,
7238 		     sizeof(struct hci_ev_le_per_adv_report),
7239 		     HCI_MAX_EVENT_SIZE),
7240 	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7241 	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7242 		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7243 	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7244 	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7245 		  sizeof(struct hci_evt_le_cis_established)),
7246 	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7247 	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7248 		  sizeof(struct hci_evt_le_cis_req)),
7249 	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7250 	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7251 		     hci_le_create_big_complete_evt,
7252 		     sizeof(struct hci_evt_le_create_big_complete),
7253 		     HCI_MAX_EVENT_SIZE),
7254 	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7255 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7256 		     hci_le_big_sync_established_evt,
7257 		     sizeof(struct hci_evt_le_big_sync_estabilished),
7258 		     HCI_MAX_EVENT_SIZE),
7259 	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7260 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7261 		     hci_le_big_info_adv_report_evt,
7262 		     sizeof(struct hci_evt_le_big_info_adv_report),
7263 		     HCI_MAX_EVENT_SIZE),
7264 };
7265 
7266 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7267 			    struct sk_buff *skb, u16 *opcode, u8 *status,
7268 			    hci_req_complete_t *req_complete,
7269 			    hci_req_complete_skb_t *req_complete_skb)
7270 {
7271 	struct hci_ev_le_meta *ev = data;
7272 	const struct hci_le_ev *subev;
7273 
7274 	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7275 
7276 	/* Only match event if command OGF is for LE */
7277 	if (hdev->sent_cmd &&
7278 	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7279 	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7280 		*opcode = hci_skb_opcode(hdev->sent_cmd);
7281 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7282 				     req_complete_skb);
7283 	}
7284 
7285 	subev = &hci_le_ev_table[ev->subevent];
7286 	if (!subev->func)
7287 		return;
7288 
7289 	if (skb->len < subev->min_len) {
7290 		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7291 			   ev->subevent, skb->len, subev->min_len);
7292 		return;
7293 	}
7294 
7295 	/* Just warn if the length is over max_len since it may still be
7296 	 * possible to partially parse the event, so leave it to the callback
7297 	 * to decide if that is acceptable.
7298 	 */
7299 	if (skb->len > subev->max_len)
7300 		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7301 			    ev->subevent, skb->len, subev->max_len);
7302 	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7303 	if (!data)
7304 		return;
7305 
7306 	subev->func(hdev, data, skb);
7307 }
7308 
7309 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7310 				 u8 event, struct sk_buff *skb)
7311 {
7312 	struct hci_ev_cmd_complete *ev;
7313 	struct hci_event_hdr *hdr;
7314 
7315 	if (!skb)
7316 		return false;
7317 
7318 	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7319 	if (!hdr)
7320 		return false;
7321 
7322 	if (event) {
7323 		if (hdr->evt != event)
7324 			return false;
7325 		return true;
7326 	}
7327 
7328 	/* Check if request ended in Command Status - no way to retrieve
7329 	 * any extra parameters in this case.
7330 	 */
7331 	if (hdr->evt == HCI_EV_CMD_STATUS)
7332 		return false;
7333 
7334 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7335 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7336 			   hdr->evt);
7337 		return false;
7338 	}
7339 
7340 	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7341 	if (!ev)
7342 		return false;
7343 
7344 	if (opcode != __le16_to_cpu(ev->opcode)) {
7345 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7346 		       __le16_to_cpu(ev->opcode));
7347 		return false;
7348 	}
7349 
7350 	return true;
7351 }
7352 
7353 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7354 				  struct sk_buff *skb)
7355 {
7356 	struct hci_ev_le_advertising_info *adv;
7357 	struct hci_ev_le_direct_adv_info *direct_adv;
7358 	struct hci_ev_le_ext_adv_info *ext_adv;
7359 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7360 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7361 
7362 	hci_dev_lock(hdev);
7363 
7364 	/* If we are currently suspended and this is the first BT event seen,
7365 	 * save the wake reason associated with the event.
7366 	 */
7367 	if (!hdev->suspended || hdev->wake_reason)
7368 		goto unlock;
7369 
7370 	/* Default to remote wake. Values for wake_reason are documented in the
7371 	 * BlueZ mgmt API docs.
7372 	 */
7373 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7374 
7375 	/* Once configured for remote wakeup, we should only wake up for
7376 	 * reconnections. It's useful to see which device is waking us up so
7377 	 * keep track of the bdaddr of the connection event that woke us up.
7378 	 */
7379 	if (event == HCI_EV_CONN_REQUEST) {
7380 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7381 		hdev->wake_addr_type = BDADDR_BREDR;
7382 	} else if (event == HCI_EV_CONN_COMPLETE) {
7383 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7384 		hdev->wake_addr_type = BDADDR_BREDR;
7385 	} else if (event == HCI_EV_LE_META) {
7386 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7387 		u8 subevent = le_ev->subevent;
7388 		u8 *ptr = &skb->data[sizeof(*le_ev)];
7389 		u8 num_reports = *ptr;
7390 
7391 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7392 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7393 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7394 		    num_reports) {
7395 			adv = (void *)(ptr + 1);
7396 			direct_adv = (void *)(ptr + 1);
7397 			ext_adv = (void *)(ptr + 1);
7398 
7399 			switch (subevent) {
7400 			case HCI_EV_LE_ADVERTISING_REPORT:
7401 				bacpy(&hdev->wake_addr, &adv->bdaddr);
7402 				hdev->wake_addr_type = adv->bdaddr_type;
7403 				break;
7404 			case HCI_EV_LE_DIRECT_ADV_REPORT:
7405 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7406 				hdev->wake_addr_type = direct_adv->bdaddr_type;
7407 				break;
7408 			case HCI_EV_LE_EXT_ADV_REPORT:
7409 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7410 				hdev->wake_addr_type = ext_adv->bdaddr_type;
7411 				break;
7412 			}
7413 		}
7414 	} else {
7415 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7416 	}
7417 
7418 unlock:
7419 	hci_dev_unlock(hdev);
7420 }
7421 
7422 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7423 [_op] = { \
7424 	.req = false, \
7425 	.func = _func, \
7426 	.min_len = _min_len, \
7427 	.max_len = _max_len, \
7428 }
7429 
7430 #define HCI_EV(_op, _func, _len) \
7431 	HCI_EV_VL(_op, _func, _len, _len)
7432 
7433 #define HCI_EV_STATUS(_op, _func) \
7434 	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7435 
7436 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7437 [_op] = { \
7438 	.req = true, \
7439 	.func_req = _func, \
7440 	.min_len = _min_len, \
7441 	.max_len = _max_len, \
7442 }
7443 
7444 #define HCI_EV_REQ(_op, _func, _len) \
7445 	HCI_EV_REQ_VL(_op, _func, _len, _len)
7446 
7447 /* Entries in this table shall be placed according to the event opcode they
7448  * handle, so use of the macros above is recommended since they initialize
7449  * each entry at its proper index using designated initializers; that way
7450  * events without a callback function need no entry.
7451  */
7452 static const struct hci_ev {
7453 	bool req;
7454 	union {
7455 		void (*func)(struct hci_dev *hdev, void *data,
7456 			     struct sk_buff *skb);
7457 		void (*func_req)(struct hci_dev *hdev, void *data,
7458 				 struct sk_buff *skb, u16 *opcode, u8 *status,
7459 				 hci_req_complete_t *req_complete,
7460 				 hci_req_complete_skb_t *req_complete_skb);
7461 	};
7462 	u16  min_len;
7463 	u16  max_len;
7464 } hci_ev_table[U8_MAX + 1] = {
7465 	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7466 	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7467 	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7468 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7469 		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7470 	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7471 	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7472 	       sizeof(struct hci_ev_conn_complete)),
7473 	/* [0x04 = HCI_EV_CONN_REQUEST] */
7474 	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7475 	       sizeof(struct hci_ev_conn_request)),
7476 	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7477 	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7478 	       sizeof(struct hci_ev_disconn_complete)),
7479 	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7480 	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7481 	       sizeof(struct hci_ev_auth_complete)),
7482 	/* [0x07 = HCI_EV_REMOTE_NAME] */
7483 	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7484 	       sizeof(struct hci_ev_remote_name)),
7485 	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7486 	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7487 	       sizeof(struct hci_ev_encrypt_change)),
7488 	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7489 	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7490 	       hci_change_link_key_complete_evt,
7491 	       sizeof(struct hci_ev_change_link_key_complete)),
7492 	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7493 	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7494 	       sizeof(struct hci_ev_remote_features)),
7495 	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7496 	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7497 		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7498 	/* [0x0f = HCI_EV_CMD_STATUS] */
7499 	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7500 		   sizeof(struct hci_ev_cmd_status)),
7501 	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7502 	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7503 	       sizeof(struct hci_ev_hardware_error)),
7504 	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7505 	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7506 	       sizeof(struct hci_ev_role_change)),
7507 	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7508 	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7509 		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7510 	/* [0x14 = HCI_EV_MODE_CHANGE] */
7511 	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7512 	       sizeof(struct hci_ev_mode_change)),
7513 	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7514 	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7515 	       sizeof(struct hci_ev_pin_code_req)),
7516 	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7517 	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7518 	       sizeof(struct hci_ev_link_key_req)),
7519 	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7520 	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7521 	       sizeof(struct hci_ev_link_key_notify)),
7522 	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7523 	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7524 	       sizeof(struct hci_ev_clock_offset)),
7525 	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7526 	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7527 	       sizeof(struct hci_ev_pkt_type_change)),
7528 	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7529 	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7530 	       sizeof(struct hci_ev_pscan_rep_mode)),
7531 	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7532 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7533 		  hci_inquiry_result_with_rssi_evt,
7534 		  sizeof(struct hci_ev_inquiry_result_rssi),
7535 		  HCI_MAX_EVENT_SIZE),
7536 	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7537 	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7538 	       sizeof(struct hci_ev_remote_ext_features)),
7539 	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7540 	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7541 	       sizeof(struct hci_ev_sync_conn_complete)),
7542 	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7543 	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7544 		  hci_extended_inquiry_result_evt,
7545 		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7546 	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7547 	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7548 	       sizeof(struct hci_ev_key_refresh_complete)),
7549 	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7550 	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7551 	       sizeof(struct hci_ev_io_capa_request)),
7552 	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7553 	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7554 	       sizeof(struct hci_ev_io_capa_reply)),
7555 	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7556 	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7557 	       sizeof(struct hci_ev_user_confirm_req)),
7558 	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7559 	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7560 	       sizeof(struct hci_ev_user_passkey_req)),
7561 	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7562 	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7563 	       sizeof(struct hci_ev_remote_oob_data_request)),
7564 	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7565 	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7566 	       sizeof(struct hci_ev_simple_pair_complete)),
7567 	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7568 	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7569 	       sizeof(struct hci_ev_user_passkey_notify)),
7570 	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7571 	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7572 	       sizeof(struct hci_ev_keypress_notify)),
7573 	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7574 	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7575 	       sizeof(struct hci_ev_remote_host_features)),
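	/* Note: the entry below is req-style: hci_le_meta_evt() can complete
	 * a pending command request when the sent command expects a specific
	 * LE subevent.
	 */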
7576 	/* [0x3e = HCI_EV_LE_META] */
7577 	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7578 		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7579 #if IS_ENABLED(CONFIG_BT_HS)
7580 	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7581 	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7582 	       sizeof(struct hci_ev_phy_link_complete)),
7583 	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7584 	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7585 	       sizeof(struct hci_ev_channel_selected)),
7586 	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7587 	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7588 	       hci_disconn_phylink_complete_evt,
7589 	       sizeof(struct hci_ev_disconn_phy_link_complete)),
7590 	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7591 	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7592 	       sizeof(struct hci_ev_logical_link_complete)),
7593 	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7594 	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7595 	       hci_disconn_loglink_complete_evt,
7596 	       sizeof(struct hci_ev_disconn_logical_link_complete)),
7597 #endif
7598 	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7599 	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7600 	       sizeof(struct hci_ev_num_comp_blocks)),
7601 	/* [0xff = HCI_EV_VENDOR] */
7602 	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7603 };
7604 
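/* Look up and invoke the handler for @event in hci_ev_table. The table
 * has U8_MAX + 1 entries, so any 8-bit event code indexes it safely;
 * opcodes without a registered handler are zero-initialized, and since
 * ->func and ->func_req share a union the NULL check below covers both.
 */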
7605 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7606 			   u16 *opcode, u8 *status,
7607 			   hci_req_complete_t *req_complete,
7608 			   hci_req_complete_skb_t *req_complete_skb)
7609 {
7610 	const struct hci_ev *ev = &hci_ev_table[event];
7611 	void *data;
7612 
7613 	if (!ev->func)
7614 		return;
7615 
7616 	if (skb->len < ev->min_len) {
7617 		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7618 			   event, skb->len, ev->min_len);
7619 		return;
7620 	}
7621 
7622 	/* Just warn if the length is over max_len, since it may still be
7623 	 * possible to partially parse the event, so leave it to the
7624 	 * callback to decide whether that is acceptable.
7625 	 */
7626 	if (skb->len > ev->max_len)
7627 		bt_dev_warn_ratelimited(hdev,
7628 					"unexpected event 0x%2.2x length: %u > %u",
7629 					event, skb->len, ev->max_len);
7630 
7631 	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7632 	if (!data)
7633 		return;
7634 
7635 	if (ev->req)
7636 		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7637 			     req_complete_skb);
7638 	else
7639 		ev->func(hdev, data, skb);
7640 }
7641 
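/* Entry point for a complete HCI event packet received from the driver:
 * validate the header, match the event against any pending command so
 * the request machinery can complete, then dispatch the payload through
 * hci_ev_table. Consumes @skb.
 */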
7642 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7643 {
7644 	struct hci_event_hdr *hdr = (void *) skb->data;
7645 	hci_req_complete_t req_complete = NULL;
7646 	hci_req_complete_skb_t req_complete_skb = NULL;
7647 	struct sk_buff *orig_skb = NULL;
7648 	u8 status = 0, event, req_evt = 0;
7649 	u16 opcode = HCI_OP_NOP;
7650 
7651 	if (skb->len < sizeof(*hdr)) {
7652 		bt_dev_err(hdev, "Malformed HCI Event");
7653 		goto done;
7654 	}
7655 
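	/* Cache a clone of the most recent event so it remains available
	 * after the handlers below have consumed the original skb (readers
	 * such as hci_recv_event_data() inspect hdev->recv_event later).
	 */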
7656 	kfree_skb(hdev->recv_event);
7657 	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7658 
7659 	event = hdr->evt;
7660 	if (!event) {
7661 		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7662 			    event);
7663 		goto done;
7664 	}
7665 
7666 	/* Only match event if command OGF is not for LE: LE commands (OGF
	 * 0x08) are matched on the LE Meta subevent in hci_le_meta_evt()
	 * instead.
	 */
7667 	if (hdev->sent_cmd &&
7668 	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
7669 	    hci_skb_event(hdev->sent_cmd) == event) {
7670 		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
7671 				     status, &req_complete, &req_complete_skb);
7672 		req_evt = event;
7673 	}
7674 
7675 	/* If it looks like we might end up having to call
7676 	 * req_complete_skb, store a pristine copy of the skb since the
7677 	 * various handlers may modify the original one through
7678 	 * skb_pull() calls, etc.
7679 	 */
7680 	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7681 	    event == HCI_EV_CMD_COMPLETE)
7682 		orig_skb = skb_clone(skb, GFP_KERNEL);
7683 
7684 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
7685 
7686 	/* Store wake reason if we're suspended */
7687 	hci_store_wake_reason(hdev, event, skb);
7688 
7689 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
7690 
7691 	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7692 		       &req_complete_skb);
7693 
7694 	if (req_complete) {
7695 		req_complete(hdev, status, opcode);
7696 	} else if (req_complete_skb) {
7697 		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7698 			kfree_skb(orig_skb);
7699 			orig_skb = NULL;
7700 		}
7701 		req_complete_skb(hdev, status, opcode, orig_skb);
7702 	}
7703 
7704 done:
7705 	kfree_skb(orig_skb);
7706 	kfree_skb(skb);
7707 	hdev->stat.evt_rx++;
7708 }
7709