xref: /linux/net/bluetooth/hci_event.c (revision b255531b27da336571411248c2a72a350662bd09)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023-2024 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI event handling. */
27 
28 #include <linux/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_debugfs.h"
37 #include "hci_codec.h"
38 #include "smp.h"
39 #include "msft.h"
40 #include "eir.h"
41 
42 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
43 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
44 
45 /* Handle HCI Event packets */
46 
47 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
48 			     u8 ev, size_t len)
49 {
50 	void *data;
51 
52 	data = skb_pull_data(skb, len);
53 	if (!data)
54 		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
55 
56 	return data;
57 }
58 
59 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
60 			     u16 op, size_t len)
61 {
62 	void *data;
63 
64 	data = skb_pull_data(skb, len);
65 	if (!data)
66 		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
67 
68 	return data;
69 }
70 
71 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
72 				u8 ev, size_t len)
73 {
74 	void *data;
75 
76 	data = skb_pull_data(skb, len);
77 	if (!data)
78 		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
79 
80 	return data;
81 }
82 
/* Forward declaration; the definition appears later in this file.  The
 * sparse annotation documents that callers must hold hdev->lock.
 */
static void hci_store_wake_reason(struct hci_dev *hdev,
				  const bdaddr_t *bdaddr, u8 addr_type)
	__must_hold(&hdev->lock);
86 
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * Clears the local HCI_INQUIRY state and, unless an LE active scan is
 * still running, moves discovery to DISCOVERY_STOPPED.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed. This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	/* Clear the flag first, then wake any waiter sleeping on it; the
	 * barrier keeps the clear visible before the wake-up.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	return rp->status;
}
124 
125 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
126 			      struct sk_buff *skb)
127 {
128 	struct hci_ev_status *rp = data;
129 
130 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
131 
132 	if (rp->status)
133 		return rp->status;
134 
135 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
136 
137 	return rp->status;
138 }
139 
140 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
141 				   struct sk_buff *skb)
142 {
143 	struct hci_ev_status *rp = data;
144 
145 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
146 
147 	if (rp->status)
148 		return rp->status;
149 
150 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
151 
152 	return rp->status;
153 }
154 
155 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
156 					struct sk_buff *skb)
157 {
158 	struct hci_rp_remote_name_req_cancel *rp = data;
159 
160 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
161 
162 	return rp->status;
163 }
164 
165 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
166 				struct sk_buff *skb)
167 {
168 	struct hci_rp_role_discovery *rp = data;
169 	struct hci_conn *conn;
170 
171 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
172 
173 	if (rp->status)
174 		return rp->status;
175 
176 	hci_dev_lock(hdev);
177 
178 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
179 	if (conn)
180 		conn->role = rp->role;
181 
182 	hci_dev_unlock(hdev);
183 
184 	return rp->status;
185 }
186 
187 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
188 				  struct sk_buff *skb)
189 {
190 	struct hci_rp_read_link_policy *rp = data;
191 	struct hci_conn *conn;
192 
193 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
194 
195 	if (rp->status)
196 		return rp->status;
197 
198 	hci_dev_lock(hdev);
199 
200 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
201 	if (conn)
202 		conn->link_policy = __le16_to_cpu(rp->policy);
203 
204 	hci_dev_unlock(hdev);
205 
206 	return rp->status;
207 }
208 
209 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
210 				   struct sk_buff *skb)
211 {
212 	struct hci_rp_write_link_policy *rp = data;
213 	struct hci_conn *conn;
214 	void *sent;
215 
216 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
217 
218 	if (rp->status)
219 		return rp->status;
220 
221 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
222 	if (!sent)
223 		return rp->status;
224 
225 	hci_dev_lock(hdev);
226 
227 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
228 	if (conn)
229 		conn->link_policy = get_unaligned_le16(sent + 2);
230 
231 	hci_dev_unlock(hdev);
232 
233 	return rp->status;
234 }
235 
236 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
237 				      struct sk_buff *skb)
238 {
239 	struct hci_rp_read_def_link_policy *rp = data;
240 
241 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
242 
243 	if (rp->status)
244 		return rp->status;
245 
246 	hdev->link_policy = __le16_to_cpu(rp->policy);
247 
248 	return rp->status;
249 }
250 
251 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
252 				       struct sk_buff *skb)
253 {
254 	struct hci_ev_status *rp = data;
255 	void *sent;
256 
257 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
258 
259 	if (rp->status)
260 		return rp->status;
261 
262 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
263 	if (!sent)
264 		return rp->status;
265 
266 	hdev->link_policy = get_unaligned_le16(sent);
267 
268 	return rp->status;
269 }
270 
/* Command Complete handler for HCI_OP_RESET.
 *
 * On success, returns the device to a pristine volatile state: clears
 * non-persistent flags, stops discovery, invalidates cached TX power,
 * wipes advertising/scan-response data and empties the LE accept and
 * resolving lists.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* HCI_RESET is cleared unconditionally: the reset attempt is over
	 * whether or not it succeeded.
	 */
	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}
305 
306 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
307 				      struct sk_buff *skb)
308 {
309 	struct hci_rp_read_stored_link_key *rp = data;
310 	struct hci_cp_read_stored_link_key *sent;
311 
312 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
313 
314 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
315 	if (!sent)
316 		return rp->status;
317 
318 	if (!rp->status && sent->read_all == 0x01) {
319 		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
320 		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
321 	}
322 
323 	return rp->status;
324 }
325 
326 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
327 					struct sk_buff *skb)
328 {
329 	struct hci_rp_delete_stored_link_key *rp = data;
330 	u16 num_keys;
331 
332 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
333 
334 	if (rp->status)
335 		return rp->status;
336 
337 	num_keys = le16_to_cpu(rp->num_keys);
338 
339 	if (num_keys <= hdev->stored_num_keys)
340 		hdev->stored_num_keys -= num_keys;
341 	else
342 		hdev->stored_num_keys = 0;
343 
344 	return rp->status;
345 }
346 
347 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
348 				  struct sk_buff *skb)
349 {
350 	struct hci_ev_status *rp = data;
351 	void *sent;
352 
353 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
354 
355 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
356 	if (!sent)
357 		return rp->status;
358 
359 	hci_dev_lock(hdev);
360 
361 	if (hci_dev_test_flag(hdev, HCI_MGMT))
362 		mgmt_set_local_name_complete(hdev, sent, rp->status);
363 	else if (!rp->status)
364 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
365 
366 	hci_dev_unlock(hdev);
367 
368 	return rp->status;
369 }
370 
371 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
372 				 struct sk_buff *skb)
373 {
374 	struct hci_rp_read_local_name *rp = data;
375 
376 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
377 
378 	if (rp->status)
379 		return rp->status;
380 
381 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
382 	    hci_dev_test_flag(hdev, HCI_CONFIG))
383 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
384 
385 	return rp->status;
386 }
387 
388 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
389 				   struct sk_buff *skb)
390 {
391 	struct hci_ev_status *rp = data;
392 	void *sent;
393 
394 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
395 
396 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
397 	if (!sent)
398 		return rp->status;
399 
400 	hci_dev_lock(hdev);
401 
402 	if (!rp->status) {
403 		__u8 param = *((__u8 *) sent);
404 
405 		if (param == AUTH_ENABLED)
406 			set_bit(HCI_AUTH, &hdev->flags);
407 		else
408 			clear_bit(HCI_AUTH, &hdev->flags);
409 	}
410 
411 	if (hci_dev_test_flag(hdev, HCI_MGMT))
412 		mgmt_auth_enable_complete(hdev, rp->status);
413 
414 	hci_dev_unlock(hdev);
415 
416 	return rp->status;
417 }
418 
419 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
420 				    struct sk_buff *skb)
421 {
422 	struct hci_ev_status *rp = data;
423 	__u8 param;
424 	void *sent;
425 
426 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
427 
428 	if (rp->status)
429 		return rp->status;
430 
431 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
432 	if (!sent)
433 		return rp->status;
434 
435 	param = *((__u8 *) sent);
436 
437 	if (param)
438 		set_bit(HCI_ENCRYPT, &hdev->flags);
439 	else
440 		clear_bit(HCI_ENCRYPT, &hdev->flags);
441 
442 	return rp->status;
443 }
444 
445 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
446 				   struct sk_buff *skb)
447 {
448 	struct hci_ev_status *rp = data;
449 	__u8 param;
450 	void *sent;
451 
452 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
453 
454 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
455 	if (!sent)
456 		return rp->status;
457 
458 	param = *((__u8 *) sent);
459 
460 	hci_dev_lock(hdev);
461 
462 	if (rp->status) {
463 		hdev->discov_timeout = 0;
464 		goto done;
465 	}
466 
467 	if (param & SCAN_INQUIRY)
468 		set_bit(HCI_ISCAN, &hdev->flags);
469 	else
470 		clear_bit(HCI_ISCAN, &hdev->flags);
471 
472 	if (param & SCAN_PAGE)
473 		set_bit(HCI_PSCAN, &hdev->flags);
474 	else
475 		clear_bit(HCI_PSCAN, &hdev->flags);
476 
477 done:
478 	hci_dev_unlock(hdev);
479 
480 	return rp->status;
481 }
482 
483 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
484 				  struct sk_buff *skb)
485 {
486 	struct hci_ev_status *rp = data;
487 	struct hci_cp_set_event_filter *cp;
488 	void *sent;
489 
490 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
491 
492 	if (rp->status)
493 		return rp->status;
494 
495 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
496 	if (!sent)
497 		return rp->status;
498 
499 	cp = (struct hci_cp_set_event_filter *)sent;
500 
501 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
502 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
503 	else
504 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
505 
506 	return rp->status;
507 }
508 
509 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
510 				   struct sk_buff *skb)
511 {
512 	struct hci_rp_read_class_of_dev *rp = data;
513 
514 	if (WARN_ON(!hdev))
515 		return HCI_ERROR_UNSPECIFIED;
516 
517 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
518 
519 	if (rp->status)
520 		return rp->status;
521 
522 	memcpy(hdev->dev_class, rp->dev_class, 3);
523 
524 	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
525 		   hdev->dev_class[1], hdev->dev_class[0]);
526 
527 	return rp->status;
528 }
529 
530 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
531 				    struct sk_buff *skb)
532 {
533 	struct hci_ev_status *rp = data;
534 	void *sent;
535 
536 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
537 
538 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
539 	if (!sent)
540 		return rp->status;
541 
542 	hci_dev_lock(hdev);
543 
544 	if (!rp->status)
545 		memcpy(hdev->dev_class, sent, 3);
546 
547 	if (hci_dev_test_flag(hdev, HCI_MGMT))
548 		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
549 
550 	hci_dev_unlock(hdev);
551 
552 	return rp->status;
553 }
554 
555 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
556 				    struct sk_buff *skb)
557 {
558 	struct hci_rp_read_voice_setting *rp = data;
559 	__u16 setting;
560 
561 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
562 
563 	if (rp->status)
564 		return rp->status;
565 
566 	setting = __le16_to_cpu(rp->voice_setting);
567 
568 	if (hdev->voice_setting == setting)
569 		return rp->status;
570 
571 	hdev->voice_setting = setting;
572 
573 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
574 
575 	if (hdev->notify)
576 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
577 
578 	return rp->status;
579 }
580 
581 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
582 				     struct sk_buff *skb)
583 {
584 	struct hci_ev_status *rp = data;
585 	__u16 setting;
586 	void *sent;
587 
588 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
589 
590 	if (rp->status)
591 		return rp->status;
592 
593 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
594 	if (!sent)
595 		return rp->status;
596 
597 	setting = get_unaligned_le16(sent);
598 
599 	if (hdev->voice_setting == setting)
600 		return rp->status;
601 
602 	hdev->voice_setting = setting;
603 
604 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
605 
606 	if (hdev->notify)
607 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
608 
609 	return rp->status;
610 }
611 
612 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
613 					struct sk_buff *skb)
614 {
615 	struct hci_rp_read_num_supported_iac *rp = data;
616 
617 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
618 
619 	if (rp->status)
620 		return rp->status;
621 
622 	hdev->num_iac = rp->num_iac;
623 
624 	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
625 
626 	return rp->status;
627 }
628 
629 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
630 				struct sk_buff *skb)
631 {
632 	struct hci_ev_status *rp = data;
633 	struct hci_cp_write_ssp_mode *sent;
634 
635 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
636 
637 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
638 	if (!sent)
639 		return rp->status;
640 
641 	hci_dev_lock(hdev);
642 
643 	if (!rp->status) {
644 		if (sent->mode)
645 			hdev->features[1][0] |= LMP_HOST_SSP;
646 		else
647 			hdev->features[1][0] &= ~LMP_HOST_SSP;
648 	}
649 
650 	if (!rp->status) {
651 		if (sent->mode)
652 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
653 		else
654 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
655 	}
656 
657 	hci_dev_unlock(hdev);
658 
659 	return rp->status;
660 }
661 
662 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
663 				  struct sk_buff *skb)
664 {
665 	struct hci_ev_status *rp = data;
666 	struct hci_cp_write_sc_support *sent;
667 
668 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
669 
670 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
671 	if (!sent)
672 		return rp->status;
673 
674 	hci_dev_lock(hdev);
675 
676 	if (!rp->status) {
677 		if (sent->support)
678 			hdev->features[1][0] |= LMP_HOST_SC;
679 		else
680 			hdev->features[1][0] &= ~LMP_HOST_SC;
681 	}
682 
683 	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
684 		if (sent->support)
685 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
686 		else
687 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
688 	}
689 
690 	hci_dev_unlock(hdev);
691 
692 	return rp->status;
693 }
694 
695 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
696 				    struct sk_buff *skb)
697 {
698 	struct hci_rp_read_local_version *rp = data;
699 
700 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
701 
702 	if (rp->status)
703 		return rp->status;
704 
705 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
706 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
707 		hdev->hci_ver = rp->hci_ver;
708 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
709 		hdev->lmp_ver = rp->lmp_ver;
710 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
711 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
712 	}
713 
714 	return rp->status;
715 }
716 
/* Command Complete for Read Encryption Key Size.
 *
 * Security-critical: a failed read or a key smaller than the configured
 * minimum (or smaller than the size recorded with the stored link key)
 * is treated as an authentication failure so that the encryption state
 * is torn down rather than trusted.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		/* Connection is gone; report a synthetic failure without
		 * calling hci_encrypt_cfm() on a NULL connection.
		 */
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		u8 *key_enc_size = hci_conn_key_enc_size(conn);

		conn->enc_key_size = rp->key_size;
		status = 0;

		/* Attempt to check if the key size is too small or if it has
		 * been downgraded from the last time it was stored as part of
		 * the link_key.
		 */
		if (conn->enc_key_size < hdev->min_enc_key_size ||
		    (key_enc_size && conn->enc_key_size < *key_enc_size)) {
			/* As slave role, the conn->state has been set to
			 * BT_CONNECTED and l2cap conn req might not be received
			 * yet, at this moment the l2cap layer almost does
			 * nothing with the non-zero status.
			 * So we also clear encrypt related bits, and then the
			 * handler of l2cap conn req will get the right secure
			 * state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}

		/* Update the key encryption size with the connection one */
		if (key_enc_size && *key_enc_size != conn->enc_key_size)
			*key_enc_size = conn->enc_key_size;
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}
782 
783 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
784 				     struct sk_buff *skb)
785 {
786 	struct hci_rp_read_local_commands *rp = data;
787 
788 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
789 
790 	if (rp->status)
791 		return rp->status;
792 
793 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
794 	    hci_dev_test_flag(hdev, HCI_CONFIG))
795 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
796 
797 	return rp->status;
798 }
799 
800 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
801 					   struct sk_buff *skb)
802 {
803 	struct hci_rp_read_auth_payload_to *rp = data;
804 	struct hci_conn *conn;
805 
806 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
807 
808 	if (rp->status)
809 		return rp->status;
810 
811 	hci_dev_lock(hdev);
812 
813 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
814 	if (conn)
815 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
816 
817 	hci_dev_unlock(hdev);
818 
819 	return rp->status;
820 }
821 
822 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
823 					    struct sk_buff *skb)
824 {
825 	struct hci_rp_write_auth_payload_to *rp = data;
826 	struct hci_conn *conn;
827 	void *sent;
828 
829 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
830 
831 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
832 	if (!sent)
833 		return rp->status;
834 
835 	hci_dev_lock(hdev);
836 
837 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
838 	if (!conn) {
839 		rp->status = 0xff;
840 		goto unlock;
841 	}
842 
843 	if (!rp->status)
844 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
845 
846 unlock:
847 	hci_dev_unlock(hdev);
848 
849 	return rp->status;
850 }
851 
/* Command Complete for Read Local Supported Features.
 *
 * Caches the LMP feature page 0 and derives the supported ACL packet
 * types and (e)SCO link types from individual feature bits.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO HV2/HV3 imply the matching eSCO-negotiable packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* Basic eSCO support always yields EV3 */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO rates (2M/3M) and 3-slot EDR eSCO packets */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}
903 
904 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
905 					 struct sk_buff *skb)
906 {
907 	struct hci_rp_read_local_ext_features *rp = data;
908 
909 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
910 
911 	if (rp->status)
912 		return rp->status;
913 
914 	if (hdev->max_page < rp->max_page) {
915 		if (hci_test_quirk(hdev,
916 				   HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
917 			bt_dev_warn(hdev, "broken local ext features page 2");
918 		else
919 			hdev->max_page = rp->max_page;
920 	}
921 
922 	if (rp->page < HCI_MAX_PAGES)
923 		memcpy(hdev->features[rp->page], rp->features, 8);
924 
925 	return rp->status;
926 }
927 
928 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
929 				  struct sk_buff *skb)
930 {
931 	struct hci_rp_read_buffer_size *rp = data;
932 
933 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
934 
935 	if (rp->status)
936 		return rp->status;
937 
938 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
939 	hdev->sco_mtu  = rp->sco_mtu;
940 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
941 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
942 
943 	if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
944 		hdev->sco_mtu  = 64;
945 		hdev->sco_pkts = 8;
946 	}
947 
948 	if (!read_voice_setting_capable(hdev))
949 		hdev->sco_pkts = 0;
950 
951 	hdev->acl_cnt = hdev->acl_pkts;
952 	hdev->sco_cnt = hdev->sco_pkts;
953 
954 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
955 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
956 
957 	if (!hdev->acl_mtu || !hdev->acl_pkts)
958 		return HCI_ERROR_INVALID_PARAMETERS;
959 
960 	return rp->status;
961 }
962 
963 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
964 			      struct sk_buff *skb)
965 {
966 	struct hci_rp_read_bd_addr *rp = data;
967 
968 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
969 
970 	if (rp->status)
971 		return rp->status;
972 
973 	if (test_bit(HCI_INIT, &hdev->flags))
974 		bacpy(&hdev->bdaddr, &rp->bdaddr);
975 
976 	if (hci_dev_test_flag(hdev, HCI_SETUP))
977 		bacpy(&hdev->setup_addr, &rp->bdaddr);
978 
979 	return rp->status;
980 }
981 
982 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
983 					 struct sk_buff *skb)
984 {
985 	struct hci_rp_read_local_pairing_opts *rp = data;
986 
987 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
988 
989 	if (rp->status)
990 		return rp->status;
991 
992 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
993 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
994 		hdev->pairing_opts = rp->pairing_opts;
995 		hdev->max_enc_key_size = rp->max_key_size;
996 	}
997 
998 	return rp->status;
999 }
1000 
1001 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1002 					 struct sk_buff *skb)
1003 {
1004 	struct hci_rp_read_page_scan_activity *rp = data;
1005 
1006 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1007 
1008 	if (rp->status)
1009 		return rp->status;
1010 
1011 	if (test_bit(HCI_INIT, &hdev->flags)) {
1012 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1013 		hdev->page_scan_window = __le16_to_cpu(rp->window);
1014 	}
1015 
1016 	return rp->status;
1017 }
1018 
1019 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1020 					  struct sk_buff *skb)
1021 {
1022 	struct hci_ev_status *rp = data;
1023 	struct hci_cp_write_page_scan_activity *sent;
1024 
1025 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1026 
1027 	if (rp->status)
1028 		return rp->status;
1029 
1030 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1031 	if (!sent)
1032 		return rp->status;
1033 
1034 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1035 	hdev->page_scan_window = __le16_to_cpu(sent->window);
1036 
1037 	return rp->status;
1038 }
1039 
1040 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1041 				     struct sk_buff *skb)
1042 {
1043 	struct hci_rp_read_page_scan_type *rp = data;
1044 
1045 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1046 
1047 	if (rp->status)
1048 		return rp->status;
1049 
1050 	if (test_bit(HCI_INIT, &hdev->flags))
1051 		hdev->page_scan_type = rp->type;
1052 
1053 	return rp->status;
1054 }
1055 
1056 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1057 				      struct sk_buff *skb)
1058 {
1059 	struct hci_ev_status *rp = data;
1060 	u8 *type;
1061 
1062 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1063 
1064 	if (rp->status)
1065 		return rp->status;
1066 
1067 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1068 	if (type)
1069 		hdev->page_scan_type = *type;
1070 
1071 	return rp->status;
1072 }
1073 
1074 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1075 			    struct sk_buff *skb)
1076 {
1077 	struct hci_rp_read_clock *rp = data;
1078 	struct hci_cp_read_clock *cp;
1079 	struct hci_conn *conn;
1080 
1081 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1082 
1083 	if (rp->status)
1084 		return rp->status;
1085 
1086 	hci_dev_lock(hdev);
1087 
1088 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1089 	if (!cp)
1090 		goto unlock;
1091 
1092 	if (cp->which == 0x00) {
1093 		hdev->clock = le32_to_cpu(rp->clock);
1094 		goto unlock;
1095 	}
1096 
1097 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1098 	if (conn) {
1099 		conn->clock = le32_to_cpu(rp->clock);
1100 		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1101 	}
1102 
1103 unlock:
1104 	hci_dev_unlock(hdev);
1105 	return rp->status;
1106 }
1107 
1108 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1109 				       struct sk_buff *skb)
1110 {
1111 	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1112 
1113 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1114 
1115 	if (rp->status)
1116 		return rp->status;
1117 
1118 	hdev->inq_tx_power = rp->tx_power;
1119 
1120 	return rp->status;
1121 }
1122 
1123 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1124 					     struct sk_buff *skb)
1125 {
1126 	struct hci_rp_read_def_err_data_reporting *rp = data;
1127 
1128 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1129 
1130 	if (rp->status)
1131 		return rp->status;
1132 
1133 	hdev->err_data_reporting = rp->err_data_reporting;
1134 
1135 	return rp->status;
1136 }
1137 
1138 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1139 					      struct sk_buff *skb)
1140 {
1141 	struct hci_ev_status *rp = data;
1142 	struct hci_cp_write_def_err_data_reporting *cp;
1143 
1144 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1145 
1146 	if (rp->status)
1147 		return rp->status;
1148 
1149 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1150 	if (!cp)
1151 		return rp->status;
1152 
1153 	hdev->err_data_reporting = cp->err_data_reporting;
1154 
1155 	return rp->status;
1156 }
1157 
1158 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1159 				struct sk_buff *skb)
1160 {
1161 	struct hci_rp_pin_code_reply *rp = data;
1162 	struct hci_cp_pin_code_reply *cp;
1163 	struct hci_conn *conn;
1164 
1165 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1166 
1167 	hci_dev_lock(hdev);
1168 
1169 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1170 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1171 
1172 	if (rp->status)
1173 		goto unlock;
1174 
1175 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1176 	if (!cp)
1177 		goto unlock;
1178 
1179 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1180 	if (conn)
1181 		conn->pin_length = cp->pin_len;
1182 
1183 unlock:
1184 	hci_dev_unlock(hdev);
1185 	return rp->status;
1186 }
1187 
/* Command Complete handler for PIN Code Request Negative Reply:
 * only forwards the result to mgmt, no local state is changed.
 */
static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1205 
/* Command Complete handler for LE Read Buffer Size: cache the LE ACL
 * MTU and packet count, and initialize the available-packet counter.
 */
static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* All LE buffers start out available */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	/* A non-zero MTU below the spec minimum indicates a broken
	 * controller; reject it.
	 */
	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}
1228 
1229 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1230 					struct sk_buff *skb)
1231 {
1232 	struct hci_rp_le_read_local_features *rp = data;
1233 
1234 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1235 
1236 	if (rp->status)
1237 		return rp->status;
1238 
1239 	memcpy(hdev->le_features, rp->features, 8);
1240 
1241 	return rp->status;
1242 }
1243 
/* Command Complete handler for LE Read Advertising Channel TX Power:
 * cache the advertising TX power level on success.
 */
static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}
1258 
/* Command Complete handler for User Confirmation Request Reply:
 * forward the result to mgmt, no local state is changed.
 */
static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1276 
/* Command Complete handler for User Confirmation Request Negative Reply:
 * forward the result to mgmt, no local state is changed.
 */
static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1294 
/* Command Complete handler for User Passkey Request Reply:
 * forward the result to mgmt, no local state is changed.
 */
static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1312 
/* Command Complete handler for User Passkey Request Negative Reply:
 * forward the result to mgmt, no local state is changed.
 */
static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1330 
/* Command Complete handler for Read Local OOB Data: only logs and
 * propagates the status; the OOB payload is consumed elsewhere.
 */
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1340 
/* Command Complete handler for Read Local OOB Extended Data: only logs
 * and propagates the status; the OOB payload is consumed elsewhere.
 */
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1350 
/* Command Complete handler for LE Set Random Address: record the new
 * random address and, if it is the current RPA, re-arm the RPA
 * expiration timer.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The address that was set is the payload of the sent command */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	/* If the new address is our RPA, schedule its expiry */
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1380 
/* Command Complete handler for LE Set Default PHY: on success, mirror
 * the TX/RX PHY preferences we sent into hdev.
 */
static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}
1405 
/* Command Complete handler for LE Set Advertising Set Random Address:
 * record the address on the matching advertising instance and, if it
 * is the current RPA, re-arm that instance's RPA expiration timer.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only for a real adv instance, since handle 0x00 shall be
	 * using HCI_OP_LE_SET_RANDOM_ADDR as that allows both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		/* If the instance now uses our RPA, schedule its expiry */
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1443 
/* Command Complete handler for LE Remove Advertising Set: drop the
 * matching adv instance and notify mgmt listeners of its removal.
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The sent command payload is just the instance id */
	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		/* Skip the socket that issued the command when notifying */
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1471 
/* Command Complete handler for LE Clear Advertising Sets: remove every
 * adv instance and notify mgmt listeners of each removal.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Only act if this event matches a command we actually sent */
	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	/* _safe iteration: hci_remove_adv_instance unlinks the entry */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1502 
/* Command Complete handler for LE Read Transmit Power: cache the
 * controller's min/max LE TX power levels on success.
 */
static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}
1518 
/* Command Complete handler for LE Set Privacy Mode: record the new
 * privacy mode on the matching connection parameters entry.
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		/* WRITE_ONCE pairs with lockless readers of privacy_mode */
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}
1545 
/* Command Complete handler for LE Set Advertising Enable: track the
 * HCI_LE_ADV flag and, when advertising for an outgoing LE connection
 * as peripheral, arm the connection timeout.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The sent command payload is the enable byte */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1584 
/* Command Complete handler for LE Set Extended Advertising Enable:
 * keep per-instance enabled state and the HCI_LE_ADV/HCI_LE_ADV_0
 * flags in sync with the sets that were enabled or disabled, and arm
 * the connection timeout when advertising for an outgoing connection.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* The first adv set descriptor follows the fixed header */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;
		else if (!set->handle)
			/* Handle 0x00 has no adv_info; use a flag instead */
			hci_dev_set_flag(hdev, HCI_LE_ADV_0);

		/* Arm a timeout for pending peripheral connection setup */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;
			else if (!set->handle)
				hci_dev_clear_flag(hdev, HCI_LE_ADV_0);

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1653 
/* Command Complete handler for LE Set Scan Parameters: cache the scan
 * type (active/passive) that was configured.
 */
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1677 
/* Command Complete handler for LE Set Extended Scan Parameters: cache
 * the scan type taken from the first per-PHY parameter block.
 */
static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	/* Per-PHY parameters follow the fixed command header */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1704 
/* Return true if a deferred advertising report is cached in the
 * discovery state (last_adv_addr set to something other than ANY).
 */
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}
1711 
/* Reset the cached pending advertising report in the discovery state */
static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}
1719 
1720 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1721 				     u8 bdaddr_type, s8 rssi, u32 flags,
1722 				     u8 *data, u8 len)
1723 {
1724 	struct discovery_state *d = &hdev->discovery;
1725 
1726 	if (len > max_adv_len(hdev))
1727 		return;
1728 
1729 	bacpy(&d->last_adv_addr, bdaddr);
1730 	d->last_adv_addr_type = bdaddr_type;
1731 	d->last_adv_rssi = rssi;
1732 	d->last_adv_flags = flags;
1733 	memcpy(d->last_adv_data, data, len);
1734 	d->last_adv_data_len = len;
1735 }
1736 
/* Common completion logic for LE scan enable/disable (legacy and
 * extended): update the HCI_LE_SCAN flag, discovery state, and flush
 * any pending advertising report when scanning stops.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
			clear_pending_adv_report(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		}
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1792 
/* Command Complete handler for LE Set Scan Enable: delegate the state
 * bookkeeping to le_set_scan_enable_complete().
 */
static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1812 
/* Command Complete handler for LE Set Extended Scan Enable: delegate
 * the state bookkeeping to le_set_scan_enable_complete().
 */
static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1832 
/* Command Complete handler for LE Read Number of Supported Advertising
 * Sets: cache the count in hdev on success.
 */
static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}
1848 
/* Command Complete handler for LE Read Accept List Size: cache the
 * controller's accept list capacity on success.
 */
static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}
1863 
/* Command Complete handler for LE Clear Accept List: keep the host's
 * shadow copy of the accept list in sync by clearing it.
 */
static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
1880 
/* Command Complete handler for LE Add Device To Accept List: mirror
 * the added entry into the host's shadow copy of the accept list.
 */
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1903 
/* Command Complete handler for LE Remove Device From Accept List:
 * mirror the removal into the host's shadow copy of the accept list.
 */
static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1926 
/* Command Complete handler for LE Read Supported States: cache the
 * 8-byte LE states bitmap in hdev on success.
 */
static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}
1941 
/* Command Complete handler for LE Read Suggested Default Data Length:
 * cache the default TX octets/time on success.
 */
static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}
1957 
/* Command Complete handler for LE Write Suggested Default Data Length:
 * on success, mirror the values we sent into hdev.
 */
static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}
1978 
/* Command Complete handler for LE Add Device To Resolving List: mirror
 * the entry (with its IRKs) into the host's shadow resolving list.
 */
static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				sent->bdaddr_type, sent->peer_irk,
				sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}
2002 
/* Command Complete handler for LE Remove Device From Resolving List:
 * mirror the removal into the host's shadow resolving list.
 */
static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
2025 
/* Command Complete handler for LE Clear Resolving List: keep the
 * host's shadow resolving list in sync by clearing it.
 */
static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
2042 
/* Command Complete handler for LE Read Resolving List Size: cache the
 * controller's resolving list capacity on success.
 */
static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}
2057 
/* Command Complete handler for LE Set Address Resolution Enable: track
 * the controller-side RPA resolution state via HCI_LL_RPA_RESOLUTION.
 */
static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The sent command payload is the enable byte */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}
2084 
/* Command Complete handler for LE Read Maximum Data Length: cache the
 * controller's maximum TX/RX octets and times on success.
 */
static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}
2102 
/* Command Complete handler for Write LE Host Supported: update the
 * host-feature bits (LE and simultaneous LE/BR-EDR) and the
 * HCI_LE_ENABLED/HCI_ADVERTISING flags to match what we sent.
 */
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* Disabling LE also invalidates LE advertising */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}
2138 
/* Command Complete handler for LE Set Advertising Parameters: cache
 * the own-address type that was configured.
 */
static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}
2160 
/* Command Complete handler for Read RSSI: store the reported RSSI on
 * the connection identified by the returned handle.
 */
static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}
2182 
/* Command Complete handler for Read Transmit Power Level: store the
 * reported value on the connection, choosing current vs. maximum TX
 * power based on the type byte of the command we sent.
 */
static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:	/* current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:	/* maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
2218 
/* Command Complete handler for Write SSP Debug Mode: mirror the mode
 * byte we sent into hdev->ssp_debug_mode on success.
 */
static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}
2236 
/* Command Status handler for Inquiry: mark the device as inquiring
 * once the controller accepts the command.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status)
		return;

	/* Only set the flag if this status matches an Inquiry we sent */
	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
		set_bit(HCI_INQUIRY, &hdev->flags);
}
2247 
/* Command Status handler for Create Connection: on failure, tear down
 * the pending connection object; on acceptance, make sure a connection
 * object exists for the target address.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		/* Only clean up connections still in the connect phase */
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			/* Incoming-initiated race: create the object now */
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  0, HCI_ROLE_MASTER);
			if (IS_ERR(conn))
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
		}
	}

	hci_dev_unlock(hdev);
}
2282 
/* Command Status handler for Add SCO Connection: on failure, close and
 * delete the pending SCO link hanging off the parent ACL connection.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to clean up if the command was accepted */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command carries the ACL handle the SCO is attached to */
	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2319 
/* Command Status handler for Authentication Requested: on failure,
 * inform upper layers and drop the reference taken for the request.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to clean up if the command was accepted */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2346 
/* Command Status handler for Set Connection Encryption: on failure,
 * inform upper layers and drop the reference taken for the request.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to clean up if the command was accepted */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2373 
2374 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2375 				    struct hci_conn *conn)
2376 {
2377 	if (conn->state != BT_CONFIG || !conn->out)
2378 		return 0;
2379 
2380 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2381 		return 0;
2382 
2383 	/* Only request authentication for SSP connections or non-SSP
2384 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2385 	 * is requested.
2386 	 */
2387 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2388 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2389 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2390 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2391 		return 0;
2392 
2393 	return 1;
2394 }
2395 
/* Send a Remote Name Request for the given inquiry cache entry, reusing
 * the page-scan parameters and clock offset cached during inquiry.
 * Returns the result of hci_send_cmd (0 on success, negative on error).
 */
static int hci_resolve_name(struct hci_dev *hdev,
				   struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
2410 
2411 static bool hci_resolve_next_name(struct hci_dev *hdev)
2412 {
2413 	struct discovery_state *discov = &hdev->discovery;
2414 	struct inquiry_entry *e;
2415 
2416 	if (list_empty(&discov->resolve))
2417 		return false;
2418 
2419 	/* We should stop if we already spent too much time resolving names. */
2420 	if (time_after(jiffies, discov->name_resolve_timeout)) {
2421 		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2422 		return false;
2423 	}
2424 
2425 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2426 	if (!e)
2427 		return false;
2428 
2429 	if (hci_resolve_name(hdev, e) == 0) {
2430 		e->name_state = NAME_PENDING;
2431 		return true;
2432 	}
2433 
2434 	return false;
2435 }
2436 
/* Deliver a resolved (or failed) remote name to mgmt and continue or
 * finish the name-resolving phase of discovery. A NULL @name indicates
 * the name request failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested while resolving: finish discovery now. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);

	/* NULL name means the request failed; report what is known. */
	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	/* If another name request was issued, wait for its completion. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2480 
/* Command status handler for HCI_OP_REMOTE_NAME_REQ. Only failures are
 * handled here; a zero status is followed by a Remote Name Request
 * Complete event which drives further processing.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed name (NULL) to mgmt and let discovery move on
	 * to the next pending name, if any.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* An outgoing connection is still being configured and requires
	 * authentication; request it now unless one is already pending.
	 */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2523 
2524 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2525 {
2526 	struct hci_cp_read_remote_features *cp;
2527 	struct hci_conn *conn;
2528 
2529 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2530 
2531 	if (!status)
2532 		return;
2533 
2534 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2535 	if (!cp)
2536 		return;
2537 
2538 	hci_dev_lock(hdev);
2539 
2540 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2541 	if (conn) {
2542 		if (conn->state == BT_CONFIG) {
2543 			hci_connect_cfm(conn, status);
2544 			hci_conn_drop(conn);
2545 		}
2546 	}
2547 
2548 	hci_dev_unlock(hdev);
2549 }
2550 
2551 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2552 {
2553 	struct hci_cp_read_remote_ext_features *cp;
2554 	struct hci_conn *conn;
2555 
2556 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2557 
2558 	if (!status)
2559 		return;
2560 
2561 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2562 	if (!cp)
2563 		return;
2564 
2565 	hci_dev_lock(hdev);
2566 
2567 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2568 	if (conn) {
2569 		if (conn->state == BT_CONFIG) {
2570 			hci_connect_cfm(conn, status);
2571 			hci_conn_drop(conn);
2572 		}
2573 	}
2574 
2575 	hci_dev_unlock(hdev);
2576 }
2577 
2578 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2579 				       __u8 status)
2580 {
2581 	struct hci_conn *acl;
2582 	struct hci_link *link;
2583 
2584 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2585 
2586 	hci_dev_lock(hdev);
2587 
2588 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2589 	if (acl) {
2590 		link = list_first_entry_or_null(&acl->link_list,
2591 						struct hci_link, list);
2592 		if (link && link->conn) {
2593 			link->conn->state = BT_CLOSED;
2594 
2595 			hci_connect_cfm(link->conn, status);
2596 			hci_conn_del(link->conn);
2597 		}
2598 	}
2599 
2600 	hci_dev_unlock(hdev);
2601 }
2602 
2603 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2604 {
2605 	struct hci_cp_setup_sync_conn *cp;
2606 
2607 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2608 
2609 	if (!status)
2610 		return;
2611 
2612 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2613 	if (!cp)
2614 		return;
2615 
2616 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2617 }
2618 
2619 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2620 {
2621 	struct hci_cp_enhanced_setup_sync_conn *cp;
2622 
2623 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2624 
2625 	if (!status)
2626 		return;
2627 
2628 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2629 	if (!cp)
2630 		return;
2631 
2632 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2633 }
2634 
2635 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2636 {
2637 	struct hci_cp_sniff_mode *cp;
2638 	struct hci_conn *conn;
2639 
2640 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2641 
2642 	if (!status)
2643 		return;
2644 
2645 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2646 	if (!cp)
2647 		return;
2648 
2649 	hci_dev_lock(hdev);
2650 
2651 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2652 	if (conn) {
2653 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2654 
2655 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2656 			hci_sco_setup(conn, status);
2657 	}
2658 
2659 	hci_dev_unlock(hdev);
2660 }
2661 
2662 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2663 {
2664 	struct hci_cp_exit_sniff_mode *cp;
2665 	struct hci_conn *conn;
2666 
2667 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2668 
2669 	if (!status)
2670 		return;
2671 
2672 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2673 	if (!cp)
2674 		return;
2675 
2676 	hci_dev_lock(hdev);
2677 
2678 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2679 	if (conn) {
2680 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2681 
2682 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2683 			hci_sco_setup(conn, status);
2684 	}
2685 
2686 	hci_dev_unlock(hdev);
2687 }
2688 
/* Command status handler for HCI_OP_DISCONNECT. Normally cleanup is
 * deferred to HCI_EV_DISCONN_COMPLETE; the connection is torn down here
 * only when the command failed or the controller is suspended (in which
 * case the complete event may never arrive).
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* A failing status (other than "unknown connection", which means
	 * the link is already gone) is reported to mgmt and sockets, and
	 * the conn object is deleted below under "done".
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	/* During suspend, mark connection as closed immediately
	 * since we might not receive HCI_EV_DISCONN_COMPLETE
	 */
	if (hdev->suspended)
		conn->state = BT_CLOSED;

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue auto-connectable devices so the connection can be
	 * re-established later.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2776 
2777 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2778 {
2779 	/* When using controller based address resolution, then the new
2780 	 * address types 0x02 and 0x03 are used. These types need to be
2781 	 * converted back into either public address or random address type
2782 	 */
2783 	switch (type) {
2784 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2785 		if (resolved)
2786 			*resolved = true;
2787 		return ADDR_LE_DEV_PUBLIC;
2788 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2789 		if (resolved)
2790 			*resolved = true;
2791 		return ADDR_LE_DEV_RANDOM;
2792 	}
2793 
2794 	if (resolved)
2795 		*resolved = false;
2796 	return type;
2797 }
2798 
2799 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2800 			      u8 peer_addr_type, u8 own_address_type,
2801 			      u8 filter_policy)
2802 {
2803 	struct hci_conn *conn;
2804 
2805 	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2806 				       peer_addr_type);
2807 	if (!conn)
2808 		return;
2809 
2810 	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2811 
2812 	/* Store the initiator and responder address information which
2813 	 * is needed for SMP. These values will not change during the
2814 	 * lifetime of the connection.
2815 	 */
2816 	conn->init_addr_type = own_address_type;
2817 	if (own_address_type == ADDR_LE_DEV_RANDOM)
2818 		bacpy(&conn->init_addr, &hdev->random_addr);
2819 	else
2820 		bacpy(&conn->init_addr, &hdev->bdaddr);
2821 
2822 	conn->resp_addr_type = peer_addr_type;
2823 	bacpy(&conn->resp_addr, peer_addr);
2824 }
2825 
2826 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2827 {
2828 	struct hci_cp_le_create_conn *cp;
2829 
2830 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2831 
2832 	/* All connection failure handling is taken care of by the
2833 	 * hci_conn_failed function which is triggered by the HCI
2834 	 * request completion callbacks used for connecting.
2835 	 */
2836 	if (status)
2837 		return;
2838 
2839 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2840 	if (!cp)
2841 		return;
2842 
2843 	hci_dev_lock(hdev);
2844 
2845 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2846 			  cp->own_address_type, cp->filter_policy);
2847 
2848 	hci_dev_unlock(hdev);
2849 }
2850 
2851 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2852 {
2853 	struct hci_cp_le_ext_create_conn *cp;
2854 
2855 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2856 
2857 	/* All connection failure handling is taken care of by the
2858 	 * hci_conn_failed function which is triggered by the HCI
2859 	 * request completion callbacks used for connecting.
2860 	 */
2861 	if (status)
2862 		return;
2863 
2864 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2865 	if (!cp)
2866 		return;
2867 
2868 	hci_dev_lock(hdev);
2869 
2870 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2871 			  cp->own_addr_type, cp->filter_policy);
2872 
2873 	hci_dev_unlock(hdev);
2874 }
2875 
2876 static void hci_cs_le_set_phy(struct hci_dev *hdev, u8 status)
2877 {
2878 	struct hci_cp_le_set_phy *cp;
2879 	struct hci_conn *conn;
2880 
2881 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2882 
2883 	if (status)
2884 		return;
2885 
2886 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PHY);
2887 	if (!cp)
2888 		return;
2889 
2890 	hci_dev_lock(hdev);
2891 
2892 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2893 	if (conn) {
2894 		conn->le_tx_def_phys = cp->tx_phys;
2895 		conn->le_rx_def_phys = cp->rx_phys;
2896 	}
2897 
2898 	hci_dev_unlock(hdev);
2899 }
2900 
2901 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2902 {
2903 	struct hci_cp_le_read_remote_features *cp;
2904 	struct hci_conn *conn;
2905 
2906 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2907 
2908 	if (!status)
2909 		return;
2910 
2911 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2912 	if (!cp)
2913 		return;
2914 
2915 	hci_dev_lock(hdev);
2916 
2917 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2918 	if (conn && conn->state == BT_CONFIG)
2919 		hci_connect_cfm(conn, status);
2920 
2921 	hci_dev_unlock(hdev);
2922 }
2923 
2924 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2925 {
2926 	struct hci_cp_le_start_enc *cp;
2927 	struct hci_conn *conn;
2928 
2929 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2930 
2931 	if (!status)
2932 		return;
2933 
2934 	hci_dev_lock(hdev);
2935 
2936 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2937 	if (!cp)
2938 		goto unlock;
2939 
2940 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2941 	if (!conn)
2942 		goto unlock;
2943 
2944 	if (conn->state != BT_CONNECTED)
2945 		goto unlock;
2946 
2947 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2948 	hci_conn_drop(conn);
2949 
2950 unlock:
2951 	hci_dev_unlock(hdev);
2952 }
2953 
2954 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2955 {
2956 	struct hci_cp_switch_role *cp;
2957 	struct hci_conn *conn;
2958 
2959 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2960 
2961 	if (!status)
2962 		return;
2963 
2964 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2965 	if (!cp)
2966 		return;
2967 
2968 	hci_dev_lock(hdev);
2969 
2970 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2971 	if (conn)
2972 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2973 
2974 	hci_dev_unlock(hdev);
2975 }
2976 
2977 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2978 				     struct sk_buff *skb)
2979 {
2980 	struct hci_ev_status *ev = data;
2981 	struct discovery_state *discov = &hdev->discovery;
2982 	struct inquiry_entry *e;
2983 
2984 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2985 
2986 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2987 		return;
2988 
2989 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2990 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2991 
2992 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2993 		return;
2994 
2995 	hci_dev_lock(hdev);
2996 
2997 	if (discov->state != DISCOVERY_FINDING)
2998 		goto unlock;
2999 
3000 	if (list_empty(&discov->resolve)) {
3001 		/* When BR/EDR inquiry is active and no LE scanning is in
3002 		 * progress, then change discovery state to indicate completion.
3003 		 *
3004 		 * When running LE scanning and BR/EDR inquiry simultaneously
3005 		 * and the LE scan already finished, then change the discovery
3006 		 * state to indicate completion.
3007 		 */
3008 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3009 		    !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
3010 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3011 		goto unlock;
3012 	}
3013 
3014 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3015 	if (e && hci_resolve_name(hdev, e) == 0) {
3016 		e->name_state = NAME_PENDING;
3017 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3018 		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3019 	} else {
3020 		/* When BR/EDR inquiry is active and no LE scanning is in
3021 		 * progress, then change discovery state to indicate completion.
3022 		 *
3023 		 * When running LE scanning and BR/EDR inquiry simultaneously
3024 		 * and the LE scan already finished, then change the discovery
3025 		 * state to indicate completion.
3026 		 */
3027 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3028 		    !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
3029 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3030 	}
3031 
3032 unlock:
3033 	hci_dev_unlock(hdev);
3034 }
3035 
/* Handle HCI_EV_INQUIRY_RESULT: validate the event length, update the
 * inquiry cache for every reported device and forward each one to mgmt.
 * This legacy event carries no RSSI, hence HCI_RSSI_INVALID.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
				   struct sk_buff *skb)
{
	struct hci_ev_inquiry_result *ev = edata;
	struct inquiry_data data;
	int i;

	/* Make sure the skb actually holds ev->num inquiry_info entries. */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Results are ignored while periodic inquiry is active. */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct inquiry_info *info = &ev->info[i];
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= HCI_RSSI_INVALID;
		data.ssp_mode		= 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
3079 
/* Issue HCI_OP_READ_ENC_KEY_SIZE for @conn, pre-seeding
 * conn->enc_key_size with a sane value until the response arrives.
 * Returns 0 on success, -EOPNOTSUPP when the controller cannot read the
 * key size (conn->enc_key_size is then set to HCI_LINK_KEY_SIZE), or
 * the error from hci_send_cmd.
 */
static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct hci_cp_read_enc_key_size cp;
	u8 *key_enc_size = hci_conn_key_enc_size(conn);

	if (!read_key_size_capable(hdev)) {
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
		return -EOPNOTSUPP;
	}

	bt_dev_dbg(hdev, "hcon %p", conn);

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);

	/* If the key enc_size is already known, use it as conn->enc_key_size,
	 * otherwise use hdev->min_enc_key_size so the likes of
	 * l2cap_check_enc_key_size don't fail while waiting for
	 * HCI_OP_READ_ENC_KEY_SIZE response.
	 */
	if (key_enc_size && *key_enc_size)
		conn->enc_key_size = *key_enc_size;
	else
		conn->enc_key_size = hdev->min_enc_key_size;

	return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
}
3107 
/* Handle HCI_EV_CONN_COMPLETE: finish setting up a new BR/EDR (ACL or
 * SCO) connection, or fail/clean up the conn object when the event
 * carries an error status.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);
	hci_store_wake_reason(hdev, &ev->bdaddr, BDADDR_BREDR);

	/* Check for existing connection:
	 *
	 * 1. If it doesn't exist then it must be receiver/slave role.
	 * 2. If it does exist confirm that it is connecting/BT_CONNECT in case
	 *    of initiator/master role since there could be a collision where
	 *    either side is attempting to connect or something like a fuzzing
	 *    testing is trying to play tricks to destroy the hcon object before
	 *    it even attempts to connect (e.g. hcon->state == BT_OPEN).
	 */
	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn ||
	    (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, 0,
						  HCI_ROLE_SLAVE);
			if (IS_ERR(conn)) {
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
				goto unlock;
			}
		} else {
			/* A SCO complete may refer to a conn created as
			 * eSCO; retarget the lookup and fix up the type.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming, non-SSP link without a stored key is
			 * likely pairing; keep it alive longer.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* "Link key request" completed ahead of "connect request" completes */
		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
		    ev->link_type == ACL_LINK) {
			struct link_key *key;

			key = hci_find_link_key(hdev, &ev->bdaddr);
			if (key) {
				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
				hci_read_enc_key_size(hdev, conn);
				hci_encrypt_cfm(conn, ev->status);
			}
		}

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);
}
3261 
3262 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3263 {
3264 	struct hci_cp_reject_conn_req cp;
3265 
3266 	bacpy(&cp.bdaddr, bdaddr);
3267 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3268 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3269 }
3270 
/* Handle HCI_EV_CONN_REQUEST: decide whether to accept or reject an
 * incoming BR/EDR connection request and respond to the controller.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	hci_dev_lock(hdev);
	hci_store_wake_reason(hdev, &ev->bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Let the protocol layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Keep the cached class of device up to date. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, 0,
					  HCI_ROLE_SLAVE);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	/* ACL requests (and SCO on non-eSCO controllers, unless deferred)
	 * are accepted immediately; eSCO-capable sync links use the sync
	 * accept command; deferred requests wait for the protocol layer.
	 */
	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3382 
3383 static u8 hci_to_mgmt_reason(u8 err)
3384 {
3385 	switch (err) {
3386 	case HCI_ERROR_CONNECTION_TIMEOUT:
3387 		return MGMT_DEV_DISCONN_TIMEOUT;
3388 	case HCI_ERROR_REMOTE_USER_TERM:
3389 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3390 	case HCI_ERROR_REMOTE_POWER_OFF:
3391 		return MGMT_DEV_DISCONN_REMOTE;
3392 	case HCI_ERROR_LOCAL_HOST_TERM:
3393 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3394 	default:
3395 		return MGMT_DEV_DISCONN_UNKNOWN;
3396 	}
3397 }
3398 
/* Handle HCI_EV_DISCONN_COMPLETE: tear down the connection object,
 * notify mgmt and the upper layers, and re-arm passive scanning or
 * advertising where appropriate.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* A failing status means the link is still up; just report the
	 * failed disconnect attempt to mgmt.
	 */
	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-enable passive scanning if disconnected device is marked
	 * as auto-connectable.
	 */
	if (conn->type == LE_LINK) {
		params = hci_conn_params_lookup(hdev, &conn->dst,
						conn->dst_type);
		if (params) {
			switch (params->auto_connect) {
			case HCI_AUTO_CONN_LINK_LOSS:
				if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
					break;
				fallthrough;

			case HCI_AUTO_CONN_DIRECT:
			case HCI_AUTO_CONN_ALWAYS:
				hci_pend_le_list_del_init(params);
				hci_pend_le_list_add(params,
						     &hdev->pend_le_conns);
				hci_update_passive_scan(hdev);
				break;

			default:
				break;
			}
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3490 
/* Handle HCI Authentication Complete event.
 *
 * Updates the connection's authentication state and then, depending on
 * the connection state, either requests encryption (SSP links still in
 * BT_CONFIG) or confirms the connection/authentication to the upper
 * layers. If encryption was pending on the outcome of authentication,
 * it is started or failed here as well.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		/* Remember key-missing failures so a later disconnect can
		 * be reported with the auth-failure reason.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* On SSP links encryption follows authentication
			 * before the connection is reported established.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold/drop pair refreshes the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			/* Authentication failed, so pending encryption
			 * cannot proceed; report the failure upward.
			 */
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3554 
/* Handle HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (or the failure) to any pending MGMT
 * name-resolution request and, for a connection that still requires
 * outgoing authentication, issues the Authentication Requested command.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Pending name requests are only tracked when MGMT is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication only if it is not already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3595 
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's encryption/authentication flags, enforces
 * the link security requirements (disconnecting on violation), reads
 * the encryption key size for encrypted ACL links and configures the
 * Authenticated Payload Timeout where applicable, before notifying the
 * upper layers via hci_encrypt_cfm().
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		if (hci_read_enc_key_size(hdev, conn))
			goto notify;

		/* Key size read in flight; notification happens on its
		 * completion instead.
		 */
		goto unlock;
	}

	/* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
	 * to avoid unexpected SMP command errors when pairing.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT))
		goto notify;

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp))
			bt_dev_err(hdev, "write auth payload timeout failed");
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3697 
3698 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3699 					     struct sk_buff *skb)
3700 {
3701 	struct hci_ev_change_link_key_complete *ev = data;
3702 	struct hci_conn *conn;
3703 
3704 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3705 
3706 	hci_dev_lock(hdev);
3707 
3708 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3709 	if (conn) {
3710 		if (!ev->status)
3711 			set_bit(HCI_CONN_SECURE, &conn->flags);
3712 
3713 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3714 
3715 		hci_key_change_cfm(conn, ev->status);
3716 	}
3717 
3718 	hci_dev_unlock(hdev);
3719 }
3720 
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page 0 and continues connection setup:
 * requests extended features page 1 when both sides support it,
 * otherwise issues a remote name request (or reports the device as
 * connected on failure) and completes setup if no outgoing
 * authentication is required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while the connection is still
	 * being configured.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3770 
/* Update command flow control state from an event's
 * Num_HCI_Command_Packets (ncmd) value.
 *
 * Non-zero ncmd: the controller can accept commands again, so cancel
 * the ncmd watchdog and replenish the command counter (depth 1).
 * Zero ncmd: commands stay blocked; (re)arm the ncmd timer so a
 * controller that never reports free slots can be recovered — unless
 * the command queue is being drained. Skipped entirely during reset.
 */
static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
	cancel_delayed_work(&hdev->cmd_timer);

	/* NOTE(review): the RCU read-side section presumably synchronizes
	 * with the ncmd timer handling elsewhere — confirm against
	 * hci_core before relying on it.
	 */
	rcu_read_lock();
	if (!test_bit(HCI_RESET, &hdev->flags)) {
		if (ncmd) {
			cancel_delayed_work(&hdev->ncmd_timer);
			atomic_set(&hdev->cmd_cnt, 1);
		} else {
			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
						   HCI_NCMD_TIMEOUT);
		}
	}
	rcu_read_unlock();
}
3788 
3789 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3790 					struct sk_buff *skb)
3791 {
3792 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3793 
3794 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3795 
3796 	if (rp->status)
3797 		return rp->status;
3798 
3799 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3800 	hdev->le_pkts  = rp->acl_max_pkt;
3801 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3802 	hdev->iso_pkts = rp->iso_max_pkt;
3803 
3804 	hdev->le_cnt  = hdev->le_pkts;
3805 	hdev->iso_cnt = hdev->iso_pkts;
3806 
3807 	BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3808 	       hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3809 
3810 	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3811 		return HCI_ERROR_INVALID_PARAMETERS;
3812 
3813 	return rp->status;
3814 }
3815 
3816 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3817 {
3818 	struct hci_conn *conn, *tmp;
3819 
3820 	lockdep_assert_held(&hdev->lock);
3821 
3822 	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3823 		if (conn->type != CIS_LINK ||
3824 		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3825 			continue;
3826 
3827 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3828 			hci_conn_failed(conn, status);
3829 	}
3830 }
3831 
/* Handle the Command Complete for LE Set CIG Parameters.
 *
 * Validates the response against the command that was sent. On failure
 * only the CIS that never received a handle are failed (the CIG state
 * is unchanged per spec); on success the controller-assigned handles
 * are stored on the matching CIS connections, and CIS creation is
 * kicked for any that were already in BT_CONNECT.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* A response that does not match the sent command is treated as
	 * an unspecified error.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}
3896 
/* Handle the Command Complete for LE Setup ISO Data Path.
 *
 * Confirms the ISO connection once the data path for its final
 * direction has been configured, or deletes the connection if the
 * command failed.
 */
static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);

		/* Notify device connected in case it is a BIG Sync */
		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
			mgmt_device_connected(hdev, conn, NULL, 0);

		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
3947 
/* Handle the Command Complete for LE Read All Local Supported Features.
 *
 * NOTE(review): copies 248 feature bytes into hdev->le_features — this
 * assumes le_features is sized to hold all feature pages (>= 248
 * bytes); confirm against struct hci_dev, since the plain LE Read
 * Local Features command only supplies 8 bytes.
 */
static u8 hci_cc_le_read_all_local_features(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_all_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 248);

	return rp->status;
}
3962 
/* Command Status handler for LE Create BIG: currently only logs the
 * status.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
3967 
/* Handle the Command Status for LE Read All Remote Features.
 *
 * On command failure, a connection still in BT_CONFIG is confirmed
 * with the error status so connection setup does not stall waiting for
 * the features event that will never arrive.
 *
 * NOTE(review): the sent-command data is parsed as
 * struct hci_cp_le_read_remote_features while the opcode is
 * HCI_OP_LE_READ_ALL_REMOTE_FEATURES; reading the handle this way is
 * only safe if both command layouts start with the handle — confirm,
 * or switch to the dedicated struct for this opcode.
 */
static void hci_cs_le_read_all_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to do on success; the features event completes setup */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_ALL_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG)
		hci_connect_cfm(conn, status);

	hci_dev_unlock(hdev);
}
3990 
/* Handle the Command Complete for LE Set Periodic Advertising
 * Parameters. Currently only checks that a matching command was sent;
 * connection state tracking is still TODO.
 */
static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_params *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
	if (!cp)
		return rp->status;

	/* TODO: set the conn state */
	return rp->status;
}
4009 
/* Handle the Command Complete for LE Set Periodic Advertising Enable.
 *
 * Tracks the per-instance periodic advertising state and maintains the
 * device-wide HCI_LE_PER_ADV flag: set on any enable, cleared on
 * disable only when no other instance is still periodically
 * advertising.
 */
static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_enable *cp;
	struct adv_info *adv = NULL, *n;
	u8 per_adv_cnt = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);

	if (cp->enable) {
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

		if (adv)
			adv->periodic_enabled = true;
	} else {
		if (adv)
			adv->periodic_enabled = false;

		/* If just one instance was disabled check if there are
		 * any other instance enabled before clearing HCI_LE_PER_ADV.
		 * The current periodic adv instance will be marked as
		 * disabled once extended advertising is also disabled.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
					 list) {
			if (adv->periodic && adv->enabled)
				per_adv_cnt++;
		}

		if (per_adv_cnt > 1)
			goto unlock;

		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
	}

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
4062 
/* Helpers for declaring hci_cc_table entries.
 *
 * HCI_CC_VL: handler for Command Complete parameters whose length may
 * vary between _min and _max bytes.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

/* HCI_CC: handler for fixed-length Command Complete parameters */
#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

/* HCI_CC_STATUS: commands whose response carries only a status byte */
#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4076 
/* Dispatch table mapping Command Complete opcodes to their handlers,
 * together with the expected parameter length bounds enforced by
 * hci_cc_func() (see the HCI_CC* macros above).
 */
static const struct hci_cc {
	u16  op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel,
	       sizeof(struct hci_rp_remote_name_req_cancel)),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
	HCI_CC(HCI_OP_LE_READ_ALL_LOCAL_FEATURES,
	       hci_cc_le_read_all_local_features,
	       sizeof(struct hci_rp_le_read_all_local_features)),
};
4246 
4247 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4248 		      struct sk_buff *skb)
4249 {
4250 	void *data;
4251 
4252 	if (skb->len < cc->min_len) {
4253 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4254 			   cc->op, skb->len, cc->min_len);
4255 		return HCI_ERROR_UNSPECIFIED;
4256 	}
4257 
4258 	/* Just warn if the length is over max_len size it still be possible to
4259 	 * partially parse the cc so leave to callback to decide if that is
4260 	 * acceptable.
4261 	 */
4262 	if (skb->len > cc->max_len)
4263 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4264 			    cc->op, skb->len, cc->max_len);
4265 
4266 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4267 	if (!data)
4268 		return HCI_ERROR_UNSPECIFIED;
4269 
4270 	return cc->func(hdev, data, skb);
4271 }
4272 
/* Handle HCI Command Complete event.
 *
 * Dispatches to the matching handler in hci_cc_table; for unknown
 * (e.g. vendor) opcodes the first parameter byte is assumed to be the
 * status. Afterwards updates command flow control, completes any
 * pending request for the opcode and re-kicks the command work queue.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	if (i == ARRAY_SIZE(hci_cc_table)) {
		if (!skb->len) {
			bt_dev_err(hdev, "Unexpected cc 0x%4.4x with no status",
				   *opcode);
			*status = HCI_ERROR_UNSPECIFIED;
			return;
		}

		/* Unknown opcode, assume byte 0 contains the status, so
		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands send by HCI drivers.
		 * If a vendor doesn't actually follow this convention we may
		 * need to introduce a vendor CC table in order to properly set
		 * the status.
		 */
		*status = skb->data[0];
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4324 
/* Handle the Command Status for LE Create CIS.
 *
 * On failure, every CIS handle named in the failed command is closed
 * and its connection deleted; if any of them still had CIS creation
 * pending, the next queued creation is kicked via
 * hci_le_create_cis_pending().
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to do on success; completion arrives via the CIS
	 * Established event.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	for (i = 0; i < cp->num_cis; i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}
	/* All entries handled; mark the sent command as consumed */
	cp->num_cis = 0;

	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
4366 
/* Helper to populate one entry of hci_cs_table */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table mapping command opcodes to their Command Status
 * handlers; searched linearly by hci_cmd_status_evt().
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_SET_PHY, hci_cs_le_set_phy),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
	HCI_CS(HCI_OP_LE_READ_ALL_REMOTE_FEATURES,
	       hci_cs_le_read_all_remote_features),
};
4403 
/* Handle HCI_Command_Status: run the opcode's handler from hci_cs_table,
 * update the command counter/timer, flag the pending request complete
 * where appropriate and kick the command queue.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	/* Unknown opcodes simply fall through with *status already set */
	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		/* Another event is still expected for this command; don't
		 * send the next queued command yet.
		 */
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4445 
4446 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4447 				   struct sk_buff *skb)
4448 {
4449 	struct hci_ev_hardware_error *ev = data;
4450 
4451 	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4452 
4453 	hdev->hw_error_code = ev->code;
4454 
4455 	queue_work(hdev->req_workqueue, &hdev->error_reset);
4456 }
4457 
4458 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4459 				struct sk_buff *skb)
4460 {
4461 	struct hci_ev_role_change *ev = data;
4462 	struct hci_conn *conn;
4463 
4464 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4465 
4466 	hci_dev_lock(hdev);
4467 
4468 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4469 	if (conn) {
4470 		if (!ev->status)
4471 			conn->role = ev->role;
4472 
4473 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4474 
4475 		hci_role_switch_cfm(conn, ev->status, ev->role);
4476 	}
4477 
4478 	hci_dev_unlock(hdev);
4479 }
4480 
4481 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4482 				  struct sk_buff *skb)
4483 {
4484 	struct hci_ev_num_comp_pkts *ev = data;
4485 	int i;
4486 
4487 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4488 			     flex_array_size(ev, handles, ev->num)))
4489 		return;
4490 
4491 	bt_dev_dbg(hdev, "num %d", ev->num);
4492 
4493 	hci_dev_lock(hdev);
4494 
4495 	for (i = 0; i < ev->num; i++) {
4496 		struct hci_comp_pkts_info *info = &ev->handles[i];
4497 		struct hci_conn *conn;
4498 		__u16  handle, count;
4499 		unsigned int i;
4500 
4501 		handle = __le16_to_cpu(info->handle);
4502 		count  = __le16_to_cpu(info->count);
4503 
4504 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4505 		if (!conn)
4506 			continue;
4507 
4508 		/* Check if there is really enough packets outstanding before
4509 		 * attempting to decrease the sent counter otherwise it could
4510 		 * underflow..
4511 		 */
4512 		if (conn->sent >= count) {
4513 			conn->sent -= count;
4514 		} else {
4515 			bt_dev_warn(hdev, "hcon %p sent %u < count %u",
4516 				    conn, conn->sent, count);
4517 			conn->sent = 0;
4518 		}
4519 
4520 		for (i = 0; i < count; ++i)
4521 			hci_conn_tx_dequeue(conn);
4522 
4523 		switch (conn->type) {
4524 		case ACL_LINK:
4525 			hdev->acl_cnt += count;
4526 			if (hdev->acl_cnt > hdev->acl_pkts)
4527 				hdev->acl_cnt = hdev->acl_pkts;
4528 			break;
4529 
4530 		case LE_LINK:
4531 			if (hdev->le_pkts) {
4532 				hdev->le_cnt += count;
4533 				if (hdev->le_cnt > hdev->le_pkts)
4534 					hdev->le_cnt = hdev->le_pkts;
4535 			} else {
4536 				hdev->acl_cnt += count;
4537 				if (hdev->acl_cnt > hdev->acl_pkts)
4538 					hdev->acl_cnt = hdev->acl_pkts;
4539 			}
4540 			break;
4541 
4542 		case SCO_LINK:
4543 		case ESCO_LINK:
4544 			hdev->sco_cnt += count;
4545 			if (hdev->sco_cnt > hdev->sco_pkts)
4546 				hdev->sco_cnt = hdev->sco_pkts;
4547 
4548 			break;
4549 
4550 		case CIS_LINK:
4551 		case BIS_LINK:
4552 		case PA_LINK:
4553 			hdev->iso_cnt += count;
4554 			if (hdev->iso_cnt > hdev->iso_pkts)
4555 				hdev->iso_cnt = hdev->iso_pkts;
4556 			break;
4557 
4558 		default:
4559 			bt_dev_err(hdev, "unknown type %d conn %p",
4560 				   conn->type, conn);
4561 			break;
4562 		}
4563 	}
4564 
4565 	queue_work(hdev->workqueue, &hdev->tx_work);
4566 
4567 	hci_dev_unlock(hdev);
4568 }
4569 
4570 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4571 				struct sk_buff *skb)
4572 {
4573 	struct hci_ev_mode_change *ev = data;
4574 	struct hci_conn *conn;
4575 
4576 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4577 
4578 	hci_dev_lock(hdev);
4579 
4580 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4581 	if (conn) {
4582 		conn->mode = ev->mode;
4583 
4584 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4585 					&conn->flags)) {
4586 			if (conn->mode == HCI_CM_ACTIVE)
4587 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4588 			else
4589 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4590 		}
4591 
4592 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4593 			hci_sco_setup(conn, ev->status);
4594 	}
4595 
4596 	hci_dev_unlock(hdev);
4597 }
4598 
4599 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4600 				     struct sk_buff *skb)
4601 {
4602 	struct hci_ev_pin_code_req *ev = data;
4603 	struct hci_conn *conn;
4604 
4605 	bt_dev_dbg(hdev, "");
4606 
4607 	hci_dev_lock(hdev);
4608 
4609 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4610 	if (!conn)
4611 		goto unlock;
4612 
4613 	if (conn->state == BT_CONNECTED) {
4614 		hci_conn_hold(conn);
4615 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4616 		hci_conn_drop(conn);
4617 	}
4618 
4619 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4620 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4621 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4622 			     sizeof(ev->bdaddr), &ev->bdaddr);
4623 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4624 		u8 secure;
4625 
4626 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4627 			secure = 1;
4628 		else
4629 			secure = 0;
4630 
4631 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4632 	}
4633 
4634 unlock:
4635 	hci_dev_unlock(hdev);
4636 }
4637 
4638 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4639 {
4640 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4641 		return;
4642 
4643 	conn->pin_length = pin_len;
4644 	conn->key_type = key_type;
4645 
4646 	switch (key_type) {
4647 	case HCI_LK_LOCAL_UNIT:
4648 	case HCI_LK_REMOTE_UNIT:
4649 	case HCI_LK_DEBUG_COMBINATION:
4650 		return;
4651 	case HCI_LK_COMBINATION:
4652 		if (pin_len == 16)
4653 			conn->pending_sec_level = BT_SECURITY_HIGH;
4654 		else
4655 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4656 		break;
4657 	case HCI_LK_UNAUTH_COMBINATION_P192:
4658 	case HCI_LK_UNAUTH_COMBINATION_P256:
4659 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4660 		break;
4661 	case HCI_LK_AUTH_COMBINATION_P192:
4662 		conn->pending_sec_level = BT_SECURITY_HIGH;
4663 		break;
4664 	case HCI_LK_AUTH_COMBINATION_P256:
4665 		conn->pending_sec_level = BT_SECURITY_FIPS;
4666 		break;
4667 	}
4668 }
4669 
/* Handle HCI_Link_Key_Request: reply with a stored link key if one is
 * found and is strong enough for the connection's security requirements,
 * otherwise send a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject an unauthenticated key when MITM protection is
		 * required (auth_type bit 0), unless the remote auth
		 * requirement is still unknown (0xff).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* A combination key from a short PIN cannot satisfy a
		 * HIGH or FIPS pending security level.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4727 
/* Handle HCI_Link_Key_Notification: reject all-zero keys, update the
 * connection's key state, store the key and notify the management
 * interface, discarding non-persistent debug keys afterwards.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* NOTE(review): pin_len is always 0 here while conn->pin_length
	 * carries the connection's PIN length — confirm this is intended.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4797 
4798 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4799 				 struct sk_buff *skb)
4800 {
4801 	struct hci_ev_clock_offset *ev = data;
4802 	struct hci_conn *conn;
4803 
4804 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4805 
4806 	hci_dev_lock(hdev);
4807 
4808 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4809 	if (conn && !ev->status) {
4810 		struct inquiry_entry *ie;
4811 
4812 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4813 		if (ie) {
4814 			ie->data.clock_offset = ev->clock_offset;
4815 			ie->timestamp = jiffies;
4816 		}
4817 	}
4818 
4819 	hci_dev_unlock(hdev);
4820 }
4821 
4822 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4823 				    struct sk_buff *skb)
4824 {
4825 	struct hci_ev_pkt_type_change *ev = data;
4826 	struct hci_conn *conn;
4827 
4828 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4829 
4830 	hci_dev_lock(hdev);
4831 
4832 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4833 	if (conn && !ev->status)
4834 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4835 
4836 	hci_dev_unlock(hdev);
4837 }
4838 
4839 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4840 				   struct sk_buff *skb)
4841 {
4842 	struct hci_ev_pscan_rep_mode *ev = data;
4843 	struct inquiry_entry *ie;
4844 
4845 	bt_dev_dbg(hdev, "");
4846 
4847 	hci_dev_lock(hdev);
4848 
4849 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4850 	if (ie) {
4851 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4852 		ie->timestamp = jiffies;
4853 	}
4854 
4855 	hci_dev_unlock(hdev);
4856 }
4857 
/* Handle HCI_Inquiry_Result_with_RSSI: two on-air layouts exist (with
 * and without a pscan_mode field), discriminated purely by the total
 * event length; feed each result into the inquiry cache and mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Results from a periodic inquiry are deliberately ignored */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Variant carrying the extra pscan_mode byte per entry */
	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		/* Standard variant without pscan_mode */
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		/* Length matches neither layout: malformed event */
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
4944 
/* Handle HCI_Read_Remote_Extended_Features_Complete: cache the feature
 * page, track remote SSP/SC support from page 1, and continue the
 * connection setup (name request or connect confirmation).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before announcing the connection to mgmt */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5009 
/* Handle HCI_Synchronous_Connection_Complete: finish SCO/eSCO setup,
 * retry with downgraded parameters on specific failures, and notify the
 * driver of the negotiated air mode for HCI-transport audio.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);
	hci_store_wake_reason(hdev, &ev->bdaddr, BDADDR_BREDR);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		/* hci_conn_set_handle() can itself fail; treat that as a
		 * failed connection below.
		 */
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status) {
			conn->state = BT_CLOSED;
			break;
		}

		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	/* For these failures, retry an outgoing setup once with the
	 * non-EDR eSCO packet mask before giving up.
	 */
	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->parent->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path which
	 * is zero and non-zero value shall be non-HCI transport data path
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
5123 
5124 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5125 {
5126 	size_t parsed = 0;
5127 
5128 	while (parsed < eir_len) {
5129 		u8 field_len = eir[0];
5130 
5131 		if (field_len == 0)
5132 			return parsed;
5133 
5134 		parsed += field_len + 1;
5135 		eir += field_len + 1;
5136 	}
5137 
5138 	return eir_len;
5139 }
5140 
/* Handle HCI_Extended_Inquiry_Result: update the inquiry cache from each
 * result and forward it (including the EIR payload) to mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	/* Validate that the event really carries ev->num result entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Results from a periodic inquiry are deliberately ignored */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* With mgmt, a name request can be skipped when the EIR
		 * already contains the complete name.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
5195 
/* Handle HCI_Encryption_Key_Refresh_Complete for LE links: promote the
 * pending security level on success, or disconnect on failure, and
 * confirm connect/auth to waiting callers.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link forces a disconnect */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5245 
5246 static u8 hci_get_auth_req(struct hci_conn *conn)
5247 {
5248 	/* If remote requests no-bonding follow that lead */
5249 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5250 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5251 		return conn->remote_auth | (conn->auth_type & 0x01);
5252 
5253 	/* If both remote and local have enough IO capabilities, require
5254 	 * MITM protection
5255 	 */
5256 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5257 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5258 		return conn->remote_auth | 0x01;
5259 
5260 	/* No MITM protection possible so ignore remote requirement */
5261 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5262 }
5263 
5264 static u8 bredr_oob_data_present(struct hci_conn *conn)
5265 {
5266 	struct hci_dev *hdev = conn->hdev;
5267 	struct oob_data *data;
5268 
5269 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5270 	if (!data)
5271 		return 0x00;
5272 
5273 	if (bredr_sc_enabled(hdev)) {
5274 		/* When Secure Connections is enabled, then just
5275 		 * return the present value stored with the OOB
5276 		 * data. The stored value contains the right present
5277 		 * information. However it can only be trusted when
5278 		 * not in Secure Connection Only mode.
5279 		 */
5280 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5281 			return data->present;
5282 
5283 		/* When Secure Connections Only mode is enabled, then
5284 		 * the P-256 values are required. If they are not
5285 		 * available, then do not declare that OOB data is
5286 		 * present.
5287 		 */
5288 		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5289 		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5290 			return 0x00;
5291 
5292 		return 0x02;
5293 	}
5294 
5295 	/* When Secure Connections is not enabled or actually
5296 	 * not supported by the hardware, then check that if
5297 	 * P-192 data values are present.
5298 	 */
5299 	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5300 	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5301 		return 0x00;
5302 
5303 	return 0x01;
5304 }
5305 
/* Handle HCI_IO_Capability_Request: reply with our IO capability and
 * authentication requirement if pairing is allowed, otherwise send a
 * negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		goto unlock;

	/* Assume remote supports SSP since it has triggered this event */
	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5378 
5379 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5380 				  struct sk_buff *skb)
5381 {
5382 	struct hci_ev_io_capa_reply *ev = data;
5383 	struct hci_conn *conn;
5384 
5385 	bt_dev_dbg(hdev, "");
5386 
5387 	hci_dev_lock(hdev);
5388 
5389 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5390 	if (!conn)
5391 		goto unlock;
5392 
5393 	conn->remote_cap = ev->capability;
5394 	conn->remote_auth = ev->authentication;
5395 
5396 unlock:
5397 	hci_dev_unlock(hdev);
5398 }
5399 
/* Handle HCI_EV_USER_CONFIRM_REQUEST: decide whether the numeric
 * comparison value can be auto-accepted by the kernel or must be
 * forwarded to user space via mgmt for explicit confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* Without mgmt there is no user space entity to ask */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement encodes MITM protection */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; use JUST_CFM method */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiator of request authorization and the
		 * local IO capability is not NoInputNoOutput, use JUST_WORKS
		 * method (mgmt_user_confirm with confirm_hint set to 1).
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* An optional delay gives user space a window to intervene
		 * via the connection's auto_accept_work before the reply
		 * is sent.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Hand the decision to user space; confirm_hint distinguishes a
	 * plain accept/reject prompt (1) from numeric comparison (0).
	 */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5481 
5482 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5483 					 struct sk_buff *skb)
5484 {
5485 	struct hci_ev_user_passkey_req *ev = data;
5486 
5487 	bt_dev_dbg(hdev, "");
5488 
5489 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5490 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5491 }
5492 
5493 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5494 					struct sk_buff *skb)
5495 {
5496 	struct hci_ev_user_passkey_notify *ev = data;
5497 	struct hci_conn *conn;
5498 
5499 	bt_dev_dbg(hdev, "");
5500 
5501 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5502 	if (!conn)
5503 		return;
5504 
5505 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5506 	conn->passkey_entered = 0;
5507 
5508 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5509 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5510 					 conn->dst_type, conn->passkey_notify,
5511 					 conn->passkey_entered);
5512 }
5513 
5514 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5515 				    struct sk_buff *skb)
5516 {
5517 	struct hci_ev_keypress_notify *ev = data;
5518 	struct hci_conn *conn;
5519 
5520 	bt_dev_dbg(hdev, "");
5521 
5522 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5523 	if (!conn)
5524 		return;
5525 
5526 	switch (ev->type) {
5527 	case HCI_KEYPRESS_STARTED:
5528 		conn->passkey_entered = 0;
5529 		return;
5530 
5531 	case HCI_KEYPRESS_ENTERED:
5532 		conn->passkey_entered++;
5533 		break;
5534 
5535 	case HCI_KEYPRESS_ERASED:
5536 		conn->passkey_entered--;
5537 		break;
5538 
5539 	case HCI_KEYPRESS_CLEARED:
5540 		conn->passkey_entered = 0;
5541 		break;
5542 
5543 	case HCI_KEYPRESS_COMPLETED:
5544 		return;
5545 	}
5546 
5547 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5548 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5549 					 conn->dst_type, conn->passkey_notify,
5550 					 conn->passkey_entered);
5551 }
5552 
5553 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5554 					 struct sk_buff *skb)
5555 {
5556 	struct hci_ev_simple_pair_complete *ev = data;
5557 	struct hci_conn *conn;
5558 
5559 	bt_dev_dbg(hdev, "");
5560 
5561 	hci_dev_lock(hdev);
5562 
5563 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5564 	if (!conn || !hci_conn_ssp_enabled(conn))
5565 		goto unlock;
5566 
5567 	/* Reset the authentication requirement to unknown */
5568 	conn->remote_auth = 0xff;
5569 
5570 	/* To avoid duplicate auth_failed events to user space we check
5571 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
5572 	 * initiated the authentication. A traditional auth_complete
5573 	 * event gets always produced as initiator and is also mapped to
5574 	 * the mgmt_auth_failed event */
5575 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5576 		mgmt_auth_failed(conn, ev->status);
5577 
5578 	hci_conn_drop(conn);
5579 
5580 unlock:
5581 	hci_dev_unlock(hdev);
5582 }
5583 
5584 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5585 					 struct sk_buff *skb)
5586 {
5587 	struct hci_ev_remote_host_features *ev = data;
5588 	struct inquiry_entry *ie;
5589 	struct hci_conn *conn;
5590 
5591 	bt_dev_dbg(hdev, "");
5592 
5593 	hci_dev_lock(hdev);
5594 
5595 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5596 	if (conn)
5597 		memcpy(conn->features[1], ev->features, 8);
5598 
5599 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5600 	if (ie)
5601 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5602 
5603 	hci_dev_unlock(hdev);
5604 }
5605 
5606 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5607 					    struct sk_buff *skb)
5608 {
5609 	struct hci_ev_remote_oob_data_request *ev = edata;
5610 	struct oob_data *data;
5611 
5612 	bt_dev_dbg(hdev, "");
5613 
5614 	hci_dev_lock(hdev);
5615 
5616 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5617 		goto unlock;
5618 
5619 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5620 	if (!data) {
5621 		struct hci_cp_remote_oob_data_neg_reply cp;
5622 
5623 		bacpy(&cp.bdaddr, &ev->bdaddr);
5624 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5625 			     sizeof(cp), &cp);
5626 		goto unlock;
5627 	}
5628 
5629 	if (bredr_sc_enabled(hdev)) {
5630 		struct hci_cp_remote_oob_ext_data_reply cp;
5631 
5632 		bacpy(&cp.bdaddr, &ev->bdaddr);
5633 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5634 			memset(cp.hash192, 0, sizeof(cp.hash192));
5635 			memset(cp.rand192, 0, sizeof(cp.rand192));
5636 		} else {
5637 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5638 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5639 		}
5640 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5641 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5642 
5643 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5644 			     sizeof(cp), &cp);
5645 	} else {
5646 		struct hci_cp_remote_oob_data_reply cp;
5647 
5648 		bacpy(&cp.bdaddr, &ev->bdaddr);
5649 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5650 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5651 
5652 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5653 			     sizeof(cp), &cp);
5654 	}
5655 
5656 unlock:
5657 	hci_dev_unlock(hdev);
5658 }
5659 
/* Fill in the initiator (init_addr) and responder (resp_addr) addresses
 * of a newly established LE connection, based on the connection role and
 * on any Local RPA reported by the controller.
 *
 * local_rpa may be NULL (the legacy LE Connection Complete event does
 * not carry one).
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		/* Outgoing connection: the event address is the peer,
		 * i.e. the responder.
		 */
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		/* Incoming connection: we were advertising, so the local
		 * side is the responder.
		 */
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5712 
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. Locates (or creates) the hci_conn object, resolves the peer's
 * identity address, assigns the connection handle and completes the
 * connection setup (sysfs/debugfs registration, remote feature read,
 * pending-parameter cleanup).
 *
 * local_rpa is NULL for the legacy event variant.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;
	int err;

	hci_dev_lock(hdev);
	hci_store_wake_reason(hdev, bdaddr, bdaddr_type);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Check for existing connection:
	 *
	 * 1. If it doesn't exist then use the role to create a new object.
	 * 2. If it does exist confirm that it is connecting/BT_CONNECT in case
	 *    of initiator/master role since there could be a collision where
	 *    either side is attempting to connect or something like a fuzzing
	 *    testing is trying to play tricks to destroy the hcon object before
	 *    it even attempts to connect (e.g. hcon->state == BT_OPEN).
	 */
	conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr);
	if (!conn ||
	    (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, bdaddr_type,
					  role);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection attempt succeeded or failed; either way the
		 * connect timeout no longer applies.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* If the remote features cannot be read, finish the connection
	 * setup right away instead of waiting for the feature read to
	 * complete.
	 */
	err = hci_le_read_remote_features(conn);
	if (err) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection attempt for this device has completed;
	 * release the parameter-held reference, if any.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
5876 
5877 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5878 				     struct sk_buff *skb)
5879 {
5880 	struct hci_ev_le_conn_complete *ev = data;
5881 
5882 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5883 
5884 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5885 			     NULL, ev->role, le16_to_cpu(ev->handle),
5886 			     le16_to_cpu(ev->interval),
5887 			     le16_to_cpu(ev->latency),
5888 			     le16_to_cpu(ev->supervision_timeout));
5889 }
5890 
5891 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5892 					 struct sk_buff *skb)
5893 {
5894 	struct hci_ev_le_enh_conn_complete *ev = data;
5895 
5896 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5897 
5898 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5899 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5900 			     le16_to_cpu(ev->interval),
5901 			     le16_to_cpu(ev->latency),
5902 			     le16_to_cpu(ev->supervision_timeout));
5903 }
5904 
5905 static void hci_le_pa_sync_lost_evt(struct hci_dev *hdev, void *data,
5906 				    struct sk_buff *skb)
5907 {
5908 	struct hci_ev_le_pa_sync_lost *ev = data;
5909 	u16 handle = le16_to_cpu(ev->handle);
5910 	struct hci_conn *conn;
5911 
5912 	bt_dev_dbg(hdev, "sync handle 0x%4.4x", handle);
5913 
5914 	hci_dev_lock(hdev);
5915 
5916 	/* Delete the pa sync connection */
5917 	conn = hci_conn_hash_lookup_pa_sync_handle(hdev, handle);
5918 	if (conn) {
5919 		clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
5920 		clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
5921 		hci_disconn_cfm(conn, HCI_ERROR_REMOTE_USER_TERM);
5922 		hci_conn_del(conn);
5923 	}
5924 
5925 	hci_dev_unlock(hdev);
5926 }
5927 
/* Handle HCI_EVT_LE_EXT_ADV_SET_TERM: an extended advertising set has
 * been terminated by the controller, either with an error or because a
 * connection was created from it. Updates the advertising instance
 * bookkeeping and, on connection creation, back-fills conn->resp_addr
 * for random-address advertising.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still active, HCI_LE_ADV must
		 * remain set.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	/* Success status: the set was terminated due to a new connection */
	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* resp_addr only needs fixing up when advertising used a
		 * random address and it has not been filled in yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the legacy instance using hdev->random_addr */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
5997 
5998 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
5999 {
6000 	struct hci_cp_le_pa_term_sync cp;
6001 
6002 	memset(&cp, 0, sizeof(cp));
6003 	cp.handle = handle;
6004 
6005 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6006 }
6007 
/* Handle HCI_EV_LE_PAST_RECEIVED (Periodic Advertising Sync Transfer
 * Received): a peer transferred a periodic advertising sync to us.
 * Matches it to the pending create-PA-sync connection and, if the
 * profile defers, creates an unset PA_LINK connection to represent the
 * sync.
 */
static void hci_le_past_received_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_le_past_received *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync, *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);
	hci_store_wake_reason(hdev, &ev->bdaddr, ev->bdaddr_type);

	/* The PA sync creation procedure has concluded */
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	conn = hci_conn_hash_lookup_create_pa_sync(hdev);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection for dst %pMR sid 0x%2.2x",
			   &ev->bdaddr, ev->sid);
		goto unlock;
	}

	conn->sync_handle = le16_to_cpu(ev->sync_handle);
	conn->sid = HCI_SID_INVALID;

	/* Let the protocol layer decide whether to accept the sync */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
				      &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		/* Not accepted: terminate the sync again */
		hci_le_pa_term_sync(hdev, ev->sync_handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Add connection to indicate PA sync event */
	pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, 0,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);

	if (ev->status) {
		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}
6063 
6064 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6065 					    struct sk_buff *skb)
6066 {
6067 	struct hci_ev_le_conn_update_complete *ev = data;
6068 	struct hci_conn *conn;
6069 
6070 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6071 
6072 	if (ev->status)
6073 		return;
6074 
6075 	hci_dev_lock(hdev);
6076 
6077 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6078 	if (conn) {
6079 		conn->le_conn_interval = le16_to_cpu(ev->interval);
6080 		conn->le_conn_latency = le16_to_cpu(ev->latency);
6081 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6082 	}
6083 
6084 	hci_dev_unlock(hdev);
6085 }
6086 
/* This function requires the caller holds hdev->lock.
 *
 * Decide whether an advertising report should trigger an outgoing LE
 * connection attempt, based on the pending-connection parameters stored
 * for the advertiser's address.
 *
 * Returns the hci_conn of the initiated connection, or NULL when no
 * connection attempt was (or could be) made.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type, u8 phy, u8 sec_phy)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER, phy, sec_phy);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6178 
6179 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6180 			       u8 bdaddr_type, bdaddr_t *direct_addr,
6181 			       u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
6182 			       u8 *data, u8 len, bool ext_adv, bool ctl_time,
6183 			       u64 instant)
6184 {
6185 	struct discovery_state *d = &hdev->discovery;
6186 	struct smp_irk *irk;
6187 	struct hci_conn *conn;
6188 	bool match, bdaddr_resolved;
6189 	u32 flags;
6190 	u8 *ptr;
6191 
6192 	switch (type) {
6193 	case LE_ADV_IND:
6194 	case LE_ADV_DIRECT_IND:
6195 	case LE_ADV_SCAN_IND:
6196 	case LE_ADV_NONCONN_IND:
6197 	case LE_ADV_SCAN_RSP:
6198 		break;
6199 	default:
6200 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6201 				       "type: 0x%02x", type);
6202 		return;
6203 	}
6204 
6205 	if (len > max_adv_len(hdev)) {
6206 		bt_dev_err_ratelimited(hdev,
6207 				       "adv larger than maximum supported");
6208 		return;
6209 	}
6210 
6211 	/* Find the end of the data in case the report contains padded zero
6212 	 * bytes at the end causing an invalid length value.
6213 	 *
6214 	 * When data is NULL, len is 0 so there is no need for extra ptr
6215 	 * check as 'ptr < data + 0' is already false in such case.
6216 	 */
6217 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6218 		if (ptr + 1 + *ptr > data + len)
6219 			break;
6220 	}
6221 
6222 	/* Adjust for actual length. This handles the case when remote
6223 	 * device is advertising with incorrect data length.
6224 	 */
6225 	len = ptr - data;
6226 
6227 	/* If the direct address is present, then this report is from
6228 	 * a LE Direct Advertising Report event. In that case it is
6229 	 * important to see if the address is matching the local
6230 	 * controller address.
6231 	 *
6232 	 * If local privacy is not enable the controller shall not be
6233 	 * generating such event since according to its documentation it is only
6234 	 * valid for filter_policy 0x02 and 0x03, but the fact that it did
6235 	 * generate LE Direct Advertising Report means it is probably broken and
6236 	 * won't generate any other event which can potentially break
6237 	 * auto-connect logic so in case local privacy is not enable this
6238 	 * ignores the direct_addr so it works as a regular report.
6239 	 */
6240 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr &&
6241 	    hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6242 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6243 						  &bdaddr_resolved);
6244 
6245 		/* Only resolvable random addresses are valid for these
6246 		 * kind of reports and others can be ignored.
6247 		 */
6248 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6249 			return;
6250 
6251 		/* If the local IRK of the controller does not match
6252 		 * with the resolvable random address provided, then
6253 		 * this report can be ignored.
6254 		 */
6255 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6256 			return;
6257 	}
6258 
6259 	/* Check if we need to convert to identity address */
6260 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6261 	if (irk) {
6262 		bdaddr = &irk->bdaddr;
6263 		bdaddr_type = irk->addr_type;
6264 	}
6265 
6266 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6267 
6268 	/* Check if we have been requested to connect to this device.
6269 	 *
6270 	 * direct_addr is set only for directed advertising reports (it is NULL
6271 	 * for advertising reports) and is already verified to be RPA above.
6272 	 */
6273 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6274 				     type, phy, sec_phy);
6275 	if (!ext_adv && conn && type == LE_ADV_IND &&
6276 	    len <= max_adv_len(hdev)) {
6277 		/* Store report for later inclusion by
6278 		 * mgmt_device_connected
6279 		 */
6280 		memcpy(conn->le_adv_data, data, len);
6281 		conn->le_adv_data_len = len;
6282 	}
6283 
6284 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6285 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6286 	else
6287 		flags = 0;
6288 
6289 	/* All scan results should be sent up for Mesh systems */
6290 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6291 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6292 				  rssi, flags, data, len, NULL, 0, instant);
6293 		return;
6294 	}
6295 
6296 	/* Passive scanning shouldn't trigger any device found events,
6297 	 * except for devices marked as CONN_REPORT for which we do send
6298 	 * device found events, or advertisement monitoring requested.
6299 	 */
6300 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6301 		if (type == LE_ADV_DIRECT_IND)
6302 			return;
6303 
6304 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6305 					       bdaddr, bdaddr_type) &&
6306 		    idr_is_empty(&hdev->adv_monitors_idr))
6307 			return;
6308 
6309 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6310 				  rssi, flags, data, len, NULL, 0, 0);
6311 		return;
6312 	}
6313 
6314 	/* When receiving a scan response, then there is no way to
6315 	 * know if the remote device is connectable or not. However
6316 	 * since scan responses are merged with a previously seen
6317 	 * advertising report, the flags field from that report
6318 	 * will be used.
6319 	 *
6320 	 * In the unlikely case that a controller just sends a scan
6321 	 * response event that doesn't match the pending report, then
6322 	 * it is marked as a standalone SCAN_RSP.
6323 	 */
6324 	if (type == LE_ADV_SCAN_RSP)
6325 		flags = MGMT_DEV_FOUND_SCAN_RSP;
6326 
6327 	/* If there's nothing pending either store the data from this
6328 	 * event or send an immediate device found event if the data
6329 	 * should not be stored for later.
6330 	 */
6331 	if (!has_pending_adv_report(hdev)) {
6332 		/* If the report will trigger a SCAN_REQ store it for
6333 		 * later merging.
6334 		 */
6335 		if (!ext_adv && (type == LE_ADV_IND ||
6336 				 type == LE_ADV_SCAN_IND)) {
6337 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6338 						 rssi, flags, data, len);
6339 			return;
6340 		}
6341 
6342 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6343 				  rssi, flags, data, len, NULL, 0, 0);
6344 		return;
6345 	}
6346 
6347 	/* Check if the pending report is for the same device as the new one */
6348 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6349 		 bdaddr_type == d->last_adv_addr_type);
6350 
6351 	/* If the pending data doesn't match this report or this isn't a
6352 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6353 	 * sending of the pending data.
6354 	 */
6355 	if (type != LE_ADV_SCAN_RSP || !match) {
6356 		/* Send out whatever is in the cache, but skip duplicates */
6357 		if (!match)
6358 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6359 					  d->last_adv_addr_type, NULL,
6360 					  d->last_adv_rssi, d->last_adv_flags,
6361 					  d->last_adv_data,
6362 					  d->last_adv_data_len, NULL, 0, 0);
6363 
6364 		/* If the new report will trigger a SCAN_REQ store it for
6365 		 * later merging.
6366 		 */
6367 		if (!ext_adv && (type == LE_ADV_IND ||
6368 				 type == LE_ADV_SCAN_IND)) {
6369 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6370 						 rssi, flags, data, len);
6371 			return;
6372 		}
6373 
6374 		/* The advertising reports cannot be merged, so clear
6375 		 * the pending report and send out a device found event.
6376 		 */
6377 		clear_pending_adv_report(hdev);
6378 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6379 				  rssi, flags, data, len, NULL, 0, 0);
6380 		return;
6381 	}
6382 
6383 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6384 	 * the new event is a SCAN_RSP. We can therefore proceed with
6385 	 * sending a merged device found event.
6386 	 */
6387 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6388 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6389 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6390 	clear_pending_adv_report(hdev);
6391 }
6392 
/* Handle HCI LE Advertising Report event (HCI_EV_LE_ADVERTISING_REPORT).
 *
 * The event payload carries ev->num reports back to back; each report
 * is a fixed-size header followed by info->length bytes of advertising
 * data plus one trailing RSSI byte.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;	/* timestamp shared by all reports in this event */

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		/* Pull the fixed-size report header */
		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the advertising data plus the trailing RSSI byte */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		hci_store_wake_reason(hdev, &info->bdaddr, info->bdaddr_type);

		if (info->length <= max_adv_len(hdev)) {
			/* RSSI is stored right after the advertising data */
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   HCI_ADV_PHY_1M, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}
6434 
6435 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6436 {
6437 	u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;
6438 
6439 	if (!pdu_type)
6440 		return LE_ADV_NONCONN_IND;
6441 
6442 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6443 		switch (evt_type) {
6444 		case LE_LEGACY_ADV_IND:
6445 			return LE_ADV_IND;
6446 		case LE_LEGACY_ADV_DIRECT_IND:
6447 			return LE_ADV_DIRECT_IND;
6448 		case LE_LEGACY_ADV_SCAN_IND:
6449 			return LE_ADV_SCAN_IND;
6450 		case LE_LEGACY_NONCONN_IND:
6451 			return LE_ADV_NONCONN_IND;
6452 		case LE_LEGACY_SCAN_RSP_ADV:
6453 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6454 			return LE_ADV_SCAN_RSP;
6455 		}
6456 
6457 		goto invalid;
6458 	}
6459 
6460 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6461 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6462 			return LE_ADV_DIRECT_IND;
6463 
6464 		return LE_ADV_IND;
6465 	}
6466 
6467 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6468 		return LE_ADV_SCAN_RSP;
6469 
6470 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6471 		return LE_ADV_SCAN_IND;
6472 
6473 	if (evt_type & LE_EXT_ADV_DIRECT_IND)
6474 		return LE_ADV_NONCONN_IND;
6475 
6476 invalid:
6477 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6478 			       evt_type);
6479 
6480 	return LE_ADV_INVALID;
6481 }
6482 
/* Handle HCI LE Extended Advertising Report event (HCI_EV_LE_EXT_ADV_REPORT).
 *
 * Each report is translated to its legacy advertising type and, when
 * valid, forwarded to process_adv_report().
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;	/* timestamp shared by all reports in this event */

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		/* Pull the fixed-size report header */
		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the variable-length advertising data */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		hci_store_wake_reason(hdev, &info->bdaddr, info->bdaddr_type);

		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);

		/* Quirked controllers report extra upper bits in the PHY
		 * fields; mask the fields down to 5 bits.
		 */
		if (hci_test_quirk(hdev,
				   HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) {
			info->primary_phy &= 0x1f;
			info->secondary_phy &= 0x1f;
		}

		/* Check if PA Sync is pending and if the hci_conn SID has not
		 * been set update it.
		 */
		if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
			struct hci_conn *conn;

			conn = hci_conn_hash_lookup_create_pa_sync(hdev);
			if (conn && conn->sid == HCI_SID_INVALID)
				conn->sid = info->sid;
		}

		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->primary_phy,
					   info->secondary_phy,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}
6543 
/* Handle HCI LE Periodic Advertising Sync Established event.
 *
 * Finds the pending PA-sync hci_conn, records the sync handle, and —
 * when the profile defers the connection — adds a PA_LINK connection
 * to represent the sync and notifies the ISO layer on failure.
 */
static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync, *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);
	hci_store_wake_reason(hdev, &ev->bdaddr, ev->bdaddr_type);

	/* The pending PA sync has completed (with or without error) */
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	conn = hci_conn_hash_lookup_create_pa_sync(hdev);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection for dst %pMR sid 0x%2.2x",
			   &ev->bdaddr, ev->sid);
		goto unlock;
	}

	clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	conn->sync_handle = le16_to_cpu(ev->handle);
	/* SID will be re-learned from subsequent ext adv reports */
	conn->sid = HCI_SID_INVALID;

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
				      &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		/* No listener accepts the sync; tear it down */
		hci_le_pa_term_sync(hdev, ev->handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Add connection to indicate PA sync event */
	pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, 0,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->handle);

	if (ev->status) {
		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}
6601 
/* Handle HCI LE Periodic Advertising Report event.
 *
 * On the first complete report for a deferred PA sync, notify the ISO
 * and MGMT layers that the sync is usable.
 */
static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_per_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		goto unlock;

	/* Only deferred connections take the notification path below */
	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	pa_sync = hci_conn_hash_lookup_pa_sync_handle
			(hdev,
			le16_to_cpu(ev->sync_handle));

	if (!pa_sync)
		goto unlock;

	/* Notify only once, on the first complete report */
	if (ev->data_status == LE_PA_DATA_COMPLETE &&
	    !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
		/* Notify iso layer */
		hci_connect_cfm(pa_sync, 0);

		/* Notify MGMT layer */
		mgmt_device_connected(hdev, pa_sync, NULL, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
6640 
/* Handle HCI LE Read Remote Features Complete event.
 *
 * Stores the remote LE feature set, prunes unsupported PHYs from the
 * default TX/RX PHY masks, and completes a connection that was waiting
 * in BT_CONFIG for the features exchange.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			/* Legacy event carries an 8-byte feature page */
			memcpy(conn->le_features, ev->features, 8);

			/* Update supported PHYs */
			if (!(conn->le_features[1] & HCI_LE_PHY_2M)) {
				conn->le_tx_def_phys &= ~HCI_LE_SET_PHY_2M;
				conn->le_rx_def_phys &= ~HCI_LE_SET_PHY_2M;
			}

			if (!(conn->le_features[1] & HCI_LE_PHY_CODED)) {
				conn->le_tx_def_phys &= ~HCI_LE_SET_PHY_CODED;
				conn->le_rx_def_phys &= ~HCI_LE_SET_PHY_CODED;
			}
		}

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
		}
	}

	hci_dev_unlock(hdev);
}
6693 
/* Handle HCI LE Long Term Key Request event.
 *
 * Looks up a stored LTK for the connection and replies with it, after
 * validating EDiv/Rand according to the key type. If no matching key
 * exists a negative reply is sent so the controller aborts encryption.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad the rest of the reply buffer */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6758 
6759 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6760 				      u8 reason)
6761 {
6762 	struct hci_cp_le_conn_param_req_neg_reply cp;
6763 
6764 	cp.handle = cpu_to_le16(handle);
6765 	cp.reason = reason;
6766 
6767 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6768 		     &cp);
6769 }
6770 
/* Handle HCI LE Remote Connection Parameter Request event.
 *
 * Validates the requested parameters, stores them (and informs MGMT)
 * when acting as master, and accepts or rejects the request.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED) {
		send_conn_param_neg_reply(hdev, handle,
					  HCI_ERROR_UNKNOWN_CONN_ID);
		goto unlock;
	}

	/* Reject intervals above the per-connection maximum */
	if (max > hcon->le_conn_max_interval) {
		send_conn_param_neg_reply(hdev, handle,
					  HCI_ERROR_INVALID_LL_PARAMS);
		goto unlock;
	}

	/* Reject parameter combinations that violate the spec limits */
	if (hci_check_conn_params(min, max, latency, timeout)) {
		send_conn_param_neg_reply(hdev, handle,
					  HCI_ERROR_INVALID_LL_PARAMS);
		goto unlock;
	}

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			/* Known device: hint userspace to store the params */
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request with the parameters as proposed */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);
}
6841 
/* Handle HCI LE Directed Advertising Report event.
 *
 * Each entry is fixed size (no advertising data), so the whole array
 * is validated up front and then processed entry by entry.
 */
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_direct_adv_report *ev = data;
	u64 instant = jiffies;	/* timestamp shared by all reports in this event */
	int i;

	/* Ensure the skb really contains ev->num fixed-size entries */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
				flex_array_size(ev, info, ev->num)))
		return;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct hci_ev_le_direct_adv_info *info = &ev->info[i];

		hci_store_wake_reason(hdev, &info->bdaddr, info->bdaddr_type);

		process_adv_report(hdev, info->type, &info->bdaddr,
				   info->bdaddr_type, &info->direct_addr,
				   info->direct_addr_type, HCI_ADV_PHY_1M, 0,
				   info->rssi, NULL, 0, false, false, instant);
	}

	hci_dev_unlock(hdev);
}
6871 
6872 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6873 				  struct sk_buff *skb)
6874 {
6875 	struct hci_ev_le_phy_update_complete *ev = data;
6876 	struct hci_conn *conn;
6877 
6878 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6879 
6880 	if (ev->status)
6881 		return;
6882 
6883 	hci_dev_lock(hdev);
6884 
6885 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6886 	if (!conn)
6887 		goto unlock;
6888 
6889 	conn->le_tx_phy = ev->tx_phy;
6890 	conn->le_rx_phy = ev->rx_phy;
6891 
6892 unlock:
6893 	hci_dev_unlock(hdev);
6894 }
6895 
6896 /* Convert LE PHY to QoS PHYs */
6897 static u8 le_phy_qos(u8 phy)
6898 {
6899 	switch (phy) {
6900 	case 0x01:
6901 		return HCI_LE_SET_PHY_1M;
6902 	case 0x02:
6903 		return HCI_LE_SET_PHY_2M;
6904 	case 0x03:
6905 		return HCI_LE_SET_PHY_CODED;
6906 	}
6907 
6908 	return 0;
6909 }
6910 
/* Handle HCI LE CIS Established event.
 *
 * Derives the per-direction SDU interval, latency, SDU size and PHY for
 * the CIS QoS (orientation depends on our role), then completes or
 * tears down the connection according to the event status.
 */
static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);
	u32 c_sdu_interval, p_sdu_interval;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != CIS_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	/* Remember whether a Create CIS was pending so more CIS
	 * creations can be queued once the lock is dropped.
	 */
	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
	 * page 3075:
	 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
	 * ISO_Interval + SDU_Interval_C_To_P
	 * ...
	 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
	 *					Transport_Latency
	 */
	c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
			 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
			get_unaligned_le24(ev->c_latency);
	p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
			 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
			get_unaligned_le24(ev->p_latency);

	/* C_To_P is our input when peripheral, our output when central */
	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		qos->ucast.in.interval = c_sdu_interval;
		qos->ucast.out.interval = p_sdu_interval;
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
		qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
		qos->ucast.in.phys = le_phy_qos(ev->c_phy);
		qos->ucast.out.phys = le_phy_qos(ev->p_phy);
		break;
	case HCI_ROLE_MASTER:
		qos->ucast.in.interval = p_sdu_interval;
		qos->ucast.out.interval = c_sdu_interval;
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
		qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
		qos->ucast.out.phys = le_phy_qos(ev->c_phy);
		qos->ucast.in.phys = le_phy_qos(ev->p_phy);
		break;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	/* Establishment failed: notify users and drop the connection */
	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
7010 
7011 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
7012 {
7013 	struct hci_cp_le_reject_cis cp;
7014 
7015 	memset(&cp, 0, sizeof(cp));
7016 	cp.handle = handle;
7017 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7018 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
7019 }
7020 
7021 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7022 {
7023 	struct hci_cp_le_accept_cis cp;
7024 
7025 	memset(&cp, 0, sizeof(cp));
7026 	cp.handle = handle;
7027 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
7028 }
7029 
/* Handle HCI LE CIS Request event.
 *
 * A central is asking us (peripheral) to establish a CIS on top of an
 * existing ACL. Accept immediately, defer to the profile, or reject if
 * no listener accepts the connection.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* The CIS must reference a known ACL connection */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	/* Create the CIS hci_conn if it does not exist yet */
	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, CIS_LINK, &acl->dst, acl->dst_type,
				   HCI_ROLE_SLAVE, cis_handle);
		if (IS_ERR(cis)) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		/* Deferred: let the profile decide, report as pending */
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
7080 
7081 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7082 {
7083 	u8 handle = PTR_UINT(data);
7084 
7085 	return hci_le_terminate_big_sync(hdev, handle,
7086 					 HCI_ERROR_LOCAL_HOST_TERM);
7087 }
7088 
/* Handle HCI LE Create BIG Complete event.
 *
 * Assigns the controller-provided BIS handles to the bound BIS
 * connections of the BIG, or deletes them on failure. If the BIG was
 * created but no bound BIS remains, the BIG is terminated.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;	/* index into ev->bis_handle[], counts connected BISes */

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	/* Validate that the skb holds ev->num_bis handles */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	/* Connect all BISes that are bound to the BIG */
	while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
						      BT_BOUND,
						      HCI_ROLE_MASTER))) {
		if (ev->status) {
			hci_connect_cfm(conn, ev->status);
			hci_conn_del(conn);
			continue;
		}

		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		conn->state = BT_CONNECTED;
		set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
	}

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}
7136 
7137 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7138 					    struct sk_buff *skb)
7139 {
7140 	struct hci_evt_le_big_sync_established *ev = data;
7141 	struct hci_conn *bis, *conn;
7142 	int i;
7143 
7144 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7145 
7146 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
7147 				flex_array_size(ev, bis, ev->num_bis)))
7148 		return;
7149 
7150 	hci_dev_lock(hdev);
7151 
7152 	conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
7153 						  ev->num_bis);
7154 	if (!conn) {
7155 		bt_dev_err(hdev,
7156 			   "Unable to find connection for big 0x%2.2x",
7157 			   ev->handle);
7158 		goto unlock;
7159 	}
7160 
7161 	clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
7162 
7163 	conn->num_bis = 0;
7164 	memset(conn->bis, 0, sizeof(conn->num_bis));
7165 
7166 	for (i = 0; i < ev->num_bis; i++) {
7167 		u16 handle = le16_to_cpu(ev->bis[i]);
7168 		__le32 interval;
7169 
7170 		bis = hci_conn_hash_lookup_handle(hdev, handle);
7171 		if (!bis) {
7172 			if (handle > HCI_CONN_HANDLE_MAX) {
7173 				bt_dev_dbg(hdev, "ignore too large handle %u", handle);
7174 				continue;
7175 			}
7176 			bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY, 0,
7177 					   HCI_ROLE_SLAVE, handle);
7178 			if (IS_ERR(bis))
7179 				continue;
7180 		}
7181 
7182 		if (ev->status != 0x42)
7183 			/* Mark PA sync as established */
7184 			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7185 
7186 		bis->sync_handle = conn->sync_handle;
7187 		bis->iso_qos.bcast.big = ev->handle;
7188 		memset(&interval, 0, sizeof(interval));
7189 		memcpy(&interval, ev->latency, sizeof(ev->latency));
7190 		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7191 		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7192 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7193 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7194 
7195 		if (!ev->status) {
7196 			bis->state = BT_CONNECTED;
7197 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7198 			hci_debugfs_create_conn(bis);
7199 			hci_conn_add_sysfs(bis);
7200 			hci_iso_setup_path(bis);
7201 		}
7202 	}
7203 
7204 	/* In case BIG sync failed, notify each failed connection to
7205 	 * the user after all hci connections have been added
7206 	 */
7207 	if (ev->status)
7208 		for (i = 0; i < ev->num_bis; i++) {
7209 			u16 handle = le16_to_cpu(ev->bis[i]);
7210 
7211 			bis = hci_conn_hash_lookup_handle(hdev, handle);
7212 			if (!bis)
7213 				continue;
7214 
7215 			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7216 			hci_connect_cfm(bis, ev->status);
7217 		}
7218 
7219 unlock:
7220 	hci_dev_unlock(hdev);
7221 }
7222 
/* Handle HCI LE BIG Sync Lost event.
 *
 * Tears down every connected BIS of the BIG; MGMT is notified of the
 * disconnection only once, for the first BIS found.
 */
static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_lost *ev = data;
	struct hci_conn *bis;
	bool mgmt_conn = false;

	bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);

	hci_dev_lock(hdev);

	/* Delete each bis connection */
	while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
						     BT_CONNECTED,
						     HCI_ROLE_SLAVE))) {
		if (!mgmt_conn) {
			/* Report the disconnection to MGMT only once */
			mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED,
						       &bis->flags);
			mgmt_device_disconnected(hdev, &bis->dst, bis->type,
						 bis->dst_type, ev->reason,
						 mgmt_conn);
		}

		clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
		hci_disconn_cfm(bis, ev->reason);
		hci_conn_del(bis);
	}

	hci_dev_unlock(hdev);
}
7253 
/* Handle HCI LE BIGInfo Advertising Report event.
 *
 * For a deferred PA sync, record whether the broadcast is encrypted and
 * notify the ISO layer that BIG info is available.
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		goto unlock;

	/* Only deferred connections take the notification path below */
	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	pa_sync = hci_conn_hash_lookup_pa_sync_handle
			(hdev,
			le16_to_cpu(ev->sync_handle));

	if (!pa_sync)
		goto unlock;

	pa_sync->iso_qos.bcast.encryption = ev->encryption;

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0);

unlock:
	hci_dev_unlock(hdev);
}
7288 
/* Handle HCI LE Read All Remote Features Complete event.
 *
 * Like hci_le_remote_feat_complete_evt() but for the extended feature
 * read that returns the full 248-byte feature set.
 */
static void hci_le_read_all_remote_features_evt(struct hci_dev *hdev,
						void *data, struct sk_buff *skb)
{
	struct hci_evt_le_read_all_remote_features_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Extended event carries all feature pages (248 bytes) */
		memcpy(conn->le_features, ev->features, 248);

		/* Update supported PHYs */
		if (!(conn->le_features[1] & HCI_LE_PHY_2M)) {
			conn->le_tx_def_phys &= ~HCI_LE_SET_PHY_2M;
			conn->le_rx_def_phys &= ~HCI_LE_SET_PHY_2M;
		}

		if (!(conn->le_features[1] & HCI_LE_PHY_CODED)) {
			conn->le_tx_def_phys &= ~HCI_LE_SET_PHY_CODED;
			conn->le_rx_def_phys &= ~HCI_LE_SET_PHY_CODED;
		}
	}

	if (conn->state == BT_CONFIG) {
		__u8 status;

		/* If the local controller supports peripheral-initiated
		 * features exchange, but the remote controller does
		 * not, then it is possible that the error code 0x1a
		 * for unsupported remote feature gets returned.
		 *
		 * In this specific case, allow the connection to
		 * transition into connected state and mark it as
		 * successful.
		 */
		if (!conn->out &&
		    ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
		    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
			status = 0x00;
		else
			status = ev->status;

		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);
}
7344 
/* Designated initializer for an LE subevent table entry whose payload
 * length may vary between _min_len and _max_len bytes.
 */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length subevent entry: payload must be exactly _len bytes */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Entry for a subevent whose payload is just a status byte */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7357 
/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so the use of the macros above is recommended since it
 * attempts to initialize at the proper index using Designated Initializers;
 * that way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;	/* events shorter than this are rejected */
	u16  max_len;	/* events longer than this are only warned about */
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_established_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
				 hci_le_per_adv_report_evt,
				 sizeof(struct hci_ev_le_per_adv_report),
				 HCI_MAX_EVENT_SIZE),
	/* [0x10 = HCI_EV_LE_PA_SYNC_LOST] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_LOST, hci_le_pa_sync_lost_evt,
		  sizeof(struct hci_ev_le_pa_sync_lost)),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x18 = HCI_EV_LE_PAST_RECEIVED] */
	HCI_LE_EV(HCI_EV_LE_PAST_RECEIVED,
		  hci_le_past_received_evt,
		  sizeof(struct hci_ev_le_past_received)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABLISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_established),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST,
		     hci_le_big_sync_lost_evt,
		     sizeof(struct hci_evt_le_big_sync_lost),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x2b = HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE,
		     hci_le_read_all_remote_features_evt,
		     sizeof(struct
			    hci_evt_le_read_all_remote_features_complete),
		     HCI_MAX_EVENT_SIZE),
};
7457 
7458 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7459 			    struct sk_buff *skb, u16 *opcode, u8 *status,
7460 			    hci_req_complete_t *req_complete,
7461 			    hci_req_complete_skb_t *req_complete_skb)
7462 {
7463 	struct hci_ev_le_meta *ev = data;
7464 	const struct hci_le_ev *subev;
7465 
7466 	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7467 
7468 	/* Only match event if command OGF is for LE */
7469 	if (hdev->req_skb &&
7470 	   (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
7471 	    hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
7472 	    hci_skb_event(hdev->req_skb) == ev->subevent) {
7473 		*opcode = hci_skb_opcode(hdev->req_skb);
7474 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7475 				     req_complete_skb);
7476 	}
7477 
7478 	subev = &hci_le_ev_table[ev->subevent];
7479 	if (!subev->func)
7480 		return;
7481 
7482 	if (skb->len < subev->min_len) {
7483 		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7484 			   ev->subevent, skb->len, subev->min_len);
7485 		return;
7486 	}
7487 
7488 	/* Just warn if the length is over max_len size it still be
7489 	 * possible to partially parse the event so leave to callback to
7490 	 * decide if that is acceptable.
7491 	 */
7492 	if (skb->len > subev->max_len)
7493 		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7494 			    ev->subevent, skb->len, subev->max_len);
7495 	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7496 	if (!data)
7497 		return;
7498 
7499 	subev->func(hdev, data, skb);
7500 }
7501 
7502 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7503 				 u8 event, struct sk_buff *skb)
7504 {
7505 	struct hci_ev_cmd_complete *ev;
7506 	struct hci_event_hdr *hdr;
7507 
7508 	if (!skb)
7509 		return false;
7510 
7511 	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7512 	if (!hdr)
7513 		return false;
7514 
7515 	if (event) {
7516 		if (hdr->evt != event)
7517 			return false;
7518 		return true;
7519 	}
7520 
7521 	/* Check if request ended in Command Status - no way to retrieve
7522 	 * any extra parameters in this case.
7523 	 */
7524 	if (hdr->evt == HCI_EV_CMD_STATUS)
7525 		return false;
7526 
7527 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7528 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7529 			   hdr->evt);
7530 		return false;
7531 	}
7532 
7533 	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7534 	if (!ev)
7535 		return false;
7536 
7537 	if (opcode != __le16_to_cpu(ev->opcode)) {
7538 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7539 		       __le16_to_cpu(ev->opcode));
7540 		return false;
7541 	}
7542 
7543 	return true;
7544 }
7545 
7546 static void hci_store_wake_reason(struct hci_dev *hdev,
7547 				  const bdaddr_t *bdaddr, u8 addr_type)
7548 	__must_hold(&hdev->lock)
7549 {
7550 	lockdep_assert_held(&hdev->lock);
7551 
7552 	/* If we are currently suspended and this is the first BT event seen,
7553 	 * save the wake reason associated with the event.
7554 	 */
7555 	if (!hdev->suspended || hdev->wake_reason)
7556 		return;
7557 
7558 	if (!bdaddr) {
7559 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7560 		return;
7561 	}
7562 
7563 	/* Default to remote wake. Values for wake_reason are documented in the
7564 	 * Bluez mgmt api docs.
7565 	 */
7566 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7567 	bacpy(&hdev->wake_addr, bdaddr);
7568 	hdev->wake_addr_type = addr_type;
7569 }
7570 
/* Initialize the hci_ev_table slot at event opcode _op with a plain handler
 * that accepts payload lengths in the range [_min_len, _max_len].
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length event: min_len == max_len */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Event whose payload is a bare status byte */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Same as HCI_EV_VL but for handlers that take the request-completion
 * out-parameters (func_req union member, .req = true).
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length request-aware event: min_len == max_len */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7595 
/* Entries in this table shall have their position according to the event
 * opcode they handle, so the use of the macros above is recommended since it
 * attempts to initialize at the proper index using Designated Initializers;
 * that way events without a callback function need not be entered.
 */
static const struct hci_ev {
	bool req;	/* selects which union member below is valid */
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;	/* events shorter than this are rejected */
	u16  max_len;	/* events longer than this are only warned about */
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};
7731 
7732 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7733 			   u16 *opcode, u8 *status,
7734 			   hci_req_complete_t *req_complete,
7735 			   hci_req_complete_skb_t *req_complete_skb)
7736 {
7737 	const struct hci_ev *ev = &hci_ev_table[event];
7738 	void *data;
7739 
7740 	if (!ev->func)
7741 		return;
7742 
7743 	if (skb->len < ev->min_len) {
7744 		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7745 			   event, skb->len, ev->min_len);
7746 		return;
7747 	}
7748 
7749 	/* Just warn if the length is over max_len size it still be
7750 	 * possible to partially parse the event so leave to callback to
7751 	 * decide if that is acceptable.
7752 	 */
7753 	if (skb->len > ev->max_len)
7754 		bt_dev_warn_ratelimited(hdev,
7755 					"unexpected event 0x%2.2x length: %u > %u",
7756 					event, skb->len, ev->max_len);
7757 
7758 	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7759 	if (!data)
7760 		return;
7761 
7762 	if (ev->req)
7763 		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7764 			     req_complete_skb);
7765 	else
7766 		ev->func(hdev, data, skb);
7767 }
7768 
/* Main entry point for a received HCI event packet: stash a clone of the
 * event, match it against any pending non-LE command, dispatch to the
 * per-event handler, then run the request completion callback (if any).
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Keep a clone of the most recent event in hdev->recv_event for
	 * later retrieval, replacing any previously stored one.
	 */
	hci_dev_lock(hdev);
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
	hci_dev_unlock(hdev);

	/* Event code 0x00 is not a valid HCI event */
	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	/* Dispatch to the handler registered in hci_ev_table; may fill in
	 * opcode/status and the completion callbacks (e.g. for LE Meta).
	 */
	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	/* NULL bdaddr marks the wake reason as unexpected if nothing more
	 * specific was recorded by a handler while suspended.
	 */
	hci_dev_lock(hdev);
	hci_store_wake_reason(hdev, NULL, 0);
	hci_dev_unlock(hdev);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Only hand orig_skb to the callback if it really is the
		 * completion event; otherwise free it and pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	/* orig_skb is NULL here unless it was consumed-by-value above;
	 * kfree_skb(NULL) is a no-op.
	 */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
7839