xref: /linux/net/bluetooth/hci_event.c (revision dbcedec3a31119d7594baacc743300d127c99c56)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

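/* Helpers to pull a fixed-size block out of an event skb. skb_pull_data()
 * returns NULL when fewer than @len bytes remain, so callers can log the
 * malformed packet and bail out without touching the payload.
 */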
static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}

static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive the Inquiry Complete event right
	 * before we receive the Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed. This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED &&
	    !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

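/* A successful HCI_Reset invalidates all volatile controller state, so
 * drop the host's cached copies as well: advertising and scan response
 * data, TX power values and the LE accept and resolving lists.
 */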
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

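/* Only a request with Read_All_Flag set (read_all == 0x01) reports totals
 * for the whole key store, so the cached counters are updated just in
 * that case.
 */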
static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode) {
			hdev->features[1][0] |= LMP_HOST_SSP;
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		} else {
			hdev->features[1][0] &= ~LMP_HOST_SSP;
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;

		if (conn->enc_key_size < hdev->min_enc_key_size) {
			/* In the slave role, conn->state has already been set
			 * to BT_CONNECTED and the L2CAP connection request may
			 * not have been received yet; at this moment the L2CAP
			 * layer does almost nothing with the non-zero status.
			 * Therefore also clear the encryption-related bits, so
			 * that the handler of the L2CAP connection request
			 * will see the correct security state later.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

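/* Translate the controller's LMP feature bits into the ACL packet types
 * and (e)SCO link types that the host may use on this controller.
 */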
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
			     &hdev->quirks))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->flow_ctl_mode = rp->mode;

	return rp->status;
}

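/* Cache the controller's ACL and SCO buffer geometry. The free-packet
 * counters (acl_cnt/sco_cnt) start out at the totals and are decremented
 * as packets are queued towards the controller.
 */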
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	return rp->status;
}

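/* cp->which selects the clock to read: 0x00 is the local Bluetooth clock,
 * anything else the piconet clock of the connection identified by
 * rp->handle.
 */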
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

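/* Besides caching the new random address, re-arm the RPA expiry work when
 * the address just set is the current RPA, so that a fresh one gets
 * generated after hdev->rpa_timeout seconds.
 */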
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only for an adv instance, since handle 0x00 shall be using
	 * HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv && !adv->periodic)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if any
			 * other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

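/* During active scanning an advertising report is buffered here until the
 * matching scan response (or the scan being disabled) arrives, so both can
 * be delivered to the management interface as one device-found event.
 */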
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

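/* Common completion path for the legacy and extended Set Scan Enable
 * commands; keeps the HCI_LE_SCAN flag and the discovery state in sync
 * with what the controller is actually doing.
 */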
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore
		 * mark discovery as stopped.
		 */
1812 		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1813 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1814 		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1815 			 hdev->discovery.state == DISCOVERY_FINDING)
1816 			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1817 
1818 		break;
1819 
1820 	default:
1821 		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1822 			   enable);
1823 		break;
1824 	}
1825 
1826 	hci_dev_unlock(hdev);
1827 }
1828 
1829 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1830 				    struct sk_buff *skb)
1831 {
1832 	struct hci_cp_le_set_scan_enable *cp;
1833 	struct hci_ev_status *rp = data;
1834 
1835 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1836 
1837 	if (rp->status)
1838 		return rp->status;
1839 
1840 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1841 	if (!cp)
1842 		return rp->status;
1843 
1844 	le_set_scan_enable_complete(hdev, cp->enable);
1845 
1846 	return rp->status;
1847 }
1848 
1849 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1850 					struct sk_buff *skb)
1851 {
1852 	struct hci_cp_le_set_ext_scan_enable *cp;
1853 	struct hci_ev_status *rp = data;
1854 
1855 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1856 
1857 	if (rp->status)
1858 		return rp->status;
1859 
1860 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1861 	if (!cp)
1862 		return rp->status;
1863 
1864 	le_set_scan_enable_complete(hdev, cp->enable);
1865 
1866 	return rp->status;
1867 }
1868 
1869 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1870 				      struct sk_buff *skb)
1871 {
1872 	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1873 
1874 	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1875 		   rp->num_of_sets);
1876 
1877 	if (rp->status)
1878 		return rp->status;
1879 
1880 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1881 
1882 	return rp->status;
1883 }
1884 
1885 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1886 					  struct sk_buff *skb)
1887 {
1888 	struct hci_rp_le_read_accept_list_size *rp = data;
1889 
1890 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1891 
1892 	if (rp->status)
1893 		return rp->status;
1894 
1895 	hdev->le_accept_list_size = rp->size;
1896 
1897 	return rp->status;
1898 }
1899 
1900 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1901 				      struct sk_buff *skb)
1902 {
1903 	struct hci_ev_status *rp = data;
1904 
1905 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1906 
1907 	if (rp->status)
1908 		return rp->status;
1909 
1910 	hci_dev_lock(hdev);
1911 	hci_bdaddr_list_clear(&hdev->le_accept_list);
1912 	hci_dev_unlock(hdev);
1913 
1914 	return rp->status;
1915 }
1916 
1917 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1918 				       struct sk_buff *skb)
1919 {
1920 	struct hci_cp_le_add_to_accept_list *sent;
1921 	struct hci_ev_status *rp = data;
1922 
1923 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1924 
1925 	if (rp->status)
1926 		return rp->status;
1927 
1928 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1929 	if (!sent)
1930 		return rp->status;
1931 
1932 	hci_dev_lock(hdev);
1933 	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1934 			    sent->bdaddr_type);
1935 	hci_dev_unlock(hdev);
1936 
1937 	return rp->status;
1938 }
1939 
1940 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1941 					 struct sk_buff *skb)
1942 {
1943 	struct hci_cp_le_del_from_accept_list *sent;
1944 	struct hci_ev_status *rp = data;
1945 
1946 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1947 
1948 	if (rp->status)
1949 		return rp->status;
1950 
1951 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1952 	if (!sent)
1953 		return rp->status;
1954 
1955 	hci_dev_lock(hdev);
1956 	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1957 			    sent->bdaddr_type);
1958 	hci_dev_unlock(hdev);
1959 
1960 	return rp->status;
1961 }
1962 
1963 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1964 					  struct sk_buff *skb)
1965 {
1966 	struct hci_rp_le_read_supported_states *rp = data;
1967 
1968 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1969 
1970 	if (rp->status)
1971 		return rp->status;
1972 
1973 	memcpy(hdev->le_states, rp->le_states, 8);
1974 
1975 	return rp->status;
1976 }
1977 
1978 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1979 				      struct sk_buff *skb)
1980 {
1981 	struct hci_rp_le_read_def_data_len *rp = data;
1982 
1983 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1984 
1985 	if (rp->status)
1986 		return rp->status;
1987 
1988 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1989 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1990 
1991 	return rp->status;
1992 }
1993 
1994 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1995 				       struct sk_buff *skb)
1996 {
1997 	struct hci_cp_le_write_def_data_len *sent;
1998 	struct hci_ev_status *rp = data;
1999 
2000 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2001 
2002 	if (rp->status)
2003 		return rp->status;
2004 
2005 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2006 	if (!sent)
2007 		return rp->status;
2008 
2009 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2010 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2011 
2012 	return rp->status;
2013 }
2014 
2015 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2016 				       struct sk_buff *skb)
2017 {
2018 	struct hci_cp_le_add_to_resolv_list *sent;
2019 	struct hci_ev_status *rp = data;
2020 
2021 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2022 
2023 	if (rp->status)
2024 		return rp->status;
2025 
2026 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2027 	if (!sent)
2028 		return rp->status;
2029 
2030 	hci_dev_lock(hdev);
2031 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2032 				sent->bdaddr_type, sent->peer_irk,
2033 				sent->local_irk);
2034 	hci_dev_unlock(hdev);
2035 
2036 	return rp->status;
2037 }
2038 
2039 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2040 					 struct sk_buff *skb)
2041 {
2042 	struct hci_cp_le_del_from_resolv_list *sent;
2043 	struct hci_ev_status *rp = data;
2044 
2045 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2046 
2047 	if (rp->status)
2048 		return rp->status;
2049 
2050 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2051 	if (!sent)
2052 		return rp->status;
2053 
2054 	hci_dev_lock(hdev);
2055 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2056 			    sent->bdaddr_type);
2057 	hci_dev_unlock(hdev);
2058 
2059 	return rp->status;
2060 }
2061 
2062 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2063 				      struct sk_buff *skb)
2064 {
2065 	struct hci_ev_status *rp = data;
2066 
2067 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2068 
2069 	if (rp->status)
2070 		return rp->status;
2071 
2072 	hci_dev_lock(hdev);
2073 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2074 	hci_dev_unlock(hdev);
2075 
2076 	return rp->status;
2077 }
2078 
2079 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2080 					  struct sk_buff *skb)
2081 {
2082 	struct hci_rp_le_read_resolv_list_size *rp = data;
2083 
2084 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2085 
2086 	if (rp->status)
2087 		return rp->status;
2088 
2089 	hdev->le_resolv_list_size = rp->size;
2090 
2091 	return rp->status;
2092 }
2093 
2094 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2095 					       struct sk_buff *skb)
2096 {
2097 	struct hci_ev_status *rp = data;
2098 	__u8 *sent;
2099 
2100 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2101 
2102 	if (rp->status)
2103 		return rp->status;
2104 
2105 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2106 	if (!sent)
2107 		return rp->status;
2108 
2109 	hci_dev_lock(hdev);
2110 
2111 	if (*sent)
2112 		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2113 	else
2114 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2115 
2116 	hci_dev_unlock(hdev);
2117 
2118 	return rp->status;
2119 }
2120 
2121 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2122 				      struct sk_buff *skb)
2123 {
2124 	struct hci_rp_le_read_max_data_len *rp = data;
2125 
2126 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2127 
2128 	if (rp->status)
2129 		return rp->status;
2130 
2131 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2132 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2133 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2134 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2135 
2136 	return rp->status;
2137 }
2138 
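/* Keep the cached host features in sync with what the controller accepted:
 * toggling LE also toggles HCI_LE_ENABLED, and disabling LE clears
 * HCI_ADVERTISING since advertising cannot continue without LE support.
 */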
2139 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2140 					 struct sk_buff *skb)
2141 {
2142 	struct hci_cp_write_le_host_supported *sent;
2143 	struct hci_ev_status *rp = data;
2144 
2145 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2146 
2147 	if (rp->status)
2148 		return rp->status;
2149 
2150 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2151 	if (!sent)
2152 		return rp->status;
2153 
2154 	hci_dev_lock(hdev);
2155 
2156 	if (sent->le) {
2157 		hdev->features[1][0] |= LMP_HOST_LE;
2158 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2159 	} else {
2160 		hdev->features[1][0] &= ~LMP_HOST_LE;
2161 		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2162 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2163 	}
2164 
2165 	if (sent->simul)
2166 		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2167 	else
2168 		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2169 
2170 	hci_dev_unlock(hdev);
2171 
2172 	return rp->status;
2173 }
2174 
2175 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2176 			       struct sk_buff *skb)
2177 {
2178 	struct hci_cp_le_set_adv_param *cp;
2179 	struct hci_ev_status *rp = data;
2180 
2181 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2182 
2183 	if (rp->status)
2184 		return rp->status;
2185 
2186 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2187 	if (!cp)
2188 		return rp->status;
2189 
2190 	hci_dev_lock(hdev);
2191 	hdev->adv_addr_type = cp->own_address_type;
2192 	hci_dev_unlock(hdev);
2193 
2194 	return rp->status;
2195 }
2196 
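/* The controller selects the actual TX power for extended advertising, so
 * on success remember it (per instance, or in hdev for the legacy instance
 * 0) and refresh the advertising data now that the value is known.
 */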
2197 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2198 				   struct sk_buff *skb)
2199 {
2200 	struct hci_rp_le_set_ext_adv_params *rp = data;
2201 	struct hci_cp_le_set_ext_adv_params *cp;
2202 	struct adv_info *adv_instance;
2203 
2204 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2205 
2206 	if (rp->status)
2207 		return rp->status;
2208 
2209 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2210 	if (!cp)
2211 		return rp->status;
2212 
2213 	hci_dev_lock(hdev);
2214 	hdev->adv_addr_type = cp->own_addr_type;
2215 	if (!cp->handle) {
2216 		/* Store in hdev for instance 0 */
2217 		hdev->adv_tx_power = rp->tx_power;
2218 	} else {
2219 		adv_instance = hci_find_adv_instance(hdev, cp->handle);
2220 		if (adv_instance)
2221 			adv_instance->tx_power = rp->tx_power;
2222 	}
2223 	/* Update adv data as tx power is known now */
2224 	hci_update_adv_data(hdev, cp->handle);
2225 
2226 	hci_dev_unlock(hdev);
2227 
2228 	return rp->status;
2229 }
2230 
2231 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2232 			   struct sk_buff *skb)
2233 {
2234 	struct hci_rp_read_rssi *rp = data;
2235 	struct hci_conn *conn;
2236 
2237 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2238 
2239 	if (rp->status)
2240 		return rp->status;
2241 
2242 	hci_dev_lock(hdev);
2243 
2244 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2245 	if (conn)
2246 		conn->rssi = rp->rssi;
2247 
2248 	hci_dev_unlock(hdev);
2249 
2250 	return rp->status;
2251 }
2252 
2253 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2254 			       struct sk_buff *skb)
2255 {
2256 	struct hci_cp_read_tx_power *sent;
2257 	struct hci_rp_read_tx_power *rp = data;
2258 	struct hci_conn *conn;
2259 
2260 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2261 
2262 	if (rp->status)
2263 		return rp->status;
2264 
2265 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2266 	if (!sent)
2267 		return rp->status;
2268 
2269 	hci_dev_lock(hdev);
2270 
2271 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2272 	if (!conn)
2273 		goto unlock;
2274 
2275 	switch (sent->type) {
2276 	case 0x00:
2277 		conn->tx_power = rp->tx_power;
2278 		break;
2279 	case 0x01:
2280 		conn->max_tx_power = rp->tx_power;
2281 		break;
2282 	}
2283 
2284 unlock:
2285 	hci_dev_unlock(hdev);
2286 	return rp->status;
2287 }
2288 
2289 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2290 				      struct sk_buff *skb)
2291 {
2292 	struct hci_ev_status *rp = data;
2293 	u8 *mode;
2294 
2295 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2296 
2297 	if (rp->status)
2298 		return rp->status;
2299 
2300 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2301 	if (mode)
2302 		hdev->ssp_debug_mode = *mode;
2303 
2304 	return rp->status;
2305 }
2306 
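/* Command Status handlers (hci_cs_*): unlike Command Complete, a Command
 * Status event only indicates whether the controller accepted the command.
 * A non-zero status means the operation will never start, so any state set
 * up in anticipation of it has to be unwound here.
 */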
2307 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2308 {
2309 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2310 
2311 	if (status)
2312 		return;
2313 
2314 	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2315 		set_bit(HCI_INQUIRY, &hdev->flags);
2316 }
2317 
2318 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2319 {
2320 	struct hci_cp_create_conn *cp;
2321 	struct hci_conn *conn;
2322 
2323 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2324 
2325 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2326 	if (!cp)
2327 		return;
2328 
2329 	hci_dev_lock(hdev);
2330 
2331 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2332 
2333 	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2334 
2335 	if (status) {
2336 		if (conn && conn->state == BT_CONNECT) {
2337 			conn->state = BT_CLOSED;
2338 			hci_connect_cfm(conn, status);
2339 			hci_conn_del(conn);
2340 		}
2341 	} else {
2342 		if (!conn) {
2343 			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2344 						  HCI_ROLE_MASTER);
2345 			if (!conn)
2346 				bt_dev_err(hdev, "no memory for new connection");
2347 		}
2348 	}
2349 
2350 	hci_dev_unlock(hdev);
2351 }
2352 
2353 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2354 {
2355 	struct hci_cp_add_sco *cp;
2356 	struct hci_conn *acl;
2357 	struct hci_link *link;
2358 	__u16 handle;
2359 
2360 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2361 
2362 	if (!status)
2363 		return;
2364 
2365 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2366 	if (!cp)
2367 		return;
2368 
2369 	handle = __le16_to_cpu(cp->handle);
2370 
2371 	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2372 
2373 	hci_dev_lock(hdev);
2374 
2375 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2376 	if (acl) {
2377 		link = list_first_entry_or_null(&acl->link_list,
2378 						struct hci_link, list);
2379 		if (link && link->conn) {
2380 			link->conn->state = BT_CLOSED;
2381 
2382 			hci_connect_cfm(link->conn, status);
2383 			hci_conn_del(link->conn);
2384 		}
2385 	}
2386 
2387 	hci_dev_unlock(hdev);
2388 }
2389 
2390 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2391 {
2392 	struct hci_cp_auth_requested *cp;
2393 	struct hci_conn *conn;
2394 
2395 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2396 
2397 	if (!status)
2398 		return;
2399 
2400 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2401 	if (!cp)
2402 		return;
2403 
2404 	hci_dev_lock(hdev);
2405 
2406 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2407 	if (conn) {
2408 		if (conn->state == BT_CONFIG) {
2409 			hci_connect_cfm(conn, status);
2410 			hci_conn_drop(conn);
2411 		}
2412 	}
2413 
2414 	hci_dev_unlock(hdev);
2415 }
2416 
2417 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2418 {
2419 	struct hci_cp_set_conn_encrypt *cp;
2420 	struct hci_conn *conn;
2421 
2422 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2423 
2424 	if (!status)
2425 		return;
2426 
2427 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2428 	if (!cp)
2429 		return;
2430 
2431 	hci_dev_lock(hdev);
2432 
2433 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2434 	if (conn) {
2435 		if (conn->state == BT_CONFIG) {
2436 			hci_connect_cfm(conn, status);
2437 			hci_conn_drop(conn);
2438 		}
2439 	}
2440 
2441 	hci_dev_unlock(hdev);
2442 }
2443 
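/* Decide whether an outgoing connection still in BT_CONFIG needs
 * authentication before it can be reported as set up. Returns 1 if an
 * Authentication Request should be issued, 0 otherwise.
 */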
2444 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2445 				    struct hci_conn *conn)
2446 {
2447 	if (conn->state != BT_CONFIG || !conn->out)
2448 		return 0;
2449 
2450 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2451 		return 0;
2452 
2453 	/* Only request authentication for SSP connections or non-SSP
2454 	 * devices with sec_level MEDIUM, HIGH or FIPS, or if MITM
2455 	 * protection is requested.
2456 	 */
2457 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2458 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2459 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2460 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2461 		return 0;
2462 
2463 	return 1;
2464 }
2465 
2466 static int hci_resolve_name(struct hci_dev *hdev,
2467 				   struct inquiry_entry *e)
2468 {
2469 	struct hci_cp_remote_name_req cp;
2470 
2471 	memset(&cp, 0, sizeof(cp));
2472 
2473 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2474 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2475 	cp.pscan_mode = e->data.pscan_mode;
2476 	cp.clock_offset = e->data.clock_offset;
2477 
2478 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2479 }
2480 
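/* Pick the next inquiry cache entry whose name still needs resolving and
 * issue a Remote Name Request for it. Returns true if a request was
 * started, false if the list is exhausted or resolving has taken too long.
 */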
2481 static bool hci_resolve_next_name(struct hci_dev *hdev)
2482 {
2483 	struct discovery_state *discov = &hdev->discovery;
2484 	struct inquiry_entry *e;
2485 
2486 	if (list_empty(&discov->resolve))
2487 		return false;
2488 
2489 	/* We should stop if we already spent too much time resolving names. */
2490 	/* We should stop if we have already spent too much time resolving names. */
2491 		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2492 		return false;
2493 	}
2494 
2495 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2496 	if (!e)
2497 		return false;
2498 
2499 	if (hci_resolve_name(hdev, e) == 0) {
2500 		e->name_state = NAME_PENDING;
2501 		return true;
2502 	}
2503 
2504 	return false;
2505 }
2506 
2507 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2508 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2509 {
2510 	struct discovery_state *discov = &hdev->discovery;
2511 	struct inquiry_entry *e;
2512 
2513 	/* Update the mgmt connected state if necessary. Be careful,
2514 	 * however, with conn objects that exist but are not (yet)
2515 	 * connected: only those in the BT_CONFIG or BT_CONNECTED states
2516 	 * can be considered connected.
2517 	 */
2518 	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2519 		mgmt_device_connected(hdev, conn, name, name_len);
2520 
2521 	if (discov->state == DISCOVERY_STOPPED)
2522 		return;
2523 
2524 	if (discov->state == DISCOVERY_STOPPING)
2525 		goto discov_complete;
2526 
2527 	if (discov->state != DISCOVERY_RESOLVING)
2528 		return;
2529 
2530 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2531 	/* If the device was not found in the list of devices whose names
2532 	 * are pending resolution, there is no need to continue resolving
2533 	 * the next name; that will be done upon receiving another Remote
2534 	 * Name Request Complete event. */
2535 	if (!e)
2536 		return;
2537 
2538 	list_del(&e->list);
2539 
2540 	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2541 	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2542 			 name, name_len);
2543 
2544 	if (hci_resolve_next_name(hdev))
2545 		return;
2546 
2547 discov_complete:
2548 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2549 }
2550 
2551 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2552 {
2553 	struct hci_cp_remote_name_req *cp;
2554 	struct hci_conn *conn;
2555 
2556 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2557 
2558 	/* If successful, wait for the Remote Name Request Complete event
2559 	 * before checking whether authentication is needed. */
2560 	if (!status)
2561 		return;
2562 
2563 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2564 	if (!cp)
2565 		return;
2566 
2567 	hci_dev_lock(hdev);
2568 
2569 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2570 
2571 	if (hci_dev_test_flag(hdev, HCI_MGMT))
2572 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2573 
2574 	if (!conn)
2575 		goto unlock;
2576 
2577 	if (!hci_outgoing_auth_needed(hdev, conn))
2578 		goto unlock;
2579 
2580 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2581 		struct hci_cp_auth_requested auth_cp;
2582 
2583 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2584 
2585 		auth_cp.handle = __cpu_to_le16(conn->handle);
2586 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2587 			     sizeof(auth_cp), &auth_cp);
2588 	}
2589 
2590 unlock:
2591 	hci_dev_unlock(hdev);
2592 }
2593 
2594 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2595 {
2596 	struct hci_cp_read_remote_features *cp;
2597 	struct hci_conn *conn;
2598 
2599 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2600 
2601 	if (!status)
2602 		return;
2603 
2604 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2605 	if (!cp)
2606 		return;
2607 
2608 	hci_dev_lock(hdev);
2609 
2610 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2611 	if (conn) {
2612 		if (conn->state == BT_CONFIG) {
2613 			hci_connect_cfm(conn, status);
2614 			hci_conn_drop(conn);
2615 		}
2616 	}
2617 
2618 	hci_dev_unlock(hdev);
2619 }
2620 
2621 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2622 {
2623 	struct hci_cp_read_remote_ext_features *cp;
2624 	struct hci_conn *conn;
2625 
2626 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2627 
2628 	if (!status)
2629 		return;
2630 
2631 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2632 	if (!cp)
2633 		return;
2634 
2635 	hci_dev_lock(hdev);
2636 
2637 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2638 	if (conn) {
2639 		if (conn->state == BT_CONFIG) {
2640 			hci_connect_cfm(conn, status);
2641 			hci_conn_drop(conn);
2642 		}
2643 	}
2644 
2645 	hci_dev_unlock(hdev);
2646 }
2647 
2648 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2649 				       __u8 status)
2650 {
2651 	struct hci_conn *acl;
2652 	struct hci_link *link;
2653 
2654 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2655 
2656 	hci_dev_lock(hdev);
2657 
2658 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2659 	if (acl) {
2660 		link = list_first_entry_or_null(&acl->link_list,
2661 						struct hci_link, list);
2662 		if (link && link->conn) {
2663 			link->conn->state = BT_CLOSED;
2664 
2665 			hci_connect_cfm(link->conn, status);
2666 			hci_conn_del(link->conn);
2667 		}
2668 	}
2669 
2670 	hci_dev_unlock(hdev);
2671 }
2672 
2673 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2674 {
2675 	struct hci_cp_setup_sync_conn *cp;
2676 
2677 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2678 
2679 	if (!status)
2680 		return;
2681 
2682 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2683 	if (!cp)
2684 		return;
2685 
2686 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2687 }
2688 
2689 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2690 {
2691 	struct hci_cp_enhanced_setup_sync_conn *cp;
2692 
2693 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2694 
2695 	if (!status)
2696 		return;
2697 
2698 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2699 	if (!cp)
2700 		return;
2701 
2702 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2703 }
2704 
2705 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2706 {
2707 	struct hci_cp_sniff_mode *cp;
2708 	struct hci_conn *conn;
2709 
2710 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2711 
2712 	if (!status)
2713 		return;
2714 
2715 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2716 	if (!cp)
2717 		return;
2718 
2719 	hci_dev_lock(hdev);
2720 
2721 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2722 	if (conn) {
2723 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2724 
2725 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2726 			hci_sco_setup(conn, status);
2727 	}
2728 
2729 	hci_dev_unlock(hdev);
2730 }
2731 
2732 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2733 {
2734 	struct hci_cp_exit_sniff_mode *cp;
2735 	struct hci_conn *conn;
2736 
2737 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2738 
2739 	if (!status)
2740 		return;
2741 
2742 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2743 	if (!cp)
2744 		return;
2745 
2746 	hci_dev_lock(hdev);
2747 
2748 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2749 	if (conn) {
2750 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2751 
2752 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2753 			hci_sco_setup(conn, status);
2754 	}
2755 
2756 	hci_dev_unlock(hdev);
2757 }
2758 
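/* Disconnect command status: normally the cleanup waits for the
 * Disconnection Complete event, but on failure, or while suspended, the
 * connection has to be torn down here instead.
 */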
2759 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2760 {
2761 	struct hci_cp_disconnect *cp;
2762 	struct hci_conn_params *params;
2763 	struct hci_conn *conn;
2764 	bool mgmt_conn;
2765 
2766 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2767 
2768 	/* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and we are
2769 	 * not suspended; otherwise clean up the connection immediately.
2770 	 */
2771 	if (!status && !hdev->suspended)
2772 		return;
2773 
2774 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2775 	if (!cp)
2776 		return;
2777 
2778 	hci_dev_lock(hdev);
2779 
2780 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2781 	if (!conn)
2782 		goto unlock;
2783 
2784 	if (status) {
2785 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2786 				       conn->dst_type, status);
2787 
2788 		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2789 			hdev->cur_adv_instance = conn->adv_instance;
2790 			hci_enable_advertising(hdev);
2791 		}
2792 
2793 		/* Inform sockets conn is gone before we delete it */
2794 		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2795 
2796 		goto done;
2797 	}
2798 
2799 	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2800 
2801 	if (conn->type == ACL_LINK) {
2802 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2803 			hci_remove_link_key(hdev, &conn->dst);
2804 	}
2805 
2806 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2807 	if (params) {
2808 		switch (params->auto_connect) {
2809 		case HCI_AUTO_CONN_LINK_LOSS:
2810 			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2811 				break;
2812 			fallthrough;
2813 
2814 		case HCI_AUTO_CONN_DIRECT:
2815 		case HCI_AUTO_CONN_ALWAYS:
2816 			hci_pend_le_list_del_init(params);
2817 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2818 			break;
2819 
2820 		default:
2821 			break;
2822 		}
2823 	}
2824 
2825 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2826 				 cp->reason, mgmt_conn);
2827 
2828 	hci_disconn_cfm(conn, cp->reason);
2829 
2830 done:
2831 	/* If the disconnection failed for any reason, the upper layer
2832 	 * does not retry the disconnection in the current implementation.
2833 	 * Hence, we need to do some basic cleanup here and re-enable
2834 	 * advertising if necessary.
2835 	 */
2836 	hci_conn_del(conn);
2837 unlock:
2838 	hci_dev_unlock(hdev);
2839 }
2840 
2841 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2842 {
2843 	/* When using controller-based address resolution, the new address
2844 	 * types 0x02 and 0x03 are used. These need to be converted back
2845 	 * into either the public or the random address type.
2846 	 */
2847 	switch (type) {
2848 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2849 		if (resolved)
2850 			*resolved = true;
2851 		return ADDR_LE_DEV_PUBLIC;
2852 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2853 		if (resolved)
2854 			*resolved = true;
2855 		return ADDR_LE_DEV_RANDOM;
2856 	}
2857 
2858 	if (resolved)
2859 		*resolved = false;
2860 	return type;
2861 }
2862 
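/* Common helper for the LE (Extended) Create Connection command status:
 * record the initiator and responder addresses on the pending connection,
 * since SMP needs these exact values and they must not change later.
 */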
2863 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2864 			      u8 peer_addr_type, u8 own_address_type,
2865 			      u8 filter_policy)
2866 {
2867 	struct hci_conn *conn;
2868 
2869 	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2870 				       peer_addr_type);
2871 	if (!conn)
2872 		return;
2873 
2874 	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2875 
2876 	/* Store the initiator and responder address information which
2877 	 * is needed for SMP. These values will not change during the
2878 	 * lifetime of the connection.
2879 	 */
2880 	conn->init_addr_type = own_address_type;
2881 	if (own_address_type == ADDR_LE_DEV_RANDOM)
2882 		bacpy(&conn->init_addr, &hdev->random_addr);
2883 	else
2884 		bacpy(&conn->init_addr, &hdev->bdaddr);
2885 
2886 	conn->resp_addr_type = peer_addr_type;
2887 	bacpy(&conn->resp_addr, peer_addr);
2888 }
2889 
2890 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2891 {
2892 	struct hci_cp_le_create_conn *cp;
2893 
2894 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2895 
2896 	/* All connection failure handling is taken care of by the
2897 	 * hci_conn_failed function, which is triggered by the HCI
2898 	 * request completion callbacks used for connecting.
2899 	 */
2900 	if (status)
2901 		return;
2902 
2903 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2904 	if (!cp)
2905 		return;
2906 
2907 	hci_dev_lock(hdev);
2908 
2909 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2910 			  cp->own_address_type, cp->filter_policy);
2911 
2912 	hci_dev_unlock(hdev);
2913 }
2914 
2915 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2916 {
2917 	struct hci_cp_le_ext_create_conn *cp;
2918 
2919 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2920 
2921 	/* All connection failure handling is taken care of by the
2922 	 * hci_conn_failed function, which is triggered by the HCI
2923 	 * request completion callbacks used for connecting.
2924 	 */
2925 	if (status)
2926 		return;
2927 
2928 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2929 	if (!cp)
2930 		return;
2931 
2932 	hci_dev_lock(hdev);
2933 
2934 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2935 			  cp->own_addr_type, cp->filter_policy);
2936 
2937 	hci_dev_unlock(hdev);
2938 }
2939 
2940 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2941 {
2942 	struct hci_cp_le_read_remote_features *cp;
2943 	struct hci_conn *conn;
2944 
2945 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2946 
2947 	if (!status)
2948 		return;
2949 
2950 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2951 	if (!cp)
2952 		return;
2953 
2954 	hci_dev_lock(hdev);
2955 
2956 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2957 	if (conn) {
2958 		if (conn->state == BT_CONFIG) {
2959 			hci_connect_cfm(conn, status);
2960 			hci_conn_drop(conn);
2961 		}
2962 	}
2963 
2964 	hci_dev_unlock(hdev);
2965 }
2966 
2967 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2968 {
2969 	struct hci_cp_le_start_enc *cp;
2970 	struct hci_conn *conn;
2971 
2972 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2973 
2974 	if (!status)
2975 		return;
2976 
2977 	hci_dev_lock(hdev);
2978 
2979 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2980 	if (!cp)
2981 		goto unlock;
2982 
2983 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2984 	if (!conn)
2985 		goto unlock;
2986 
2987 	if (conn->state != BT_CONNECTED)
2988 		goto unlock;
2989 
2990 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2991 	hci_conn_drop(conn);
2992 
2993 unlock:
2994 	hci_dev_unlock(hdev);
2995 }
2996 
2997 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2998 {
2999 	struct hci_cp_switch_role *cp;
3000 	struct hci_conn *conn;
3001 
3002 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3003 
3004 	if (!status)
3005 		return;
3006 
3007 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3008 	if (!cp)
3009 		return;
3010 
3011 	hci_dev_lock(hdev);
3012 
3013 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3014 	if (conn)
3015 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3016 
3017 	hci_dev_unlock(hdev);
3018 }
3019 
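/* Inquiry Complete: clear HCI_INQUIRY, wake up any waiters, and advance
 * the mgmt discovery state machine, either into name resolution or
 * straight to DISCOVERY_STOPPED when there is nothing left to resolve.
 */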
3020 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3021 				     struct sk_buff *skb)
3022 {
3023 	struct hci_ev_status *ev = data;
3024 	struct discovery_state *discov = &hdev->discovery;
3025 	struct inquiry_entry *e;
3026 
3027 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3028 
3029 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3030 		return;
3031 
3032 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3033 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
3034 
3035 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3036 		return;
3037 
3038 	hci_dev_lock(hdev);
3039 
3040 	if (discov->state != DISCOVERY_FINDING)
3041 		goto unlock;
3042 
3043 	if (list_empty(&discov->resolve)) {
3044 		/* When BR/EDR inquiry is active and no LE scanning is in
3045 		 * progress, change the discovery state to indicate completion.
3046 		 *
3047 		 * When running LE scanning and BR/EDR inquiry simultaneously
3048 		 * and the LE scan has already finished, change the discovery
3049 		 * state to indicate completion.
3050 		 */
3051 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3052 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3053 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3054 		goto unlock;
3055 	}
3056 
3057 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3058 	if (e && hci_resolve_name(hdev, e) == 0) {
3059 		e->name_state = NAME_PENDING;
3060 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3061 		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3062 	} else {
3063 		/* When BR/EDR inquiry is active and no LE scanning is in
3064 		 * progress, change the discovery state to indicate completion.
3065 		 *
3066 		 * When running LE scanning and BR/EDR inquiry simultaneously
3067 		 * and the LE scan has already finished, change the discovery
3068 		 * state to indicate completion.
3069 		 */
3070 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3071 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3072 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3073 	}
3074 
3075 unlock:
3076 	hci_dev_unlock(hdev);
3077 }
3078 
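/* Standard Inquiry Result: validate the event length against the number
 * of reported entries before parsing, then feed each entry into the
 * inquiry cache and report it to mgmt as a found device.
 */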
3079 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3080 				   struct sk_buff *skb)
3081 {
3082 	struct hci_ev_inquiry_result *ev = edata;
3083 	struct inquiry_data data;
3084 	int i;
3085 
3086 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3087 			     flex_array_size(ev, info, ev->num)))
3088 		return;
3089 
3090 	bt_dev_dbg(hdev, "num %d", ev->num);
3091 
3092 	if (!ev->num)
3093 		return;
3094 
3095 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3096 		return;
3097 
3098 	hci_dev_lock(hdev);
3099 
3100 	for (i = 0; i < ev->num; i++) {
3101 		struct inquiry_info *info = &ev->info[i];
3102 		u32 flags;
3103 
3104 		bacpy(&data.bdaddr, &info->bdaddr);
3105 		data.pscan_rep_mode	= info->pscan_rep_mode;
3106 		data.pscan_period_mode	= info->pscan_period_mode;
3107 		data.pscan_mode		= info->pscan_mode;
3108 		memcpy(data.dev_class, info->dev_class, 3);
3109 		data.clock_offset	= info->clock_offset;
3110 		data.rssi		= HCI_RSSI_INVALID;
3111 		data.ssp_mode		= 0x00;
3112 
3113 		flags = hci_inquiry_cache_update(hdev, &data, false);
3114 
3115 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3116 				  info->dev_class, HCI_RSSI_INVALID,
3117 				  flags, NULL, 0, NULL, 0, 0);
3118 	}
3119 
3120 	hci_dev_unlock(hdev);
3121 }
3122 
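/* Connection Complete: bind the newly assigned handle to the hci_conn,
 * move it to BT_CONFIG (ACL) or BT_CONNECTED (SCO), and kick off remote
 * feature discovery. Duplicate events for an already set up handle are
 * ignored to avoid corrupting the connection state.
 */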
3123 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3124 				  struct sk_buff *skb)
3125 {
3126 	struct hci_ev_conn_complete *ev = data;
3127 	struct hci_conn *conn;
3128 	u8 status = ev->status;
3129 
3130 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3131 
3132 	hci_dev_lock(hdev);
3133 
3134 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3135 	if (!conn) {
3136 		/* If the status indicates an error and no connection is
3137 		 * pending, just unlock, as there is nothing to clean up.
3138 		 */
3139 		if (ev->status)
3140 			goto unlock;
3141 
3142 		/* The connection may not exist if it was auto-connected. Check
3143 		 * the BR/EDR accept list to see if this device is allowed to
3144 		 * auto-connect, and if the link is of ACL type, create the
3145 		 * connection automatically.
3146 		 *
3147 		 * Auto-connect will only occur if the event filter is
3148 		 * programmed with a given address. Right now, the event filter
3149 		 * is only used during suspend.
3150 		 */
3151 		if (ev->link_type == ACL_LINK &&
3152 		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3153 						      &ev->bdaddr,
3154 						      BDADDR_BREDR)) {
3155 			conn = hci_conn_add_unset(hdev, ev->link_type,
3156 						  &ev->bdaddr, HCI_ROLE_SLAVE);
3157 			if (!conn) {
3158 				bt_dev_err(hdev, "no memory for new conn");
3159 				goto unlock;
3160 			}
3161 		} else {
3162 			if (ev->link_type != SCO_LINK)
3163 				goto unlock;
3164 
3165 			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3166 						       &ev->bdaddr);
3167 			if (!conn)
3168 				goto unlock;
3169 
3170 			conn->type = SCO_LINK;
3171 		}
3172 	}
3173 
3174 	/* The HCI_Connection_Complete event is only sent once per connection.
3175 	 * Processing it more than once per connection can corrupt kernel memory.
3176 	 *
3177 	 * As the connection handle is set here for the first time, it indicates
3178 	 * whether the connection is already set up.
3179 	 */
3180 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3181 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3182 		goto unlock;
3183 	}
3184 
3185 	if (!status) {
3186 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3187 		if (status)
3188 			goto done;
3189 
3190 		if (conn->type == ACL_LINK) {
3191 			conn->state = BT_CONFIG;
3192 			hci_conn_hold(conn);
3193 
3194 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3195 			    !hci_find_link_key(hdev, &ev->bdaddr))
3196 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3197 			else
3198 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3199 		} else
3200 			conn->state = BT_CONNECTED;
3201 
3202 		hci_debugfs_create_conn(conn);
3203 		hci_conn_add_sysfs(conn);
3204 
3205 		if (test_bit(HCI_AUTH, &hdev->flags))
3206 			set_bit(HCI_CONN_AUTH, &conn->flags);
3207 
3208 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3209 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3210 
3211 		/* Get remote features */
3212 		if (conn->type == ACL_LINK) {
3213 			struct hci_cp_read_remote_features cp;
3214 			cp.handle = ev->handle;
3215 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3216 				     sizeof(cp), &cp);
3217 
3218 			hci_update_scan(hdev);
3219 		}
3220 
3221 		/* Set packet type for incoming connection */
3222 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3223 			struct hci_cp_change_conn_ptype cp;
3224 			cp.handle = ev->handle;
3225 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3226 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3227 				     &cp);
3228 		}
3229 	}
3230 
3231 	if (conn->type == ACL_LINK)
3232 		hci_sco_setup(conn, ev->status);
3233 
3234 done:
3235 	if (status) {
3236 		hci_conn_failed(conn, status);
3237 	} else if (ev->link_type == SCO_LINK) {
3238 		switch (conn->setting & SCO_AIRMODE_MASK) {
3239 		case SCO_AIRMODE_CVSD:
3240 			if (hdev->notify)
3241 				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3242 			break;
3243 		}
3244 
3245 		hci_connect_cfm(conn, status);
3246 	}
3247 
3248 unlock:
3249 	hci_dev_unlock(hdev);
3250 }
3251 
3252 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3253 {
3254 	struct hci_cp_reject_conn_req cp;
3255 
3256 	bacpy(&cp.bdaddr, bdaddr);
3257 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3258 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3259 }
3260 
3261 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3262 				 struct sk_buff *skb)
3263 {
3264 	struct hci_ev_conn_request *ev = data;
3265 	int mask = hdev->link_mode;
3266 	struct inquiry_entry *ie;
3267 	struct hci_conn *conn;
3268 	__u8 flags = 0;
3269 
3270 	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3271 
3272 	/* Reject an incoming connection from a device with the same BD_ADDR,
3273 	 * as a mitigation for CVE-2020-26555.
3274 	 */
3275 	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3276 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR",
3277 			   &ev->bdaddr);
3278 		hci_reject_conn(hdev, &ev->bdaddr);
3279 		return;
3280 	}
3281 
3282 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3283 				      &flags);
3284 
3285 	if (!(mask & HCI_LM_ACCEPT)) {
3286 		hci_reject_conn(hdev, &ev->bdaddr);
3287 		return;
3288 	}
3289 
3290 	hci_dev_lock(hdev);
3291 
3292 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3293 				   BDADDR_BREDR)) {
3294 		hci_reject_conn(hdev, &ev->bdaddr);
3295 		goto unlock;
3296 	}
3297 
3298 	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3299 	 * connection. These features are only touched through mgmt, so
3300 	 * only do the checks if HCI_MGMT is set.
3301 	 */
3302 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3303 	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3304 	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3305 					       BDADDR_BREDR)) {
3306 		hci_reject_conn(hdev, &ev->bdaddr);
3307 		goto unlock;
3308 	}
3309 
3310 	/* Connection accepted */
3311 
3312 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3313 	if (ie)
3314 		memcpy(ie->data.dev_class, ev->dev_class, 3);
3315 
3316 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3317 			&ev->bdaddr);
3318 	if (!conn) {
3319 		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3320 					  HCI_ROLE_SLAVE);
3321 		if (!conn) {
3322 			bt_dev_err(hdev, "no memory for new connection");
3323 			goto unlock;
3324 		}
3325 	}
3326 
3327 	memcpy(conn->dev_class, ev->dev_class, 3);
3328 
3329 	hci_dev_unlock(hdev);
3330 
3331 	if (ev->link_type == ACL_LINK ||
3332 	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3333 		struct hci_cp_accept_conn_req cp;
3334 		conn->state = BT_CONNECT;
3335 
3336 		bacpy(&cp.bdaddr, &ev->bdaddr);
3337 
3338 		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3339 			cp.role = 0x00; /* Become central */
3340 		else
3341 			cp.role = 0x01; /* Remain peripheral */
3342 
3343 		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3344 	} else if (!(flags & HCI_PROTO_DEFER)) {
3345 		struct hci_cp_accept_sync_conn_req cp;
3346 		conn->state = BT_CONNECT;
3347 
3348 		bacpy(&cp.bdaddr, &ev->bdaddr);
3349 		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3350 
3351 		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3352 		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3353 		cp.max_latency    = cpu_to_le16(0xffff);
3354 		cp.content_format = cpu_to_le16(hdev->voice_setting);
3355 		cp.retrans_effort = 0xff;
3356 
3357 		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3358 			     &cp);
3359 	} else {
3360 		conn->state = BT_CONNECT2;
3361 		hci_connect_cfm(conn, 0);
3362 	}
3363 
3364 	return;
3365 unlock:
3366 	hci_dev_unlock(hdev);
3367 }
3368 
3369 static u8 hci_to_mgmt_reason(u8 err)
3370 {
3371 	switch (err) {
3372 	case HCI_ERROR_CONNECTION_TIMEOUT:
3373 		return MGMT_DEV_DISCONN_TIMEOUT;
3374 	case HCI_ERROR_REMOTE_USER_TERM:
3375 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3376 	case HCI_ERROR_REMOTE_POWER_OFF:
3377 		return MGMT_DEV_DISCONN_REMOTE;
3378 	case HCI_ERROR_LOCAL_HOST_TERM:
3379 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3380 	default:
3381 		return MGMT_DEV_DISCONN_UNKNOWN;
3382 	}
3383 }
3384 
3385 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3386 				     struct sk_buff *skb)
3387 {
3388 	struct hci_ev_disconn_complete *ev = data;
3389 	u8 reason;
3390 	struct hci_conn_params *params;
3391 	struct hci_conn *conn;
3392 	bool mgmt_connected;
3393 
3394 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3395 
3396 	hci_dev_lock(hdev);
3397 
3398 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3399 	if (!conn)
3400 		goto unlock;
3401 
3402 	if (ev->status) {
3403 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3404 				       conn->dst_type, ev->status);
3405 		goto unlock;
3406 	}
3407 
3408 	conn->state = BT_CLOSED;
3409 
3410 	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3411 
3412 	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3413 		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3414 	else
3415 		reason = hci_to_mgmt_reason(ev->reason);
3416 
3417 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3418 				reason, mgmt_connected);
3419 
3420 	if (conn->type == ACL_LINK) {
3421 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3422 			hci_remove_link_key(hdev, &conn->dst);
3423 
3424 		hci_update_scan(hdev);
3425 	}
3426 
3427 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3428 	if (params) {
3429 		switch (params->auto_connect) {
3430 		case HCI_AUTO_CONN_LINK_LOSS:
3431 			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3432 				break;
3433 			fallthrough;
3434 
3435 		case HCI_AUTO_CONN_DIRECT:
3436 		case HCI_AUTO_CONN_ALWAYS:
3437 			hci_pend_le_list_del_init(params);
3438 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
3439 			hci_update_passive_scan(hdev);
3440 			break;
3441 
3442 		default:
3443 			break;
3444 		}
3445 	}
3446 
3447 	hci_disconn_cfm(conn, ev->reason);
3448 
3449 	/* Re-enable advertising if necessary, since it might
3450 	 * have been disabled by the connection. From the
3451 	 * HCI_LE_Set_Advertise_Enable command description in
3452 	 * the core specification (v4.0):
3453 	 * "The Controller shall continue advertising until the Host
3454 	 * issues an LE_Set_Advertise_Enable command with
3455 	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3456 	 * or until a connection is created or until the Advertising
3457 	 * is timed out due to Directed Advertising."
3458 	 */
3459 	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3460 		hdev->cur_adv_instance = conn->adv_instance;
3461 		hci_enable_advertising(hdev);
3462 	}
3463 
3464 	hci_conn_del(conn);
3465 
3466 unlock:
3467 	hci_dev_unlock(hdev);
3468 }
3469 
3470 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3471 				  struct sk_buff *skb)
3472 {
3473 	struct hci_ev_auth_complete *ev = data;
3474 	struct hci_conn *conn;
3475 
3476 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3477 
3478 	hci_dev_lock(hdev);
3479 
3480 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3481 	if (!conn)
3482 		goto unlock;
3483 
3484 	if (!ev->status) {
3485 		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3486 		set_bit(HCI_CONN_AUTH, &conn->flags);
3487 		conn->sec_level = conn->pending_sec_level;
3488 	} else {
3489 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3490 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3491 
3492 		mgmt_auth_failed(conn, ev->status);
3493 	}
3494 
3495 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3496 
3497 	if (conn->state == BT_CONFIG) {
3498 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3499 			struct hci_cp_set_conn_encrypt cp;
3500 			cp.handle  = ev->handle;
3501 			cp.encrypt = 0x01;
3502 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3503 				     &cp);
3504 		} else {
3505 			conn->state = BT_CONNECTED;
3506 			hci_connect_cfm(conn, ev->status);
3507 			hci_conn_drop(conn);
3508 		}
3509 	} else {
3510 		hci_auth_cfm(conn, ev->status);
3511 
3512 		hci_conn_hold(conn);
3513 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3514 		hci_conn_drop(conn);
3515 	}
3516 
3517 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3518 		if (!ev->status) {
3519 			struct hci_cp_set_conn_encrypt cp;
3520 			cp.handle  = ev->handle;
3521 			cp.encrypt = 0x01;
3522 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3523 				     &cp);
3524 		} else {
3525 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3526 			hci_encrypt_cfm(conn, ev->status);
3527 		}
3528 	}
3529 
3530 unlock:
3531 	hci_dev_unlock(hdev);
3532 }
3533 
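/* Remote Name Request Complete: hand the (possibly failed) name lookup to
 * the mgmt discovery code, then check whether the connection still needs
 * outgoing authentication now that the name phase is over.
 */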
3534 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3535 				struct sk_buff *skb)
3536 {
3537 	struct hci_ev_remote_name *ev = data;
3538 	struct hci_conn *conn;
3539 
3540 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3541 
3542 	hci_dev_lock(hdev);
3543 
3544 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3545 
3546 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3547 		goto check_auth;
3548 
3549 	if (ev->status == 0)
3550 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3551 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3552 	else
3553 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3554 
3555 check_auth:
3556 	if (!conn)
3557 		goto unlock;
3558 
3559 	if (!hci_outgoing_auth_needed(hdev, conn))
3560 		goto unlock;
3561 
3562 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3563 		struct hci_cp_auth_requested cp;
3564 
3565 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3566 
3567 		cp.handle = __cpu_to_le16(conn->handle);
3568 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3569 	}
3570 
3571 unlock:
3572 	hci_dev_unlock(hdev);
3573 }
3574 
3575 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3576 				   struct sk_buff *skb)
3577 {
3578 	struct hci_ev_encrypt_change *ev = data;
3579 	struct hci_conn *conn;
3580 
3581 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3582 
3583 	hci_dev_lock(hdev);
3584 
3585 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3586 	if (!conn)
3587 		goto unlock;
3588 
3589 	if (!ev->status) {
3590 		if (ev->encrypt) {
3591 			/* Encryption implies authentication */
3592 			set_bit(HCI_CONN_AUTH, &conn->flags);
3593 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3594 			conn->sec_level = conn->pending_sec_level;
3595 
3596 			/* P-256 authentication key implies FIPS */
3597 			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3598 				set_bit(HCI_CONN_FIPS, &conn->flags);
3599 
3600 			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3601 			    conn->type == LE_LINK)
3602 				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3603 		} else {
3604 			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3605 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3606 		}
3607 	}
3608 
3609 	/* We should disregard the current RPA and generate a new one
3610 	 * whenever the encryption procedure fails.
3611 	 */
3612 	if (ev->status && conn->type == LE_LINK) {
3613 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3614 		hci_adv_instances_set_rpa_expired(hdev, true);
3615 	}
3616 
3617 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3618 
3619 	/* Check link security requirements are met */
3620 	if (!hci_conn_check_link_mode(conn))
3621 		ev->status = HCI_ERROR_AUTH_FAILURE;
3622 
3623 	if (ev->status && conn->state == BT_CONNECTED) {
3624 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3625 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3626 
3627 		/* Notify upper layers so they can cleanup before
3628 		 * disconnecting.
3629 		 */
3630 		hci_encrypt_cfm(conn, ev->status);
3631 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3632 		hci_conn_drop(conn);
3633 		goto unlock;
3634 	}
3635 
3636 	/* Try reading the encryption key size for encrypted ACL links */
3637 	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3638 		struct hci_cp_read_enc_key_size cp;
3639 
3640 		/* Only send HCI_Read_Encryption_Key_Size if the
3641 		 * controller really supports it. If it doesn't, assume
3642 		 * the default size (16).
3643 		 */
3644 		if (!(hdev->commands[20] & 0x10) ||
3645 		    test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) {
3646 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3647 			goto notify;
3648 		}
3649 
3650 		cp.handle = cpu_to_le16(conn->handle);
3651 		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3652 				 sizeof(cp), &cp)) {
3653 			bt_dev_err(hdev, "sending read key size failed");
3654 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3655 			goto notify;
3656 		}
3657 
3658 		goto unlock;
3659 	}
3660 
3661 	/* Set the default Authenticated Payload Timeout after an LE link
3662 	 * is established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3,
3663 	 * the WRITE_AUTH_PAYLOAD_TIMEOUT command should be sent when the
3664 	 * link is active and encryption is enabled. The conn type can be
3665 	 * either LE or ACL, and the controller must support LMP Ping.
3666 	 * AES-CCM encryption is required as well.
3667 	 */
3668 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3669 	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3670 	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3671 	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3672 		struct hci_cp_write_auth_payload_to cp;
3673 
3674 		cp.handle = cpu_to_le16(conn->handle);
3675 		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3676 		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3677 				 sizeof(cp), &cp))
3678 			bt_dev_err(hdev, "write auth payload timeout failed");
3679 	}
3680 
3681 notify:
3682 	hci_encrypt_cfm(conn, ev->status);
3683 
3684 unlock:
3685 	hci_dev_unlock(hdev);
3686 }
3687 
3688 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3689 					     struct sk_buff *skb)
3690 {
3691 	struct hci_ev_change_link_key_complete *ev = data;
3692 	struct hci_conn *conn;
3693 
3694 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3695 
3696 	hci_dev_lock(hdev);
3697 
3698 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3699 	if (conn) {
3700 		if (!ev->status)
3701 			set_bit(HCI_CONN_SECURE, &conn->flags);
3702 
3703 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3704 
3705 		hci_key_change_cfm(conn, ev->status);
3706 	}
3707 
3708 	hci_dev_unlock(hdev);
3709 }
3710 
3711 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3712 				    struct sk_buff *skb)
3713 {
3714 	struct hci_ev_remote_features *ev = data;
3715 	struct hci_conn *conn;
3716 
3717 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3718 
3719 	hci_dev_lock(hdev);
3720 
3721 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3722 	if (!conn)
3723 		goto unlock;
3724 
3725 	if (!ev->status)
3726 		memcpy(conn->features[0], ev->features, 8);
3727 
3728 	if (conn->state != BT_CONFIG)
3729 		goto unlock;
3730 
3731 	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3732 	    lmp_ext_feat_capable(conn)) {
3733 		struct hci_cp_read_remote_ext_features cp;
3734 		cp.handle = ev->handle;
3735 		cp.page = 0x01;
3736 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3737 			     sizeof(cp), &cp);
3738 		goto unlock;
3739 	}
3740 
3741 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3742 		struct hci_cp_remote_name_req cp;
3743 		memset(&cp, 0, sizeof(cp));
3744 		bacpy(&cp.bdaddr, &conn->dst);
3745 		cp.pscan_rep_mode = 0x02;
3746 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3747 	} else {
3748 		mgmt_device_connected(hdev, conn, NULL, 0);
3749 	}
3750 
3751 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3752 		conn->state = BT_CONNECTED;
3753 		hci_connect_cfm(conn, ev->status);
3754 		hci_conn_drop(conn);
3755 	}
3756 
3757 unlock:
3758 	hci_dev_unlock(hdev);
3759 }
3760 
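/* Refresh command flow control: a non-zero Num_HCI_Command_Packets credit
 * re-opens the command queue, while a zero credit arms the ncmd watchdog
 * so that a controller that stops granting credits is eventually detected.
 */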
3761 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3762 {
3763 	cancel_delayed_work(&hdev->cmd_timer);
3764 
3765 	rcu_read_lock();
3766 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3767 		if (ncmd) {
3768 			cancel_delayed_work(&hdev->ncmd_timer);
3769 			atomic_set(&hdev->cmd_cnt, 1);
3770 		} else {
3771 			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3772 				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3773 						   HCI_NCMD_TIMEOUT);
3774 		}
3775 	}
3776 	rcu_read_unlock();
3777 }
3778 
3779 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3780 					struct sk_buff *skb)
3781 {
3782 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3783 
3784 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3785 
3786 	if (rp->status)
3787 		return rp->status;
3788 
3789 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3790 	hdev->le_pkts  = rp->acl_max_pkt;
3791 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3792 	hdev->iso_pkts = rp->iso_max_pkt;
3793 
3794 	hdev->le_cnt  = hdev->le_pkts;
3795 	hdev->iso_cnt = hdev->iso_pkts;
3796 
3797 	BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3798 	       hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3799 
3800 	return rp->status;
3801 }
3802 
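/* Fail every unicast CIS in the given CIG whose connection handle was
 * never assigned by the controller; broadcast entries and connections
 * still in BT_OPEN are skipped.
 */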
3803 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3804 {
3805 	struct hci_conn *conn, *tmp;
3806 
3807 	lockdep_assert_held(&hdev->lock);
3808 
3809 	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3810 		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3811 		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3812 			continue;
3813 
3814 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3815 			hci_conn_failed(conn, status);
3816 	}
3817 }
3818 
3819 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3820 				   struct sk_buff *skb)
3821 {
3822 	struct hci_rp_le_set_cig_params *rp = data;
3823 	struct hci_cp_le_set_cig_params *cp;
3824 	struct hci_conn *conn;
3825 	u8 status = rp->status;
3826 	bool pending = false;
3827 	int i;
3828 
3829 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3830 
3831 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3832 	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3833 			    rp->cig_id != cp->cig_id)) {
3834 		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3835 		status = HCI_ERROR_UNSPECIFIED;
3836 	}
3837 
3838 	hci_dev_lock(hdev);
3839 
3840 	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3841 	 *
3842 	 * If the Status return parameter is non-zero, then the state of the CIG
3843 	 * and its CIS configurations shall not be changed by the command. If
3844 	 * the CIG did not already exist, it shall not be created.
3845 	 */
3846 	if (status) {
3847 		/* Keep current configuration, fail only the unbound CIS */
3848 		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3849 		goto unlock;
3850 	}
3851 
3852 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3853 	 *
3854 	 * If the Status return parameter is zero, then the Controller shall
3855 	 * set the Connection_Handle arrayed return parameter to the connection
3856 	 * handle(s) corresponding to the CIS configurations specified in
3857 	 * the CIS_IDs command parameter, in the same order.
3858 	 */
3859 	for (i = 0; i < rp->num_handles; ++i) {
3860 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3861 						cp->cis[i].cis_id);
3862 		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3863 			continue;
3864 
3865 		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3866 			continue;
3867 
3868 		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3869 			continue;
3870 
3871 		if (conn->state == BT_CONNECT)
3872 			pending = true;
3873 	}
3874 
3875 unlock:
3876 	if (pending)
3877 		hci_le_create_cis_pending(hdev);
3878 
3879 	hci_dev_unlock(hdev);
3880 
3881 	return rp->status;
3882 }
3883 
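/* LE Setup ISO Data Path: tear the connection down on failure; otherwise
 * confirm it once the relevant data path direction has been configured,
 * so userspace is not notified before the path is usable.
 */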
3884 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3885 				   struct sk_buff *skb)
3886 {
3887 	struct hci_rp_le_setup_iso_path *rp = data;
3888 	struct hci_cp_le_setup_iso_path *cp;
3889 	struct hci_conn *conn;
3890 
3891 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3892 
3893 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3894 	if (!cp)
3895 		return rp->status;
3896 
3897 	hci_dev_lock(hdev);
3898 
3899 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3900 	if (!conn)
3901 		goto unlock;
3902 
3903 	if (rp->status) {
3904 		hci_connect_cfm(conn, rp->status);
3905 		hci_conn_del(conn);
3906 		goto unlock;
3907 	}
3908 
3909 	switch (cp->direction) {
3910 	/* Input (Host to Controller) */
3911 	case 0x00:
3912 		/* Only confirm connection if output only */
3913 		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3914 			hci_connect_cfm(conn, rp->status);
3915 		break;
3916 	/* Output (Controller to Host) */
3917 	case 0x01:
3918 		/* Confirm connection since conn->iso_qos is always configured
3919 		 * last.
3920 		 */
3921 		hci_connect_cfm(conn, rp->status);
3922 
3923 		/* Notify device connected in case it is a BIG Sync */
3924 		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3925 			mgmt_device_connected(hdev, conn, NULL, 0);
3926 
3927 		break;
3928 	}
3929 
3930 unlock:
3931 	hci_dev_unlock(hdev);
3932 	return rp->status;
3933 }
3934 
3935 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3936 {
3937 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3938 }
3939 
3940 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3941 				   struct sk_buff *skb)
3942 {
3943 	struct hci_ev_status *rp = data;
3944 	struct hci_cp_le_set_per_adv_params *cp;
3945 
3946 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3947 
3948 	if (rp->status)
3949 		return rp->status;
3950 
3951 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3952 	if (!cp)
3953 		return rp->status;
3954 
3955 	/* TODO: set the conn state */
3956 	return rp->status;
3957 }
3958 
3959 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3960 				       struct sk_buff *skb)
3961 {
3962 	struct hci_ev_status *rp = data;
3963 	struct hci_cp_le_set_per_adv_enable *cp;
3964 	struct adv_info *adv = NULL, *n;
3965 	u8 per_adv_cnt = 0;
3966 
3967 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3968 
3969 	if (rp->status)
3970 		return rp->status;
3971 
3972 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3973 	if (!cp)
3974 		return rp->status;
3975 
3976 	hci_dev_lock(hdev);
3977 
3978 	adv = hci_find_adv_instance(hdev, cp->handle);
3979 
3980 	if (cp->enable) {
3981 		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3982 
3983 		if (adv)
3984 			adv->enabled = true;
3985 	} else {
3986 		/* If just one instance was disabled, check whether any other
3987 		 * instances are still enabled before clearing HCI_LE_PER_ADV.
3988 		 * The current periodic adv instance will be marked as
3989 		 * disabled once extended advertising is also disabled.
3990 		 */
3991 		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3992 					 list) {
3993 			if (adv->periodic && adv->enabled)
3994 				per_adv_cnt++;
3995 		}
3996 
3997 		if (per_adv_cnt > 1)
3998 			goto unlock;
3999 
4000 		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4001 	}
4002 
4003 unlock:
4004 	hci_dev_unlock(hdev);
4005 
4006 	return rp->status;
4007 }
4008 
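/* Command Complete dispatch table: each entry binds an opcode to its
 * handler along with the minimum and maximum payload lengths expected,
 * so the event payload can be validated before the handler parses it.
 */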
4009 #define HCI_CC_VL(_op, _func, _min, _max) \
4010 { \
4011 	.op = _op, \
4012 	.func = _func, \
4013 	.min_len = _min, \
4014 	.max_len = _max, \
4015 }
4016 
4017 #define HCI_CC(_op, _func, _len) \
4018 	HCI_CC_VL(_op, _func, _len, _len)
4019 
4020 #define HCI_CC_STATUS(_op, _func) \
4021 	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4022 
4023 static const struct hci_cc {
4024 	u16  op;
4025 	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4026 	u16  min_len;
4027 	u16  max_len;
4028 } hci_cc_table[] = {
4029 	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4030 	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4031 	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4032 	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4033 		      hci_cc_remote_name_req_cancel),
4034 	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4035 	       sizeof(struct hci_rp_role_discovery)),
4036 	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4037 	       sizeof(struct hci_rp_read_link_policy)),
4038 	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4039 	       sizeof(struct hci_rp_write_link_policy)),
4040 	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4041 	       sizeof(struct hci_rp_read_def_link_policy)),
4042 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4043 		      hci_cc_write_def_link_policy),
4044 	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4045 	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4046 	       sizeof(struct hci_rp_read_stored_link_key)),
4047 	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4048 	       sizeof(struct hci_rp_delete_stored_link_key)),
4049 	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4050 	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4051 	       sizeof(struct hci_rp_read_local_name)),
4052 	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4053 	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4054 	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4055 	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4056 	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4057 	       sizeof(struct hci_rp_read_class_of_dev)),
4058 	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4059 	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4060 	       sizeof(struct hci_rp_read_voice_setting)),
4061 	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4062 	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4063 	       sizeof(struct hci_rp_read_num_supported_iac)),
4064 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4065 	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4066 	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4067 	       sizeof(struct hci_rp_read_auth_payload_to)),
4068 	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4069 	       sizeof(struct hci_rp_write_auth_payload_to)),
4070 	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4071 	       sizeof(struct hci_rp_read_local_version)),
4072 	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4073 	       sizeof(struct hci_rp_read_local_commands)),
4074 	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4075 	       sizeof(struct hci_rp_read_local_features)),
4076 	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4077 	       sizeof(struct hci_rp_read_local_ext_features)),
4078 	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4079 	       sizeof(struct hci_rp_read_buffer_size)),
4080 	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4081 	       sizeof(struct hci_rp_read_bd_addr)),
4082 	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4083 	       sizeof(struct hci_rp_read_local_pairing_opts)),
4084 	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4085 	       sizeof(struct hci_rp_read_page_scan_activity)),
4086 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4087 		      hci_cc_write_page_scan_activity),
4088 	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4089 	       sizeof(struct hci_rp_read_page_scan_type)),
4090 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4091 	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4092 	       sizeof(struct hci_rp_read_data_block_size)),
4093 	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4094 	       sizeof(struct hci_rp_read_flow_control_mode)),
4095 	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4096 	       sizeof(struct hci_rp_read_local_amp_info)),
4097 	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4098 	       sizeof(struct hci_rp_read_clock)),
4099 	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4100 	       sizeof(struct hci_rp_read_enc_key_size)),
4101 	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4102 	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4103 	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4104 	       hci_cc_read_def_err_data_reporting,
4105 	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4106 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4107 		      hci_cc_write_def_err_data_reporting),
4108 	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4109 	       sizeof(struct hci_rp_pin_code_reply)),
4110 	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4111 	       sizeof(struct hci_rp_pin_code_neg_reply)),
4112 	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4113 	       sizeof(struct hci_rp_read_local_oob_data)),
4114 	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4115 	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4116 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4117 	       sizeof(struct hci_rp_le_read_buffer_size)),
4118 	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4119 	       sizeof(struct hci_rp_le_read_local_features)),
4120 	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4121 	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4122 	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4123 	       sizeof(struct hci_rp_user_confirm_reply)),
4124 	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4125 	       sizeof(struct hci_rp_user_confirm_reply)),
4126 	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4127 	       sizeof(struct hci_rp_user_confirm_reply)),
4128 	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4129 	       sizeof(struct hci_rp_user_confirm_reply)),
4130 	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4131 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4132 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4133 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4134 	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4135 	       hci_cc_le_read_accept_list_size,
4136 	       sizeof(struct hci_rp_le_read_accept_list_size)),
4137 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4138 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4139 		      hci_cc_le_add_to_accept_list),
4140 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4141 		      hci_cc_le_del_from_accept_list),
4142 	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4143 	       sizeof(struct hci_rp_le_read_supported_states)),
4144 	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4145 	       sizeof(struct hci_rp_le_read_def_data_len)),
4146 	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4147 		      hci_cc_le_write_def_data_len),
4148 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4149 		      hci_cc_le_add_to_resolv_list),
4150 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4151 		      hci_cc_le_del_from_resolv_list),
4152 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4153 		      hci_cc_le_clear_resolv_list),
4154 	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4155 	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4156 	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4157 		      hci_cc_le_set_addr_resolution_enable),
4158 	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4159 	       sizeof(struct hci_rp_le_read_max_data_len)),
4160 	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4161 		      hci_cc_write_le_host_supported),
4162 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4163 	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4164 	       sizeof(struct hci_rp_read_rssi)),
4165 	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4166 	       sizeof(struct hci_rp_read_tx_power)),
4167 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4168 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4169 		      hci_cc_le_set_ext_scan_param),
4170 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4171 		      hci_cc_le_set_ext_scan_enable),
4172 	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4173 	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4174 	       hci_cc_le_read_num_adv_sets,
4175 	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4176 	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4177 	       sizeof(struct hci_rp_le_set_ext_adv_params)),
4178 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4179 		      hci_cc_le_set_ext_adv_enable),
4180 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4181 		      hci_cc_le_set_adv_set_random_addr),
4182 	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4183 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4184 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4185 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4186 		      hci_cc_le_set_per_adv_enable),
4187 	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4188 	       sizeof(struct hci_rp_le_read_transmit_power)),
4189 	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4190 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4191 	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4192 	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4193 		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4194 	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4195 	       sizeof(struct hci_rp_le_setup_iso_path)),
4196 };
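/* Editorial sketch (not part of the original source): a new Command
 * Complete handler is wired up with a single entry in hci_cc_table.
 * The opcode HCI_OP_VENDOR_EXAMPLE and the handler below are
 * hypothetical, shown only to illustrate the macro flavours:
 *
 *	static u8 hci_cc_vendor_example(struct hci_dev *hdev, void *data,
 *					struct sk_buff *skb)
 *	{
 *		struct hci_ev_status *rp = data;
 *
 *		bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 *		return rp->status;
 *	}
 *
 * registered inside hci_cc_table as:
 *
 *	HCI_CC_STATUS(HCI_OP_VENDOR_EXAMPLE, hci_cc_vendor_example),
 *
 * HCI_CC() would be used instead for a fixed-size reply structure and
 * HCI_CC_VL() when the reply carries trailing variable-length data.
 */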
4197 
4198 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4199 		      struct sk_buff *skb)
4200 {
4201 	void *data;
4202 
4203 	if (skb->len < cc->min_len) {
4204 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4205 			   cc->op, skb->len, cc->min_len);
4206 		return HCI_ERROR_UNSPECIFIED;
4207 	}
4208 
4209 	/* Just warn if the length is over max_len size; it may still be
4210 	 * possible to partially parse the cc, so leave it to the callback
4211 	 * to decide if that is acceptable.
4212 	 */
4213 	if (skb->len > cc->max_len)
4214 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4215 			    cc->op, skb->len, cc->max_len);
4216 
4217 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4218 	if (!data)
4219 		return HCI_ERROR_UNSPECIFIED;
4220 
4221 	return cc->func(hdev, data, skb);
4222 }
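/* Editorial sketch (not part of the original source): for an entry
 * registered with HCI_CC_VL(), hci_cc_func() pulls only min_len bytes,
 * so the callback sees the fixed header in @data while any trailing
 * variable-length part is still queued in @skb. A hypothetical handler
 * could consume it like this (struct hci_rp_example and its fields are
 * assumptions):
 *
 *	static u8 hci_cc_example_vl(struct hci_dev *hdev, void *data,
 *				    struct sk_buff *skb)
 *	{
 *		struct hci_rp_example *rp = data;
 *		__le16 *entry;
 *		int i;
 *
 *		for (i = 0; i < rp->num_entries; i++) {
 *			entry = skb_pull_data(skb, sizeof(*entry));
 *			if (!entry)
 *				return HCI_ERROR_UNSPECIFIED;
 *			// process le16_to_cpup(entry) here
 *		}
 *
 *		return rp->status;
 *	}
 */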
4223 
4224 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4225 				 struct sk_buff *skb, u16 *opcode, u8 *status,
4226 				 hci_req_complete_t *req_complete,
4227 				 hci_req_complete_skb_t *req_complete_skb)
4228 {
4229 	struct hci_ev_cmd_complete *ev = data;
4230 	int i;
4231 
4232 	*opcode = __le16_to_cpu(ev->opcode);
4233 
4234 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4235 
4236 	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4237 		if (hci_cc_table[i].op == *opcode) {
4238 			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4239 			break;
4240 		}
4241 	}
4242 
4243 	if (i == ARRAY_SIZE(hci_cc_table)) {
4244 		/* Unknown opcode, assume byte 0 contains the status, so
4245 		 * that e.g. __hci_cmd_sync() properly returns errors
4246 		 * for vendor specific commands sent by HCI drivers.
4247 		 * If a vendor doesn't actually follow this convention we may
4248 		 * need to introduce a vendor CC table in order to properly set
4249 		 * the status.
4250 		 */
4251 		*status = skb->data[0];
4252 	}
4253 
4254 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4255 
4256 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4257 			     req_complete_skb);
4258 
4259 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4260 		bt_dev_err(hdev,
4261 			   "unexpected event for opcode 0x%4.4x", *opcode);
4262 		return;
4263 	}
4264 
4265 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4266 		queue_work(hdev->workqueue, &hdev->cmd_work);
4267 }
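/* Editorial note with sketch (not part of the original source): the
 * byte-0 fallback above is what allows a driver to issue a vendor
 * command through __hci_cmd_sync() and still get a meaningful status
 * back even though no handler for it exists in hci_cc_table. The
 * 0xfc01 opcode below is hypothetical:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 *
 * The comment in hci_cmd_complete_evt() above is the contract: byte 0
 * of an unknown reply is treated as the status, which is what lets
 * __hci_cmd_sync() report errors for such commands.
 */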
4268 
4269 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4270 {
4271 	struct hci_cp_le_create_cis *cp;
4272 	bool pending = false;
4273 	int i;
4274 
4275 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4276 
4277 	if (!status)
4278 		return;
4279 
4280 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4281 	if (!cp)
4282 		return;
4283 
4284 	hci_dev_lock(hdev);
4285 
4286 	/* Remove connection if command failed */
4287 	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4288 		struct hci_conn *conn;
4289 		u16 handle;
4290 
4291 		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4292 
4293 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4294 		if (conn) {
4295 			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4296 					       &conn->flags))
4297 				pending = true;
4298 			conn->state = BT_CLOSED;
4299 			hci_connect_cfm(conn, status);
4300 			hci_conn_del(conn);
4301 		}
4302 	}
4303 
4304 	if (pending)
4305 		hci_le_create_cis_pending(hdev);
4306 
4307 	hci_dev_unlock(hdev);
4308 }
4309 
4310 #define HCI_CS(_op, _func) \
4311 { \
4312 	.op = _op, \
4313 	.func = _func, \
4314 }
4315 
4316 static const struct hci_cs {
4317 	u16  op;
4318 	void (*func)(struct hci_dev *hdev, __u8 status);
4319 } hci_cs_table[] = {
4320 	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4321 	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4322 	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4323 	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4324 	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4325 	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4326 	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4327 	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4328 	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4329 	       hci_cs_read_remote_ext_features),
4330 	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4331 	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4332 	       hci_cs_enhanced_setup_sync_conn),
4333 	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4334 	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4335 	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4336 	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4337 	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4338 	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4339 	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4340 	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4341 	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4342 };
4343 
4344 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4345 			       struct sk_buff *skb, u16 *opcode, u8 *status,
4346 			       hci_req_complete_t *req_complete,
4347 			       hci_req_complete_skb_t *req_complete_skb)
4348 {
4349 	struct hci_ev_cmd_status *ev = data;
4350 	int i;
4351 
4352 	*opcode = __le16_to_cpu(ev->opcode);
4353 	*status = ev->status;
4354 
4355 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4356 
4357 	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4358 		if (hci_cs_table[i].op == *opcode) {
4359 			hci_cs_table[i].func(hdev, ev->status);
4360 			break;
4361 		}
4362 	}
4363 
4364 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4365 
4366 	/* Indicate request completion if the command failed. Also, if
4367 	 * we're not waiting for a special event and we get a success
4368 	 * command status, we should try to flag the request as completed
4369 	 * (since for this kind of command there will not be a command
4370 	 * complete event).
4371 	 */
4372 	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4373 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4374 				     req_complete_skb);
4375 		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4376 			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4377 				   *opcode);
4378 			return;
4379 		}
4380 	}
4381 
4382 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4383 		queue_work(hdev->workqueue, &hdev->cmd_work);
4384 }
4385 
4386 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4387 				   struct sk_buff *skb)
4388 {
4389 	struct hci_ev_hardware_error *ev = data;
4390 
4391 	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4392 
4393 	hdev->hw_error_code = ev->code;
4394 
4395 	queue_work(hdev->req_workqueue, &hdev->error_reset);
4396 }
4397 
4398 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4399 				struct sk_buff *skb)
4400 {
4401 	struct hci_ev_role_change *ev = data;
4402 	struct hci_conn *conn;
4403 
4404 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4405 
4406 	hci_dev_lock(hdev);
4407 
4408 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4409 	if (conn) {
4410 		if (!ev->status)
4411 			conn->role = ev->role;
4412 
4413 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4414 
4415 		hci_role_switch_cfm(conn, ev->status, ev->role);
4416 	}
4417 
4418 	hci_dev_unlock(hdev);
4419 }
4420 
4421 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4422 				  struct sk_buff *skb)
4423 {
4424 	struct hci_ev_num_comp_pkts *ev = data;
4425 	int i;
4426 
4427 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4428 			     flex_array_size(ev, handles, ev->num)))
4429 		return;
4430 
4431 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4432 		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4433 		return;
4434 	}
4435 
4436 	bt_dev_dbg(hdev, "num %d", ev->num);
4437 
4438 	for (i = 0; i < ev->num; i++) {
4439 		struct hci_comp_pkts_info *info = &ev->handles[i];
4440 		struct hci_conn *conn;
4441 		__u16  handle, count;
4442 
4443 		handle = __le16_to_cpu(info->handle);
4444 		count  = __le16_to_cpu(info->count);
4445 
4446 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4447 		if (!conn)
4448 			continue;
4449 
4450 		conn->sent -= count;
4451 
4452 		switch (conn->type) {
4453 		case ACL_LINK:
4454 			hdev->acl_cnt += count;
4455 			if (hdev->acl_cnt > hdev->acl_pkts)
4456 				hdev->acl_cnt = hdev->acl_pkts;
4457 			break;
4458 
4459 		case LE_LINK:
4460 			if (hdev->le_pkts) {
4461 				hdev->le_cnt += count;
4462 				if (hdev->le_cnt > hdev->le_pkts)
4463 					hdev->le_cnt = hdev->le_pkts;
4464 			} else {
4465 				hdev->acl_cnt += count;
4466 				if (hdev->acl_cnt > hdev->acl_pkts)
4467 					hdev->acl_cnt = hdev->acl_pkts;
4468 			}
4469 			break;
4470 
4471 		case SCO_LINK:
4472 			hdev->sco_cnt += count;
4473 			if (hdev->sco_cnt > hdev->sco_pkts)
4474 				hdev->sco_cnt = hdev->sco_pkts;
4475 			break;
4476 
4477 		case ISO_LINK:
4478 			if (hdev->iso_pkts) {
4479 				hdev->iso_cnt += count;
4480 				if (hdev->iso_cnt > hdev->iso_pkts)
4481 					hdev->iso_cnt = hdev->iso_pkts;
4482 			} else if (hdev->le_pkts) {
4483 				hdev->le_cnt += count;
4484 				if (hdev->le_cnt > hdev->le_pkts)
4485 					hdev->le_cnt = hdev->le_pkts;
4486 			} else {
4487 				hdev->acl_cnt += count;
4488 				if (hdev->acl_cnt > hdev->acl_pkts)
4489 					hdev->acl_cnt = hdev->acl_pkts;
4490 			}
4491 			break;
4492 
4493 		default:
4494 			bt_dev_err(hdev, "unknown type %d conn %p",
4495 				   conn->type, conn);
4496 			break;
4497 		}
4498 	}
4499 
4500 	queue_work(hdev->workqueue, &hdev->tx_work);
4501 }
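/* Editorial sketch (not part of the original source): every arm of the
 * switch above performs the same saturating "return credits" update.
 * A hypothetical helper capturing that pattern (the name
 * hci_return_credits is an assumption):
 *
 *	static void hci_return_credits(unsigned int *cnt, unsigned int max,
 *				       u16 count)
 *	{
 *		*cnt += count;
 *		if (*cnt > max)
 *			*cnt = max;
 *	}
 *
 * e.g. hci_return_credits(&hdev->acl_cnt, hdev->acl_pkts, count);
 * The clamp guards against controllers that report more completed
 * packets than were ever credited.
 */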
4502 
4503 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4504 						 __u16 handle)
4505 {
4506 	struct hci_chan *chan;
4507 
4508 	switch (hdev->dev_type) {
4509 	case HCI_PRIMARY:
4510 		return hci_conn_hash_lookup_handle(hdev, handle);
4511 	case HCI_AMP:
4512 		chan = hci_chan_lookup_handle(hdev, handle);
4513 		if (chan)
4514 			return chan->conn;
4515 		break;
4516 	default:
4517 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4518 		break;
4519 	}
4520 
4521 	return NULL;
4522 }
4523 
4524 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4525 				    struct sk_buff *skb)
4526 {
4527 	struct hci_ev_num_comp_blocks *ev = data;
4528 	int i;
4529 
4530 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4531 			     flex_array_size(ev, handles, ev->num_hndl)))
4532 		return;
4533 
4534 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4535 		bt_dev_err(hdev, "wrong event for mode %d",
4536 			   hdev->flow_ctl_mode);
4537 		return;
4538 	}
4539 
4540 	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4541 		   ev->num_hndl);
4542 
4543 	for (i = 0; i < ev->num_hndl; i++) {
4544 		struct hci_comp_blocks_info *info = &ev->handles[i];
4545 		struct hci_conn *conn = NULL;
4546 		__u16  handle, block_count;
4547 
4548 		handle = __le16_to_cpu(info->handle);
4549 		block_count = __le16_to_cpu(info->blocks);
4550 
4551 		conn = __hci_conn_lookup_handle(hdev, handle);
4552 		if (!conn)
4553 			continue;
4554 
4555 		conn->sent -= block_count;
4556 
4557 		switch (conn->type) {
4558 		case ACL_LINK:
4559 		case AMP_LINK:
4560 			hdev->block_cnt += block_count;
4561 			if (hdev->block_cnt > hdev->num_blocks)
4562 				hdev->block_cnt = hdev->num_blocks;
4563 			break;
4564 
4565 		default:
4566 			bt_dev_err(hdev, "unknown type %d conn %p",
4567 				   conn->type, conn);
4568 			break;
4569 		}
4570 	}
4571 
4572 	queue_work(hdev->workqueue, &hdev->tx_work);
4573 }
4574 
4575 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4576 				struct sk_buff *skb)
4577 {
4578 	struct hci_ev_mode_change *ev = data;
4579 	struct hci_conn *conn;
4580 
4581 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4582 
4583 	hci_dev_lock(hdev);
4584 
4585 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4586 	if (conn) {
4587 		conn->mode = ev->mode;
4588 
4589 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4590 					&conn->flags)) {
4591 			if (conn->mode == HCI_CM_ACTIVE)
4592 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4593 			else
4594 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4595 		}
4596 
4597 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4598 			hci_sco_setup(conn, ev->status);
4599 	}
4600 
4601 	hci_dev_unlock(hdev);
4602 }
4603 
4604 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4605 				     struct sk_buff *skb)
4606 {
4607 	struct hci_ev_pin_code_req *ev = data;
4608 	struct hci_conn *conn;
4609 
4610 	bt_dev_dbg(hdev, "");
4611 
4612 	hci_dev_lock(hdev);
4613 
4614 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4615 	if (!conn)
4616 		goto unlock;
4617 
4618 	if (conn->state == BT_CONNECTED) {
4619 		hci_conn_hold(conn);
4620 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4621 		hci_conn_drop(conn);
4622 	}
4623 
4624 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4625 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4626 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4627 			     sizeof(ev->bdaddr), &ev->bdaddr);
4628 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4629 		u8 secure;
4630 
4631 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4632 			secure = 1;
4633 		else
4634 			secure = 0;
4635 
4636 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4637 	}
4638 
4639 unlock:
4640 	hci_dev_unlock(hdev);
4641 }
4642 
4643 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4644 {
4645 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4646 		return;
4647 
4648 	conn->pin_length = pin_len;
4649 	conn->key_type = key_type;
4650 
4651 	switch (key_type) {
4652 	case HCI_LK_LOCAL_UNIT:
4653 	case HCI_LK_REMOTE_UNIT:
4654 	case HCI_LK_DEBUG_COMBINATION:
4655 		return;
4656 	case HCI_LK_COMBINATION:
4657 		if (pin_len == 16)
4658 			conn->pending_sec_level = BT_SECURITY_HIGH;
4659 		else
4660 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4661 		break;
4662 	case HCI_LK_UNAUTH_COMBINATION_P192:
4663 	case HCI_LK_UNAUTH_COMBINATION_P256:
4664 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4665 		break;
4666 	case HCI_LK_AUTH_COMBINATION_P192:
4667 		conn->pending_sec_level = BT_SECURITY_HIGH;
4668 		break;
4669 	case HCI_LK_AUTH_COMBINATION_P256:
4670 		conn->pending_sec_level = BT_SECURITY_FIPS;
4671 		break;
4672 	}
4673 }
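/* Editorial worked example (not part of the original source): per the
 * switch above, a legacy combination key derived from a full 16-digit
 * PIN counts as authenticated, so conn_set_key(conn, HCI_LK_COMBINATION,
 * 16) raises conn->pending_sec_level to BT_SECURITY_HIGH, while any
 * shorter PIN only reaches BT_SECURITY_MEDIUM.
 */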
4674 
4675 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4676 				     struct sk_buff *skb)
4677 {
4678 	struct hci_ev_link_key_req *ev = data;
4679 	struct hci_cp_link_key_reply cp;
4680 	struct hci_conn *conn;
4681 	struct link_key *key;
4682 
4683 	bt_dev_dbg(hdev, "");
4684 
4685 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4686 		return;
4687 
4688 	hci_dev_lock(hdev);
4689 
4690 	key = hci_find_link_key(hdev, &ev->bdaddr);
4691 	if (!key) {
4692 		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4693 		goto not_found;
4694 	}
4695 
4696 	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4697 
4698 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4699 	if (conn) {
4700 		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4701 
4702 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4703 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4704 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4705 			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4706 			goto not_found;
4707 		}
4708 
4709 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4710 		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4711 		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4712 			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4713 			goto not_found;
4714 		}
4715 
4716 		conn_set_key(conn, key->type, key->pin_len);
4717 	}
4718 
4719 	bacpy(&cp.bdaddr, &ev->bdaddr);
4720 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4721 
4722 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4723 
4724 	hci_dev_unlock(hdev);
4725 
4726 	return;
4727 
4728 not_found:
4729 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4730 	hci_dev_unlock(hdev);
4731 }
4732 
4733 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4734 				    struct sk_buff *skb)
4735 {
4736 	struct hci_ev_link_key_notify *ev = data;
4737 	struct hci_conn *conn;
4738 	struct link_key *key;
4739 	bool persistent;
4740 	u8 pin_len = 0;
4741 
4742 	bt_dev_dbg(hdev, "");
4743 
4744 	hci_dev_lock(hdev);
4745 
4746 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4747 	if (!conn)
4748 		goto unlock;
4749 
4750 	/* Ignore NULL link key to protect against CVE-2020-26555 */
4751 	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4752 		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4753 			   &ev->bdaddr);
4754 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4755 		hci_conn_drop(conn);
4756 		goto unlock;
4757 	}
4758 
4759 	hci_conn_hold(conn);
4760 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4761 	hci_conn_drop(conn);
4762 
4763 	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4764 	conn_set_key(conn, ev->key_type, conn->pin_length);
4765 
4766 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4767 		goto unlock;
4768 
4769 	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4770 			        ev->key_type, pin_len, &persistent);
4771 	if (!key)
4772 		goto unlock;
4773 
4774 	/* Update connection information since adding the key will have
4775 	 * fixed up the type in the case of changed combination keys.
4776 	 */
4777 	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4778 		conn_set_key(conn, key->type, key->pin_len);
4779 
4780 	mgmt_new_link_key(hdev, key, persistent);
4781 
4782 	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4783 	 * is set. If it's not set simply remove the key from the kernel
4784 	 * list (we've still notified user space about it but with
4785 	 * store_hint being 0).
4786 	 */
4787 	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4788 	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4789 		list_del_rcu(&key->list);
4790 		kfree_rcu(key, rcu);
4791 		goto unlock;
4792 	}
4793 
4794 	if (persistent)
4795 		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4796 	else
4797 		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4798 
4799 unlock:
4800 	hci_dev_unlock(hdev);
4801 }
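/* Editorial note with sketch (not part of the original source): the
 * CVE-2020-26555 check above uses crypto_memneq() rather than memcmp()
 * so the comparison runs in constant time and does not leak how many
 * key bytes matched. The same test in isolation (key_is_all_zero is a
 * hypothetical name):
 *
 *	static bool key_is_all_zero(const u8 key[HCI_LINK_KEY_SIZE])
 *	{
 *		return !crypto_memneq(key, ZERO_KEY, HCI_LINK_KEY_SIZE);
 *	}
 *
 * crypto_memneq() returns non-zero when the buffers differ, so the
 * negation is true only for an all-zero link key.
 */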
4802 
4803 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4804 				 struct sk_buff *skb)
4805 {
4806 	struct hci_ev_clock_offset *ev = data;
4807 	struct hci_conn *conn;
4808 
4809 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4810 
4811 	hci_dev_lock(hdev);
4812 
4813 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4814 	if (conn && !ev->status) {
4815 		struct inquiry_entry *ie;
4816 
4817 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4818 		if (ie) {
4819 			ie->data.clock_offset = ev->clock_offset;
4820 			ie->timestamp = jiffies;
4821 		}
4822 	}
4823 
4824 	hci_dev_unlock(hdev);
4825 }
4826 
4827 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4828 				    struct sk_buff *skb)
4829 {
4830 	struct hci_ev_pkt_type_change *ev = data;
4831 	struct hci_conn *conn;
4832 
4833 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4834 
4835 	hci_dev_lock(hdev);
4836 
4837 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4838 	if (conn && !ev->status)
4839 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4840 
4841 	hci_dev_unlock(hdev);
4842 }
4843 
4844 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4845 				   struct sk_buff *skb)
4846 {
4847 	struct hci_ev_pscan_rep_mode *ev = data;
4848 	struct inquiry_entry *ie;
4849 
4850 	bt_dev_dbg(hdev, "");
4851 
4852 	hci_dev_lock(hdev);
4853 
4854 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4855 	if (ie) {
4856 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4857 		ie->timestamp = jiffies;
4858 	}
4859 
4860 	hci_dev_unlock(hdev);
4861 }
4862 
4863 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4864 					     struct sk_buff *skb)
4865 {
4866 	struct hci_ev_inquiry_result_rssi *ev = edata;
4867 	struct inquiry_data data;
4868 	int i;
4869 
4870 	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4871 
4872 	if (!ev->num)
4873 		return;
4874 
4875 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4876 		return;
4877 
4878 	hci_dev_lock(hdev);
4879 
4880 	if (skb->len == array_size(ev->num,
4881 				   sizeof(struct inquiry_info_rssi_pscan))) {
4882 		struct inquiry_info_rssi_pscan *info;
4883 
4884 		for (i = 0; i < ev->num; i++) {
4885 			u32 flags;
4886 
4887 			info = hci_ev_skb_pull(hdev, skb,
4888 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4889 					       sizeof(*info));
4890 			if (!info) {
4891 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4892 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4893 				goto unlock;
4894 			}
4895 
4896 			bacpy(&data.bdaddr, &info->bdaddr);
4897 			data.pscan_rep_mode	= info->pscan_rep_mode;
4898 			data.pscan_period_mode	= info->pscan_period_mode;
4899 			data.pscan_mode		= info->pscan_mode;
4900 			memcpy(data.dev_class, info->dev_class, 3);
4901 			data.clock_offset	= info->clock_offset;
4902 			data.rssi		= info->rssi;
4903 			data.ssp_mode		= 0x00;
4904 
4905 			flags = hci_inquiry_cache_update(hdev, &data, false);
4906 
4907 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4908 					  info->dev_class, info->rssi,
4909 					  flags, NULL, 0, NULL, 0, 0);
4910 		}
4911 	} else if (skb->len == array_size(ev->num,
4912 					  sizeof(struct inquiry_info_rssi))) {
4913 		struct inquiry_info_rssi *info;
4914 
4915 		for (i = 0; i < ev->num; i++) {
4916 			u32 flags;
4917 
4918 			info = hci_ev_skb_pull(hdev, skb,
4919 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4920 					       sizeof(*info));
4921 			if (!info) {
4922 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4923 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4924 				goto unlock;
4925 			}
4926 
4927 			bacpy(&data.bdaddr, &info->bdaddr);
4928 			data.pscan_rep_mode	= info->pscan_rep_mode;
4929 			data.pscan_period_mode	= info->pscan_period_mode;
4930 			data.pscan_mode		= 0x00;
4931 			memcpy(data.dev_class, info->dev_class, 3);
4932 			data.clock_offset	= info->clock_offset;
4933 			data.rssi		= info->rssi;
4934 			data.ssp_mode		= 0x00;
4935 
4936 			flags = hci_inquiry_cache_update(hdev, &data, false);
4937 
4938 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4939 					  info->dev_class, info->rssi,
4940 					  flags, NULL, 0, NULL, 0, 0);
4941 		}
4942 	} else {
4943 		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4944 			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4945 	}
4946 unlock:
4947 	hci_dev_unlock(hdev);
4948 }
4949 
4950 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4951 					struct sk_buff *skb)
4952 {
4953 	struct hci_ev_remote_ext_features *ev = data;
4954 	struct hci_conn *conn;
4955 
4956 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4957 
4958 	hci_dev_lock(hdev);
4959 
4960 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4961 	if (!conn)
4962 		goto unlock;
4963 
4964 	if (ev->page < HCI_MAX_PAGES)
4965 		memcpy(conn->features[ev->page], ev->features, 8);
4966 
4967 	if (!ev->status && ev->page == 0x01) {
4968 		struct inquiry_entry *ie;
4969 
4970 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4971 		if (ie)
4972 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4973 
4974 		if (ev->features[0] & LMP_HOST_SSP) {
4975 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4976 		} else {
4977 			/* It is mandatory by the Bluetooth specification that
4978 			 * Extended Inquiry Results are only used when Secure
4979 			 * Simple Pairing is enabled, but some devices violate
4980 			 * this.
4981 			 *
4982 			 * To make these devices work, the internal SSP
4983 			 * enabled flag needs to be cleared if the remote host
4984 			 * features do not indicate SSP support. */
4985 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4986 		}
4987 
4988 		if (ev->features[0] & LMP_HOST_SC)
4989 			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4990 	}
4991 
4992 	if (conn->state != BT_CONFIG)
4993 		goto unlock;
4994 
4995 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4996 		struct hci_cp_remote_name_req cp;
4997 		memset(&cp, 0, sizeof(cp));
4998 		bacpy(&cp.bdaddr, &conn->dst);
4999 		cp.pscan_rep_mode = 0x02;
5000 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5001 	} else {
5002 		mgmt_device_connected(hdev, conn, NULL, 0);
5003 	}
5004 
5005 	if (!hci_outgoing_auth_needed(hdev, conn)) {
5006 		conn->state = BT_CONNECTED;
5007 		hci_connect_cfm(conn, ev->status);
5008 		hci_conn_drop(conn);
5009 	}
5010 
5011 unlock:
5012 	hci_dev_unlock(hdev);
5013 }
5014 
5015 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5016 				       struct sk_buff *skb)
5017 {
5018 	struct hci_ev_sync_conn_complete *ev = data;
5019 	struct hci_conn *conn;
5020 	u8 status = ev->status;
5021 
5022 	switch (ev->link_type) {
5023 	case SCO_LINK:
5024 	case ESCO_LINK:
5025 		break;
5026 	default:
5027 		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5028 		 * for HCI_Synchronous_Connection_Complete is limited to
5029 		 * either SCO or eSCO
5030 		 */
5031 		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5032 		return;
5033 	}
5034 
5035 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
5036 
5037 	hci_dev_lock(hdev);
5038 
5039 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5040 	if (!conn) {
5041 		if (ev->link_type == ESCO_LINK)
5042 			goto unlock;
5043 
5044 		/* When the link type in the event indicates a SCO connection
5045 		 * and lookup of the connection object fails, then check
5046 		 * if an eSCO connection object exists.
5047 		 *
5048 		 * The core limits the synchronous connections to either
5049 		 * SCO or eSCO. The eSCO connection is preferred and is
5050 		 * attempted first; until it is successfully established,
5051 		 * the link type will be hinted as eSCO.
5052 		 */
5053 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5054 		if (!conn)
5055 			goto unlock;
5056 	}
5057 
5058 	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5059 	 * Processing it more than once per connection can corrupt kernel memory.
5060 	 *
5061 	 * As the connection handle is set here for the first time, it indicates
5062 	 * whether the connection is already set up.
5063 	 */
5064 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5065 		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5066 		goto unlock;
5067 	}
5068 
5069 	switch (status) {
5070 	case 0x00:
5071 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
5072 		if (status) {
5073 			conn->state = BT_CLOSED;
5074 			break;
5075 		}
5076 
5077 		conn->state  = BT_CONNECTED;
5078 		conn->type   = ev->link_type;
5079 
5080 		hci_debugfs_create_conn(conn);
5081 		hci_conn_add_sysfs(conn);
5082 		break;
5083 
5084 	case 0x10:	/* Connection Accept Timeout */
5085 	case 0x0d:	/* Connection Rejected due to Limited Resources */
5086 	case 0x11:	/* Unsupported Feature or Parameter Value */
5087 	case 0x1c:	/* SCO interval rejected */
5088 	case 0x1a:	/* Unsupported Remote Feature */
5089 	case 0x1e:	/* Invalid LMP Parameters */
5090 	case 0x1f:	/* Unspecified error */
5091 	case 0x20:	/* Unsupported LMP Parameter value */
5092 		if (conn->out) {
5093 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5094 					(hdev->esco_type & EDR_ESCO_MASK);
5095 			if (hci_setup_sync(conn, conn->parent->handle))
5096 				goto unlock;
5097 		}
5098 		fallthrough;
5099 
5100 	default:
5101 		conn->state = BT_CLOSED;
5102 		break;
5103 	}
5104 
5105 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5106 	/* Notify only in case of SCO over the HCI transport data path, which
5107 	 * is zero; a non-zero value indicates a non-HCI transport data path.
5108 	 */
5109 	if (conn->codec.data_path == 0 && hdev->notify) {
5110 		switch (ev->air_mode) {
5111 		case 0x02:
5112 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5113 			break;
5114 		case 0x03:
5115 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5116 			break;
5117 		}
5118 	}
5119 
5120 	hci_connect_cfm(conn, status);
5121 	if (status)
5122 		hci_conn_del(conn);
5123 
5124 unlock:
5125 	hci_dev_unlock(hdev);
5126 }
5127 
5128 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5129 {
5130 	size_t parsed = 0;
5131 
5132 	while (parsed < eir_len) {
5133 		u8 field_len = eir[0];
5134 
5135 		if (field_len == 0)
5136 			return parsed;
5137 
5138 		parsed += field_len + 1;
5139 		eir += field_len + 1;
5140 	}
5141 
5142 	return eir_len;
5143 }
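/* Editorial sketch (not part of the original source): EIR data is a
 * sequence of length-prefixed fields, where each field begins with a
 * length byte that covers the type byte plus payload, and a zero
 * length terminates the list. eir_get_length() above walks exactly
 * that structure; a lookup for one field type follows the same shape
 * (eir_find_field is a hypothetical name; eir_get_data() is the
 * in-tree equivalent used below):
 *
 *	static u8 *eir_find_field(u8 *eir, size_t eir_len, u8 type,
 *				  u8 *data_len)
 *	{
 *		size_t parsed = 0;
 *
 *		while (parsed + 1 < eir_len) {
 *			u8 field_len = eir[0];
 *
 *			if (field_len == 0)
 *				break;
 *			if (parsed + field_len + 1 > eir_len)
 *				break;
 *			if (eir[1] == type) {
 *				*data_len = field_len - 1;
 *				return &eir[2];
 *			}
 *			parsed += field_len + 1;
 *			eir += field_len + 1;
 *		}
 *
 *		return NULL;
 *	}
 */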
5144 
5145 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5146 					    struct sk_buff *skb)
5147 {
5148 	struct hci_ev_ext_inquiry_result *ev = edata;
5149 	struct inquiry_data data;
5150 	size_t eir_len;
5151 	int i;
5152 
5153 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5154 			     flex_array_size(ev, info, ev->num)))
5155 		return;
5156 
5157 	bt_dev_dbg(hdev, "num %d", ev->num);
5158 
5159 	if (!ev->num)
5160 		return;
5161 
5162 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5163 		return;
5164 
5165 	hci_dev_lock(hdev);
5166 
5167 	for (i = 0; i < ev->num; i++) {
5168 		struct extended_inquiry_info *info = &ev->info[i];
5169 		u32 flags;
5170 		bool name_known;
5171 
5172 		bacpy(&data.bdaddr, &info->bdaddr);
5173 		data.pscan_rep_mode	= info->pscan_rep_mode;
5174 		data.pscan_period_mode	= info->pscan_period_mode;
5175 		data.pscan_mode		= 0x00;
5176 		memcpy(data.dev_class, info->dev_class, 3);
5177 		data.clock_offset	= info->clock_offset;
5178 		data.rssi		= info->rssi;
5179 		data.ssp_mode		= 0x01;
5180 
5181 		if (hci_dev_test_flag(hdev, HCI_MGMT))
5182 			name_known = eir_get_data(info->data,
5183 						  sizeof(info->data),
5184 						  EIR_NAME_COMPLETE, NULL);
5185 		else
5186 			name_known = true;
5187 
5188 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5189 
5190 		eir_len = eir_get_length(info->data, sizeof(info->data));
5191 
5192 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5193 				  info->dev_class, info->rssi,
5194 				  flags, info->data, eir_len, NULL, 0, 0);
5195 	}
5196 
5197 	hci_dev_unlock(hdev);
5198 }
5199 
5200 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5201 					 struct sk_buff *skb)
5202 {
5203 	struct hci_ev_key_refresh_complete *ev = data;
5204 	struct hci_conn *conn;
5205 
5206 	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5207 		   __le16_to_cpu(ev->handle));
5208 
5209 	hci_dev_lock(hdev);
5210 
5211 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5212 	if (!conn)
5213 		goto unlock;
5214 
5215 	/* For BR/EDR the necessary steps are taken through the
5216 	 * auth_complete event.
5217 	 */
5218 	if (conn->type != LE_LINK)
5219 		goto unlock;
5220 
5221 	if (!ev->status)
5222 		conn->sec_level = conn->pending_sec_level;
5223 
5224 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5225 
5226 	if (ev->status && conn->state == BT_CONNECTED) {
5227 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5228 		hci_conn_drop(conn);
5229 		goto unlock;
5230 	}
5231 
5232 	if (conn->state == BT_CONFIG) {
5233 		if (!ev->status)
5234 			conn->state = BT_CONNECTED;
5235 
5236 		hci_connect_cfm(conn, ev->status);
5237 		hci_conn_drop(conn);
5238 	} else {
5239 		hci_auth_cfm(conn, ev->status);
5240 
5241 		hci_conn_hold(conn);
5242 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5243 		hci_conn_drop(conn);
5244 	}
5245 
5246 unlock:
5247 	hci_dev_unlock(hdev);
5248 }
5249 
5250 static u8 hci_get_auth_req(struct hci_conn *conn)
5251 {
5252 	/* If remote requests no-bonding follow that lead */
5253 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5254 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5255 		return conn->remote_auth | (conn->auth_type & 0x01);
5256 
5257 	/* If both remote and local have enough IO capabilities, require
5258 	 * MITM protection
5259 	 */
5260 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5261 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5262 		return conn->remote_auth | 0x01;
5263 
5264 	/* No MITM protection possible so ignore remote requirement */
5265 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5266 }
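/* Editorial worked example (not part of the original source): the
 * authentication requirement encodes MITM protection in bit 0. With a
 * remote of HCI_AT_GENERAL_BONDING (0x04) and usable IO capabilities
 * on both sides, hci_get_auth_req() returns 0x04 | 0x01 =
 * HCI_AT_GENERAL_BONDING_MITM (0x05). If either side is
 * HCI_IO_NO_INPUT_OUTPUT, it instead keeps the remote bonding type and
 * copies only our own MITM bit:
 *
 *	(0x04 & ~0x01) | (conn->auth_type & 0x01)
 */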
5267 
5268 static u8 bredr_oob_data_present(struct hci_conn *conn)
5269 {
5270 	struct hci_dev *hdev = conn->hdev;
5271 	struct oob_data *data;
5272 
5273 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5274 	if (!data)
5275 		return 0x00;
5276 
5277 	if (bredr_sc_enabled(hdev)) {
5278 		/* When Secure Connections is enabled, then just
5279 		 * return the present value stored with the OOB
5280 		 * data. The stored value contains the right present
5281 		 * information. However, it can only be trusted when
5282 		 * not in Secure Connections Only mode.
5283 		 */
5284 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5285 			return data->present;
5286 
5287 		/* When Secure Connections Only mode is enabled, then
5288 		 * the P-256 values are required. If they are not
5289 		 * available, then do not declare that OOB data is
5290 		 * present.
5291 		 */
5292 		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5293 		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5294 			return 0x00;
5295 
5296 		return 0x02;
5297 	}
5298 
5299 	/* When Secure Connections is not enabled or not actually
5300 	 * supported by the hardware, then check if the P-192 data
5301 	 * values are present.
5302 	 */
5303 	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5304 	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5305 		return 0x00;
5306 
5307 	return 0x01;
5308 }
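/* Editorial note (not part of the original source): the return value
 * feeds the OOB_Data_Present field of the IO Capability Reply, where
 * 0x00 means no OOB data, 0x01 means P-192 values are present and
 * 0x02 means P-256 values are present. Typical use, as in
 * hci_io_capa_request_evt() below:
 *
 *	cp.oob_data = bredr_oob_data_present(conn);
 */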
5309 
5310 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5311 				    struct sk_buff *skb)
5312 {
5313 	struct hci_ev_io_capa_request *ev = data;
5314 	struct hci_conn *conn;
5315 
5316 	bt_dev_dbg(hdev, "");
5317 
5318 	hci_dev_lock(hdev);
5319 
5320 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5321 	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5322 		goto unlock;
5323 
5324 	/* Assume remote supports SSP since it has triggered this event */
5325 	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5326 
5327 	hci_conn_hold(conn);
5328 
5329 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5330 		goto unlock;
5331 
5332 	/* Allow pairing if we're bondable, if we are the initiators
5333 	 * of the pairing, or if the remote is not requesting bonding.
5334 	 */
5335 	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5336 	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5337 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5338 		struct hci_cp_io_capability_reply cp;
5339 
5340 		bacpy(&cp.bdaddr, &ev->bdaddr);
5341 		/* Change the IO capability from KeyboardDisplay
5342 		 * to DisplayYesNo as it is not supported by the BT spec. */
5343 		cp.capability = (conn->io_capability == 0x04) ?
5344 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5345 
5346 		/* If we are initiators, there is no remote information yet */
5347 		if (conn->remote_auth == 0xff) {
5348 			/* Request MITM protection if our IO caps allow it
5349 			 * except for the no-bonding case.
5350 			 */
5351 			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5352 			    conn->auth_type != HCI_AT_NO_BONDING)
5353 				conn->auth_type |= 0x01;
5354 		} else {
5355 			conn->auth_type = hci_get_auth_req(conn);
5356 		}
5357 
5358 		/* If we're not bondable, force one of the non-bondable
5359 		 * authentication requirement values.
5360 		 */
5361 		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5362 			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5363 
5364 		cp.authentication = conn->auth_type;
5365 		cp.oob_data = bredr_oob_data_present(conn);
5366 
5367 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5368 			     sizeof(cp), &cp);
5369 	} else {
5370 		struct hci_cp_io_capability_neg_reply cp;
5371 
5372 		bacpy(&cp.bdaddr, &ev->bdaddr);
5373 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5374 
5375 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5376 			     sizeof(cp), &cp);
5377 	}
5378 
5379 unlock:
5380 	hci_dev_unlock(hdev);
5381 }
5382 
5383 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5384 				  struct sk_buff *skb)
5385 {
5386 	struct hci_ev_io_capa_reply *ev = data;
5387 	struct hci_conn *conn;
5388 
5389 	bt_dev_dbg(hdev, "");
5390 
5391 	hci_dev_lock(hdev);
5392 
5393 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5394 	if (!conn)
5395 		goto unlock;
5396 
5397 	conn->remote_cap = ev->capability;
5398 	conn->remote_auth = ev->authentication;
5399 
5400 unlock:
5401 	hci_dev_unlock(hdev);
5402 }
5403 
5404 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5405 					 struct sk_buff *skb)
5406 {
5407 	struct hci_ev_user_confirm_req *ev = data;
5408 	int loc_mitm, rem_mitm, confirm_hint = 0;
5409 	struct hci_conn *conn;
5410 
5411 	bt_dev_dbg(hdev, "");
5412 
5413 	hci_dev_lock(hdev);
5414 
5415 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5416 		goto unlock;
5417 
5418 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5419 	if (!conn)
5420 		goto unlock;
5421 
5422 	loc_mitm = (conn->auth_type & 0x01);
5423 	rem_mitm = (conn->remote_auth & 0x01);
5424 
5425 	/* If we require MITM but the remote device can't provide that
5426 	 * (it has NoInputNoOutput) then reject the confirmation
5427 	 * request. We check the security level here since it doesn't
5428 	 * necessarily match conn->auth_type.
5429 	 */
5430 	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5431 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5432 		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5433 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5434 			     sizeof(ev->bdaddr), &ev->bdaddr);
5435 		goto unlock;
5436 	}
5437 
5438 	/* If no side requires MITM protection, auto-accept */
5439 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5440 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5441 
5442 		/* If we're not the initiators, request authorization to
5443 		 * proceed from user space (mgmt_user_confirm with
5444 		 * confirm_hint set to 1). The exception is if neither
5445 		 * side had MITM or if the local IO capability is
5446 		 * NoInputNoOutput, in which case we do auto-accept.
5447 		 */
5448 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5449 		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5450 		    (loc_mitm || rem_mitm)) {
5451 			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5452 			confirm_hint = 1;
5453 			goto confirm;
5454 		}
5455 
5456 		/* If a link key already exists in the local host, leave the
5457 		 * decision to user space since the remote device could be
5458 		 * legitimate or malicious.
5459 		 */
5460 		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5461 			bt_dev_dbg(hdev, "Local host already has link key");
5462 			confirm_hint = 1;
5463 			goto confirm;
5464 		}
5465 
5466 		BT_DBG("Auto-accept of user confirmation with %ums delay",
5467 		       hdev->auto_accept_delay);
5468 
5469 		if (hdev->auto_accept_delay > 0) {
5470 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5471 			queue_delayed_work(conn->hdev->workqueue,
5472 					   &conn->auto_accept_work, delay);
5473 			goto unlock;
5474 		}
5475 
5476 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5477 			     sizeof(ev->bdaddr), &ev->bdaddr);
5478 		goto unlock;
5479 	}
5480 
5481 confirm:
5482 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5483 				  le32_to_cpu(ev->passkey), confirm_hint);
5484 
5485 unlock:
5486 	hci_dev_unlock(hdev);
5487 }
5488 
5489 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5490 					 struct sk_buff *skb)
5491 {
5492 	struct hci_ev_user_passkey_req *ev = data;
5493 
5494 	bt_dev_dbg(hdev, "");
5495 
5496 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5497 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5498 }
5499 
5500 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5501 					struct sk_buff *skb)
5502 {
5503 	struct hci_ev_user_passkey_notify *ev = data;
5504 	struct hci_conn *conn;
5505 
5506 	bt_dev_dbg(hdev, "");
5507 
5508 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5509 	if (!conn)
5510 		return;
5511 
5512 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5513 	conn->passkey_entered = 0;
5514 
5515 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5516 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5517 					 conn->dst_type, conn->passkey_notify,
5518 					 conn->passkey_entered);
5519 }
5520 
5521 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5522 				    struct sk_buff *skb)
5523 {
5524 	struct hci_ev_keypress_notify *ev = data;
5525 	struct hci_conn *conn;
5526 
5527 	bt_dev_dbg(hdev, "");
5528 
5529 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5530 	if (!conn)
5531 		return;
5532 
5533 	switch (ev->type) {
5534 	case HCI_KEYPRESS_STARTED:
5535 		conn->passkey_entered = 0;
5536 		return;
5537 
5538 	case HCI_KEYPRESS_ENTERED:
5539 		conn->passkey_entered++;
5540 		break;
5541 
5542 	case HCI_KEYPRESS_ERASED:
5543 		conn->passkey_entered--;
5544 		break;
5545 
5546 	case HCI_KEYPRESS_CLEARED:
5547 		conn->passkey_entered = 0;
5548 		break;
5549 
5550 	case HCI_KEYPRESS_COMPLETED:
5551 		return;
5552 	}
5553 
5554 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5555 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5556 					 conn->dst_type, conn->passkey_notify,
5557 					 conn->passkey_entered);
5558 }
5559 
5560 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5561 					 struct sk_buff *skb)
5562 {
5563 	struct hci_ev_simple_pair_complete *ev = data;
5564 	struct hci_conn *conn;
5565 
5566 	bt_dev_dbg(hdev, "");
5567 
5568 	hci_dev_lock(hdev);
5569 
5570 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5571 	if (!conn || !hci_conn_ssp_enabled(conn))
5572 		goto unlock;
5573 
5574 	/* Reset the authentication requirement to unknown */
5575 	conn->remote_auth = 0xff;
5576 
5577 	/* To avoid duplicate auth_failed events to user space we check
5578 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
5579 	 * initiated the authentication. A traditional auth_complete
5580 	 * event is always produced as initiator and is also mapped to
5581 	 * the mgmt_auth_failed event. */
5582 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5583 		mgmt_auth_failed(conn, ev->status);
5584 
5585 	hci_conn_drop(conn);
5586 
5587 unlock:
5588 	hci_dev_unlock(hdev);
5589 }
5590 
5591 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5592 					 struct sk_buff *skb)
5593 {
5594 	struct hci_ev_remote_host_features *ev = data;
5595 	struct inquiry_entry *ie;
5596 	struct hci_conn *conn;
5597 
5598 	bt_dev_dbg(hdev, "");
5599 
5600 	hci_dev_lock(hdev);
5601 
5602 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5603 	if (conn)
5604 		memcpy(conn->features[1], ev->features, 8);
5605 
5606 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5607 	if (ie)
5608 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5609 
5610 	hci_dev_unlock(hdev);
5611 }
5612 
5613 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5614 					    struct sk_buff *skb)
5615 {
5616 	struct hci_ev_remote_oob_data_request *ev = edata;
5617 	struct oob_data *data;
5618 
5619 	bt_dev_dbg(hdev, "");
5620 
5621 	hci_dev_lock(hdev);
5622 
5623 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5624 		goto unlock;
5625 
5626 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5627 	if (!data) {
5628 		struct hci_cp_remote_oob_data_neg_reply cp;
5629 
5630 		bacpy(&cp.bdaddr, &ev->bdaddr);
5631 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5632 			     sizeof(cp), &cp);
5633 		goto unlock;
5634 	}
5635 
5636 	if (bredr_sc_enabled(hdev)) {
5637 		struct hci_cp_remote_oob_ext_data_reply cp;
5638 
5639 		bacpy(&cp.bdaddr, &ev->bdaddr);
5640 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5641 			memset(cp.hash192, 0, sizeof(cp.hash192));
5642 			memset(cp.rand192, 0, sizeof(cp.rand192));
5643 		} else {
5644 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5645 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5646 		}
5647 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5648 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5649 
5650 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5651 			     sizeof(cp), &cp);
5652 	} else {
5653 		struct hci_cp_remote_oob_data_reply cp;
5654 
5655 		bacpy(&cp.bdaddr, &ev->bdaddr);
5656 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5657 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5658 
5659 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5660 			     sizeof(cp), &cp);
5661 	}
5662 
5663 unlock:
5664 	hci_dev_unlock(hdev);
5665 }
5666 
5667 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5668 				u8 bdaddr_type, bdaddr_t *local_rpa)
5669 {
5670 	if (conn->out) {
5671 		conn->dst_type = bdaddr_type;
5672 		conn->resp_addr_type = bdaddr_type;
5673 		bacpy(&conn->resp_addr, bdaddr);
5674 
5675 		/* If the controller has set a Local RPA then it must be
5676 		 * used instead of hdev->rpa.
5677 		 */
5678 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5679 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5680 			bacpy(&conn->init_addr, local_rpa);
5681 		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5682 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5683 			bacpy(&conn->init_addr, &conn->hdev->rpa);
5684 		} else {
5685 			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5686 						  &conn->init_addr_type);
5687 		}
5688 	} else {
5689 		conn->resp_addr_type = conn->hdev->adv_addr_type;
5690 		/* If the controller has set a Local RPA then it must be
5691 		 * used instead of hdev->rpa.
5692 		 */
5693 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5694 			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5695 			bacpy(&conn->resp_addr, local_rpa);
5696 		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5697 			/* In case of ext adv, resp_addr will be updated in
5698 			 * Adv Terminated event.
5699 			 */
5700 			if (!ext_adv_capable(conn->hdev))
5701 				bacpy(&conn->resp_addr,
5702 				      &conn->hdev->random_addr);
5703 		} else {
5704 			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5705 		}
5706 
5707 		conn->init_addr_type = bdaddr_type;
5708 		bacpy(&conn->init_addr, bdaddr);
5709 
5710 		/* For incoming connections, set the default minimum
5711 		 * and maximum connection interval. They will be used
5712 		 * to check if the parameters are in range and if not
5713 		 * trigger the connection update procedure.
5714 		 */
5715 		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5716 		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5717 	}
5718 }
5719 
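/* Common handler for the legacy LE Connection Complete and LE Enhanced
 * Connection Complete events; local_rpa is NULL for the legacy variant,
 * which does not carry it.
 */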
5720 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5721 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5722 				 bdaddr_t *local_rpa, u8 role, u16 handle,
5723 				 u16 interval, u16 latency,
5724 				 u16 supervision_timeout)
5725 {
5726 	struct hci_conn_params *params;
5727 	struct hci_conn *conn;
5728 	struct smp_irk *irk;
5729 	u8 addr_type;
5730 
5731 	hci_dev_lock(hdev);
5732 
5733 	/* All controllers implicitly stop advertising in the event of a
5734 	 * connection, so ensure that the state bit is cleared.
5735 	 */
5736 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5737 
5738 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5739 	if (!conn) {
5740 		/* In case of error status and there is no connection pending
5741 		 * just unlock as there is nothing to cleanup.
5742 		 */
5743 		if (status)
5744 			goto unlock;
5745 
5746 		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5747 		if (!conn) {
5748 			bt_dev_err(hdev, "no memory for new connection");
5749 			goto unlock;
5750 		}
5751 
5752 		conn->dst_type = bdaddr_type;
5753 
5754 		/* If we didn't have a hci_conn object previously
5755 		 * but we're in central role, this must be something
5756 		 * initiated using an accept list. Since accept list based
5757 		 * connections are not "first class citizens", we don't
5758 		 * have full tracking of them. Therefore, we go ahead
5759 		 * with a "best effort" approach of determining the
5760 		 * initiator address based on the HCI_PRIVACY flag.
5761 		 */
5762 		if (conn->out) {
5763 			conn->resp_addr_type = bdaddr_type;
5764 			bacpy(&conn->resp_addr, bdaddr);
5765 			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5766 				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5767 				bacpy(&conn->init_addr, &hdev->rpa);
5768 			} else {
5769 				hci_copy_identity_address(hdev,
5770 							  &conn->init_addr,
5771 							  &conn->init_addr_type);
5772 			}
5773 		}
5774 	} else {
5775 		cancel_delayed_work(&conn->le_conn_timeout);
5776 	}
5777 
5778 	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5779 	 * Processing it more than once per connection can corrupt kernel memory.
5780 	 *
5781 	 * As the connection handle is set here for the first time, it indicates
5782 	 * whether the connection is already set up.
5783 	 */
5784 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5785 		bt_dev_err(hdev, "Ignoring HCI_LE_Connection_Complete for existing connection");
5786 		goto unlock;
5787 	}
5788 
5789 	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5790 
5791 	/* Lookup the identity address from the stored connection
5792 	 * address and address type.
5793 	 *
5794 	 * When establishing connections to an identity address, the
5795 	 * connection procedure will store the resolvable random
5796 	 * address first. Now if it can be converted back into the
5797 	 * identity address, start using the identity address from
5798 	 * now on.
5799 	 */
5800 	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5801 	if (irk) {
5802 		bacpy(&conn->dst, &irk->bdaddr);
5803 		conn->dst_type = irk->addr_type;
5804 	}
5805 
5806 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5807 
5808 	/* All connection failure handling is taken care of by the
5809 	 * hci_conn_failed function which is triggered by the HCI
5810 	 * request completion callbacks used for connecting.
5811 	 */
5812 	if (status || hci_conn_set_handle(conn, handle))
5813 		goto unlock;
5814 
5815 	/* Drop the connection if it has been aborted */
5816 	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5817 		hci_conn_drop(conn);
5818 		goto unlock;
5819 	}
5820 
5821 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5822 		addr_type = BDADDR_LE_PUBLIC;
5823 	else
5824 		addr_type = BDADDR_LE_RANDOM;
5825 
5826 	/* Drop the connection if the device is blocked */
5827 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5828 		hci_conn_drop(conn);
5829 		goto unlock;
5830 	}
5831 
5832 	mgmt_device_connected(hdev, conn, NULL, 0);
5833 
5834 	conn->sec_level = BT_SECURITY_LOW;
5835 	conn->state = BT_CONFIG;
5836 
5837 	/* Store current advertising instance as connection advertising instance
5838 	 * when software rotation is in use so it can be re-enabled when
5839 	 * disconnected.
5840 	 */
5841 	if (!ext_adv_capable(hdev))
5842 		conn->adv_instance = hdev->cur_adv_instance;
5843 
5844 	conn->le_conn_interval = interval;
5845 	conn->le_conn_latency = latency;
5846 	conn->le_supv_timeout = supervision_timeout;
5847 
5848 	hci_debugfs_create_conn(conn);
5849 	hci_conn_add_sysfs(conn);
5850 
5851 	/* The remote features procedure is defined for central
5852 	 * role only. So only in case of an initiated connection
5853 	 * request the remote features.
5854 	 *
5855 	 * If the local controller supports peripheral-initiated features
5856 	 * exchange, then requesting the remote features in peripheral
5857 	 * role is possible. Otherwise just transition into the
5858 	 * connected state without requesting the remote features.
5859 	 */
5860 	if (conn->out ||
5861 	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5862 		struct hci_cp_le_read_remote_features cp;
5863 
5864 		cp.handle = __cpu_to_le16(conn->handle);
5865 
5866 		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5867 			     sizeof(cp), &cp);
5868 
5869 		hci_conn_hold(conn);
5870 	} else {
5871 		conn->state = BT_CONNECTED;
5872 		hci_connect_cfm(conn, status);
5873 	}
5874 
5875 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5876 					   conn->dst_type);
5877 	if (params) {
5878 		hci_pend_le_list_del_init(params);
5879 		if (params->conn) {
5880 			hci_conn_drop(params->conn);
5881 			hci_conn_put(params->conn);
5882 			params->conn = NULL;
5883 		}
5884 	}
5885 
5886 unlock:
5887 	hci_update_passive_scan(hdev);
5888 	hci_dev_unlock(hdev);
5889 }
5890 
5891 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5892 				     struct sk_buff *skb)
5893 {
5894 	struct hci_ev_le_conn_complete *ev = data;
5895 
5896 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5897 
5898 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5899 			     NULL, ev->role, le16_to_cpu(ev->handle),
5900 			     le16_to_cpu(ev->interval),
5901 			     le16_to_cpu(ev->latency),
5902 			     le16_to_cpu(ev->supervision_timeout));
5903 }
5904 
5905 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5906 					 struct sk_buff *skb)
5907 {
5908 	struct hci_ev_le_enh_conn_complete *ev = data;
5909 
5910 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5911 
5912 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5913 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5914 			     le16_to_cpu(ev->interval),
5915 			     le16_to_cpu(ev->latency),
5916 			     le16_to_cpu(ev->supervision_timeout));
5917 }
5918 
5919 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5920 				    struct sk_buff *skb)
5921 {
5922 	struct hci_evt_le_ext_adv_set_term *ev = data;
5923 	struct hci_conn *conn;
5924 	struct adv_info *adv, *n;
5925 
5926 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5927 
5928 	/* The Bluetooth Core 5.3 specification clearly states that this event
5929 	 * shall not be sent when the Host disables the advertising set. So in
5930 	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5931 	 *
5932 	 * When the Host disables an advertising set, all cleanup is done via
5933 	 * its command callback and does not need to be duplicated here.
5934 	 */
5935 	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5936 		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5937 		return;
5938 	}
5939 
5940 	hci_dev_lock(hdev);
5941 
5942 	adv = hci_find_adv_instance(hdev, ev->handle);
5943 
5944 	if (ev->status) {
5945 		if (!adv)
5946 			goto unlock;
5947 
5948 		/* Remove advertising as it has been terminated */
5949 		hci_remove_adv_instance(hdev, ev->handle);
5950 		mgmt_advertising_removed(NULL, hdev, ev->handle);
5951 
5952 		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5953 			if (adv->enabled)
5954 				goto unlock;
5955 		}
5956 
5957 		/* We are no longer advertising, clear HCI_LE_ADV */
5958 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
5959 		goto unlock;
5960 	}
5961 
5962 	if (adv)
5963 		adv->enabled = false;
5964 
5965 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5966 	if (conn) {
5967 		/* Store handle in the connection so the correct advertising
5968 		 * instance can be re-enabled when disconnected.
5969 		 */
5970 		conn->adv_instance = ev->handle;
5971 
5972 		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5973 		    bacmp(&conn->resp_addr, BDADDR_ANY))
5974 			goto unlock;
5975 
5976 		if (!ev->handle) {
5977 			bacpy(&conn->resp_addr, &hdev->random_addr);
5978 			goto unlock;
5979 		}
5980 
5981 		if (adv)
5982 			bacpy(&conn->resp_addr, &adv->random_addr);
5983 	}
5984 
5985 unlock:
5986 	hci_dev_unlock(hdev);
5987 }
5988 
5989 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5990 					    struct sk_buff *skb)
5991 {
5992 	struct hci_ev_le_conn_update_complete *ev = data;
5993 	struct hci_conn *conn;
5994 
5995 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5996 
5997 	if (ev->status)
5998 		return;
5999 
6000 	hci_dev_lock(hdev);
6001 
6002 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6003 	if (conn) {
6004 		conn->le_conn_interval = le16_to_cpu(ev->interval);
6005 		conn->le_conn_latency = le16_to_cpu(ev->latency);
6006 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6007 	}
6008 
6009 	hci_dev_unlock(hdev);
6010 }
6011 
6012 /* This function requires the caller holds hdev->lock */
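/* Check whether an advertising report should trigger a connection to the
 * advertiser and, if so, initiate it. Returns the new hci_conn or NULL if
 * no connection was initiated.
 */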
6013 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6014 					      bdaddr_t *addr,
6015 					      u8 addr_type, bool addr_resolved,
6016 					      u8 adv_type)
6017 {
6018 	struct hci_conn *conn;
6019 	struct hci_conn_params *params;
6020 
6021 	/* If the event is not connectable don't proceed further */
6022 	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6023 		return NULL;
6024 
6025 	/* Ignore if the device is blocked or hdev is suspended */
6026 	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6027 	    hdev->suspended)
6028 		return NULL;
6029 
6030 	/* Most controllers will fail if we try to create new connections
6031 	 * while we have an existing one in peripheral role.
6032 	 */
6033 	if (hdev->conn_hash.le_num_peripheral > 0 &&
6034 	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6035 	     !(hdev->le_states[3] & 0x10)))
6036 		return NULL;
6037 
6038 	/* If we're not connectable only connect devices that we have in
6039 	 * our pend_le_conns list.
6040 	 */
6041 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6042 					   addr_type);
6043 	if (!params)
6044 		return NULL;
6045 
6046 	if (!params->explicit_connect) {
6047 		switch (params->auto_connect) {
6048 		case HCI_AUTO_CONN_DIRECT:
6049 			/* Only devices advertising with ADV_DIRECT_IND
6050 			 * trigger a connection attempt. This allows
6051 			 * incoming connections from peripheral devices.
6052 			 */
6053 			if (adv_type != LE_ADV_DIRECT_IND)
6054 				return NULL;
6055 			break;
6056 		case HCI_AUTO_CONN_ALWAYS:
6057 			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
6058 			 * trigger a connection attempt. This means that
6059 			 * incoming connections from peripheral devices are
6060 			 * accepted and outgoing connections to peripheral
6061 			 * devices are established when found.
6062 			 */
6063 			break;
6064 		default:
6065 			return NULL;
6066 		}
6067 	}
6068 
6069 	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6070 			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6071 			      HCI_ROLE_MASTER);
6072 	if (!IS_ERR(conn)) {
6073 		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6074 		 * by the higher layer that tried to connect. If not, then
6075 		 * store the pointer since we don't really have any
6076 		 * other owner of the object besides the params that
6077 		 * triggered it. This way we can abort the connection if
6078 		 * the parameters get removed and keep the reference
6079 		 * count consistent once the connection is established.
6080 		 */
6081 
6082 		if (!params->explicit_connect)
6083 			params->conn = hci_conn_get(conn);
6084 
6085 		return conn;
6086 	}
6087 
6088 	switch (PTR_ERR(conn)) {
6089 	case -EBUSY:
6090 		/* If hci_connect_le() returns -EBUSY it means there is already
6091 		 * an LE connection attempt going on. Since controllers don't
6092 		 * support more than one connection attempt at a time, we
6093 		 * don't consider this an error case.
6094 		 */
6095 		break;
6096 	default:
6097 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6098 		return NULL;
6099 	}
6100 
6101 	return NULL;
6102 }
6103 
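/* Process a single advertising report: validate and trim the AD data,
 * resolve RPAs to identity addresses, trigger any pending LE connection
 * to the advertiser and forward the result to the management interface,
 * merging ADV_IND/ADV_SCAN_IND reports with their SCAN_RSP when possible.
 */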
6104 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6105 			       u8 bdaddr_type, bdaddr_t *direct_addr,
6106 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6107 			       bool ext_adv, bool ctl_time, u64 instant)
6108 {
6109 	struct discovery_state *d = &hdev->discovery;
6110 	struct smp_irk *irk;
6111 	struct hci_conn *conn;
6112 	bool match, bdaddr_resolved;
6113 	u32 flags;
6114 	u8 *ptr;
6115 
6116 	switch (type) {
6117 	case LE_ADV_IND:
6118 	case LE_ADV_DIRECT_IND:
6119 	case LE_ADV_SCAN_IND:
6120 	case LE_ADV_NONCONN_IND:
6121 	case LE_ADV_SCAN_RSP:
6122 		break;
6123 	default:
6124 		bt_dev_err_ratelimited(hdev, "unknown advertising packet type: 0x%02x",
6125 				       type);
6126 		return;
6127 	}
6128 
6129 	if (len > max_adv_len(hdev)) {
6130 		bt_dev_err_ratelimited(hdev,
6131 				       "adv larger than maximum supported");
6132 		return;
6133 	}
6134 
6135 	/* Find the end of the data in case the report contains padded zero
6136 	 * bytes at the end causing an invalid length value.
6137 	 *
6138 	 * When data is NULL, len is 0 so there is no need for extra ptr
6139 	 * check as 'ptr < data + 0' is already false in such case.
6140 	 */
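	/* AD data is a sequence of length-prefixed structures, i.e.
	 * [len][type][len - 1 octets of data], so each iteration below
	 * advances by *ptr + 1 octets and a zero length octet ends the walk.
	 */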
6141 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6142 		if (ptr + 1 + *ptr > data + len)
6143 			break;
6144 	}
6145 
6146 	/* Adjust for actual length. This handles the case when remote
6147 	 * device is advertising with incorrect data length.
6148 	 */
6149 	len = ptr - data;
6150 
6151 	/* If the direct address is present, then this report is from
6152 	 * a LE Direct Advertising Report event. In that case it is
6153 	 * important to see if the address is matching the local
6154 	 * controller address.
6155 	 */
6156 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6157 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6158 						  &bdaddr_resolved);
6159 
6160 		/* Only resolvable random addresses are valid for this
6161 		 * kind of report; others can be ignored.
6162 		 */
6163 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6164 			return;
6165 
6166 		/* If the controller is not using resolvable random
6167 		 * addresses, then this report can be ignored.
6168 		 */
6169 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6170 			return;
6171 
6172 		/* If the local IRK of the controller does not match
6173 		 * with the resolvable random address provided, then
6174 		 * this report can be ignored.
6175 		 */
6176 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6177 			return;
6178 	}
6179 
6180 	/* Check if we need to convert to identity address */
6181 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6182 	if (irk) {
6183 		bdaddr = &irk->bdaddr;
6184 		bdaddr_type = irk->addr_type;
6185 	}
6186 
6187 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6188 
6189 	/* Check if we have been requested to connect to this device.
6190 	 *
6191 	 * direct_addr is set only for directed advertising reports (it is NULL
6192 	 * for advertising reports) and is already verified to be an RPA above.
6193 	 */
6194 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6195 				     type);
6196 	if (!ext_adv && conn && type == LE_ADV_IND &&
6197 	    len <= max_adv_len(hdev)) {
6198 		/* Store report for later inclusion by
6199 		 * mgmt_device_connected
6200 		 */
6201 		memcpy(conn->le_adv_data, data, len);
6202 		conn->le_adv_data_len = len;
6203 	}
6204 
6205 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6206 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6207 	else
6208 		flags = 0;
6209 
6210 	/* All scan results should be sent up for Mesh systems */
6211 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6212 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6213 				  rssi, flags, data, len, NULL, 0, instant);
6214 		return;
6215 	}
6216 
6217 	/* Passive scanning shouldn't trigger any device found events,
6218 	 * except for devices marked as CONN_REPORT, for which we do send
6219 	 * device found events, or when advertisement monitoring is requested.
6220 	 */
6221 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6222 		if (type == LE_ADV_DIRECT_IND)
6223 			return;
6224 
6225 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6226 					       bdaddr, bdaddr_type) &&
6227 		    idr_is_empty(&hdev->adv_monitors_idr))
6228 			return;
6229 
6230 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6231 				  rssi, flags, data, len, NULL, 0, 0);
6232 		return;
6233 	}
6234 
6235 	/* When receiving a scan response, then there is no way to
6236 	 * know if the remote device is connectable or not. However
6237 	 * since scan responses are merged with a previously seen
6238 	 * advertising report, the flags field from that report
6239 	 * will be used.
6240 	 *
6241 	 * In the unlikely case that a controller just sends a scan
6242 	 * response event that doesn't match the pending report, then
6243 	 * it is marked as a standalone SCAN_RSP.
6244 	 */
6245 	if (type == LE_ADV_SCAN_RSP)
6246 		flags = MGMT_DEV_FOUND_SCAN_RSP;
6247 
6248 	/* If there's nothing pending either store the data from this
6249 	 * event or send an immediate device found event if the data
6250 	 * should not be stored for later.
6251 	 */
6252 	if (!ext_adv && !has_pending_adv_report(hdev)) {
6253 		/* If the report will trigger a SCAN_REQ store it for
6254 		 * later merging.
6255 		 */
6256 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6257 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6258 						 rssi, flags, data, len);
6259 			return;
6260 		}
6261 
6262 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6263 				  rssi, flags, data, len, NULL, 0, 0);
6264 		return;
6265 	}
6266 
6267 	/* Check if the pending report is for the same device as the new one */
6268 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6269 		 bdaddr_type == d->last_adv_addr_type);
6270 
6271 	/* If the pending data doesn't match this report or this isn't a
6272 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6273 	 * sending of the pending data.
6274 	 */
6275 	if (type != LE_ADV_SCAN_RSP || !match) {
6276 		/* Send out whatever is in the cache, but skip duplicates */
6277 		if (!match)
6278 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6279 					  d->last_adv_addr_type, NULL,
6280 					  d->last_adv_rssi, d->last_adv_flags,
6281 					  d->last_adv_data,
6282 					  d->last_adv_data_len, NULL, 0, 0);
6283 
6284 		/* If the new report will trigger a SCAN_REQ store it for
6285 		 * later merging.
6286 		 */
6287 		if (!ext_adv && (type == LE_ADV_IND ||
6288 				 type == LE_ADV_SCAN_IND)) {
6289 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6290 						 rssi, flags, data, len);
6291 			return;
6292 		}
6293 
6294 		/* The advertising reports cannot be merged, so clear
6295 		 * the pending report and send out a device found event.
6296 		 */
6297 		clear_pending_adv_report(hdev);
6298 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6299 				  rssi, flags, data, len, NULL, 0, 0);
6300 		return;
6301 	}
6302 
6303 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6304 	 * the new event is a SCAN_RSP. We can therefore proceed with
6305 	 * sending a merged device found event.
6306 	 */
6307 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6308 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6309 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6310 	clear_pending_adv_report(hdev);
6311 }
6312 
6313 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6314 				  struct sk_buff *skb)
6315 {
6316 	struct hci_ev_le_advertising_report *ev = data;
6317 	u64 instant = jiffies;
6318 
6319 	if (!ev->num)
6320 		return;
6321 
6322 	hci_dev_lock(hdev);
6323 
6324 	while (ev->num--) {
6325 		struct hci_ev_le_advertising_info *info;
6326 		s8 rssi;
6327 
6328 		info = hci_le_ev_skb_pull(hdev, skb,
6329 					  HCI_EV_LE_ADVERTISING_REPORT,
6330 					  sizeof(*info));
6331 		if (!info)
6332 			break;
6333 
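		/* Each report carries a trailing RSSI octet after the
		 * advertising data, hence the info->length + 1 pull below.
		 */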
6334 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6335 					info->length + 1))
6336 			break;
6337 
6338 		if (info->length <= max_adv_len(hdev)) {
6339 			rssi = info->data[info->length];
6340 			process_adv_report(hdev, info->type, &info->bdaddr,
6341 					   info->bdaddr_type, NULL, 0, rssi,
6342 					   info->data, info->length, false,
6343 					   false, instant);
6344 		} else {
6345 			bt_dev_err(hdev, "Dropping invalid advertising data");
6346 		}
6347 	}
6348 
6349 	hci_dev_unlock(hdev);
6350 }
6351 
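/* Map an extended advertising report event type to the corresponding legacy
 * PDU type so that extended and legacy reports can share process_adv_report().
 */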
6352 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6353 {
6354 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6355 		switch (evt_type) {
6356 		case LE_LEGACY_ADV_IND:
6357 			return LE_ADV_IND;
6358 		case LE_LEGACY_ADV_DIRECT_IND:
6359 			return LE_ADV_DIRECT_IND;
6360 		case LE_LEGACY_ADV_SCAN_IND:
6361 			return LE_ADV_SCAN_IND;
6362 		case LE_LEGACY_NONCONN_IND:
6363 			return LE_ADV_NONCONN_IND;
6364 		case LE_LEGACY_SCAN_RSP_ADV:
6365 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6366 			return LE_ADV_SCAN_RSP;
6367 		}
6368 
6369 		goto invalid;
6370 	}
6371 
6372 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6373 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6374 			return LE_ADV_DIRECT_IND;
6375 
6376 		return LE_ADV_IND;
6377 	}
6378 
6379 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6380 		return LE_ADV_SCAN_RSP;
6381 
6382 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6383 		return LE_ADV_SCAN_IND;
6384 
6385 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6386 	    evt_type & LE_EXT_ADV_DIRECT_IND)
6387 		return LE_ADV_NONCONN_IND;
6388 
6389 invalid:
6390 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6391 			       evt_type);
6392 
6393 	return LE_ADV_INVALID;
6394 }
6395 
6396 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6397 				      struct sk_buff *skb)
6398 {
6399 	struct hci_ev_le_ext_adv_report *ev = data;
6400 	u64 instant = jiffies;
6401 
6402 	if (!ev->num)
6403 		return;
6404 
6405 	hci_dev_lock(hdev);
6406 
6407 	while (ev->num--) {
6408 		struct hci_ev_le_ext_adv_info *info;
6409 		u8 legacy_evt_type;
6410 		u16 evt_type;
6411 
6412 		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6413 					  sizeof(*info));
6414 		if (!info)
6415 			break;
6416 
6417 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6418 					info->length))
6419 			break;
6420 
6421 		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6422 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6423 		if (legacy_evt_type != LE_ADV_INVALID) {
6424 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6425 					   info->bdaddr_type, NULL, 0,
6426 					   info->rssi, info->data, info->length,
6427 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6428 					   false, instant);
6429 		}
6430 	}
6431 
6432 	hci_dev_unlock(hdev);
6433 }
6434 
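/* Request the controller to terminate periodic advertising synchronization
 * for the given sync handle.
 */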
6435 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6436 {
6437 	struct hci_cp_le_pa_term_sync cp;
6438 
6439 	memset(&cp, 0, sizeof(cp));
6440 	cp.handle = handle;
6441 
6442 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6443 }
6444 
6445 static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
6446 					    struct sk_buff *skb)
6447 {
6448 	struct hci_ev_le_pa_sync_established *ev = data;
6449 	int mask = hdev->link_mode;
6450 	__u8 flags = 0;
6451 	struct hci_conn *pa_sync;
6452 
6453 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6454 
6455 	hci_dev_lock(hdev);
6456 
6457 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6458 
6459 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6460 	if (!(mask & HCI_LM_ACCEPT)) {
6461 		hci_le_pa_term_sync(hdev, ev->handle);
6462 		goto unlock;
6463 	}
6464 
6465 	if (!(flags & HCI_PROTO_DEFER))
6466 		goto unlock;
6467 
6468 	if (ev->status) {
6469 		/* Add connection to indicate the failed PA sync event */
6470 		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6471 					     HCI_ROLE_SLAVE);
6472 
6473 		if (!pa_sync)
6474 			goto unlock;
6475 
6476 		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6477 
6478 		/* Notify iso layer */
6479 		hci_connect_cfm(pa_sync, ev->status);
6480 	}
6481 
6482 unlock:
6483 	hci_dev_unlock(hdev);
6484 }
6485 
6486 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6487 				      struct sk_buff *skb)
6488 {
6489 	struct hci_ev_le_per_adv_report *ev = data;
6490 	int mask = hdev->link_mode;
6491 	__u8 flags = 0;
6492 
6493 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6494 
6495 	hci_dev_lock(hdev);
6496 
6497 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6498 	if (!(mask & HCI_LM_ACCEPT))
6499 		hci_le_pa_term_sync(hdev, ev->sync_handle);
6500 
6501 	hci_dev_unlock(hdev);
6502 }
6503 
6504 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6505 					    struct sk_buff *skb)
6506 {
6507 	struct hci_ev_le_remote_feat_complete *ev = data;
6508 	struct hci_conn *conn;
6509 
6510 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6511 
6512 	hci_dev_lock(hdev);
6513 
6514 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6515 	if (conn) {
6516 		if (!ev->status)
6517 			memcpy(conn->features[0], ev->features, 8);
6518 
6519 		if (conn->state == BT_CONFIG) {
6520 			__u8 status;
6521 
6522 			/* If the local controller supports peripheral-initiated
6523 			 * features exchange, but the remote controller does
6524 			 * not, then it is possible that the error code 0x1a
6525 			 * for unsupported remote feature gets returned.
6526 			 *
6527 			 * In this specific case, allow the connection to
6528 			 * transition into connected state and mark it as
6529 			 * successful.
6530 			 */
6531 			if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6532 			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6533 				status = 0x00;
6534 			else
6535 				status = ev->status;
6536 
6537 			conn->state = BT_CONNECTED;
6538 			hci_connect_cfm(conn, status);
6539 			hci_conn_drop(conn);
6540 		}
6541 	}
6542 
6543 	hci_dev_unlock(hdev);
6544 }
6545 
6546 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6547 				   struct sk_buff *skb)
6548 {
6549 	struct hci_ev_le_ltk_req *ev = data;
6550 	struct hci_cp_le_ltk_reply cp;
6551 	struct hci_cp_le_ltk_neg_reply neg;
6552 	struct hci_conn *conn;
6553 	struct smp_ltk *ltk;
6554 
6555 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6556 
6557 	hci_dev_lock(hdev);
6558 
6559 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6560 	if (!conn)
6561 		goto not_found;
6562 
6563 	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6564 	if (!ltk)
6565 		goto not_found;
6566 
6567 	if (smp_ltk_is_sc(ltk)) {
6568 		/* With SC both EDiv and Rand are set to zero */
6569 		if (ev->ediv || ev->rand)
6570 			goto not_found;
6571 	} else {
6572 		/* For non-SC keys check that EDiv and Rand match */
6573 		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6574 			goto not_found;
6575 	}
6576 
6577 	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6578 	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6579 	cp.handle = cpu_to_le16(conn->handle);
6580 
6581 	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6582 
6583 	conn->enc_key_size = ltk->enc_size;
6584 
6585 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6586 
6587 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6588 	 * temporary key used to encrypt a connection following
6589 	 * pairing. It is used during the Encrypted Session Setup to
6590 	 * distribute the keys. Later, security can be re-established
6591 	 * using a distributed LTK.
6592 	 */
6593 	if (ltk->type == SMP_STK) {
6594 		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6595 		list_del_rcu(&ltk->list);
6596 		kfree_rcu(ltk, rcu);
6597 	} else {
6598 		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6599 	}
6600 
6601 	hci_dev_unlock(hdev);
6602 
6603 	return;
6604 
6605 not_found:
6606 	neg.handle = ev->handle;
6607 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6608 	hci_dev_unlock(hdev);
6609 }
6610 
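/* Reject a remote LE connection parameter request with the given reason */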
6611 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6612 				      u8 reason)
6613 {
6614 	struct hci_cp_le_conn_param_req_neg_reply cp;
6615 
6616 	cp.handle = cpu_to_le16(handle);
6617 	cp.reason = reason;
6618 
6619 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6620 		     &cp);
6621 }
6622 
6623 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6624 					     struct sk_buff *skb)
6625 {
6626 	struct hci_ev_le_remote_conn_param_req *ev = data;
6627 	struct hci_cp_le_conn_param_req_reply cp;
6628 	struct hci_conn *hcon;
6629 	u16 handle, min, max, latency, timeout;
6630 
6631 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6632 
6633 	handle = le16_to_cpu(ev->handle);
6634 	min = le16_to_cpu(ev->interval_min);
6635 	max = le16_to_cpu(ev->interval_max);
6636 	latency = le16_to_cpu(ev->latency);
6637 	timeout = le16_to_cpu(ev->timeout);
6638 
6639 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6640 	if (!hcon || hcon->state != BT_CONNECTED)
6641 		return send_conn_param_neg_reply(hdev, handle,
6642 						 HCI_ERROR_UNKNOWN_CONN_ID);
6643 
6644 	if (max > hcon->le_conn_max_interval)
6645 		return send_conn_param_neg_reply(hdev, handle,
6646 						 HCI_ERROR_INVALID_LL_PARAMS);
6647 
6648 	if (hci_check_conn_params(min, max, latency, timeout))
6649 		return send_conn_param_neg_reply(hdev, handle,
6650 						 HCI_ERROR_INVALID_LL_PARAMS);
6651 
6652 	if (hcon->role == HCI_ROLE_MASTER) {
6653 		struct hci_conn_params *params;
6654 		u8 store_hint;
6655 
6656 		hci_dev_lock(hdev);
6657 
6658 		params = hci_conn_params_lookup(hdev, &hcon->dst,
6659 						hcon->dst_type);
6660 		if (params) {
6661 			params->conn_min_interval = min;
6662 			params->conn_max_interval = max;
6663 			params->conn_latency = latency;
6664 			params->supervision_timeout = timeout;
6665 			store_hint = 0x01;
6666 		} else {
6667 			store_hint = 0x00;
6668 		}
6669 
6670 		hci_dev_unlock(hdev);
6671 
6672 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6673 				    store_hint, min, max, latency, timeout);
6674 	}
6675 
6676 	cp.handle = ev->handle;
6677 	cp.interval_min = ev->interval_min;
6678 	cp.interval_max = ev->interval_max;
6679 	cp.latency = ev->latency;
6680 	cp.timeout = ev->timeout;
6681 	cp.min_ce_len = 0;
6682 	cp.max_ce_len = 0;
6683 
6684 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6685 }
6686 
6687 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6688 					 struct sk_buff *skb)
6689 {
6690 	struct hci_ev_le_direct_adv_report *ev = data;
6691 	u64 instant = jiffies;
6692 	int i;
6693 
6694 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6695 				flex_array_size(ev, info, ev->num)))
6696 		return;
6697 
6698 	if (!ev->num)
6699 		return;
6700 
6701 	hci_dev_lock(hdev);
6702 
6703 	for (i = 0; i < ev->num; i++) {
6704 		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6705 
6706 		process_adv_report(hdev, info->type, &info->bdaddr,
6707 				   info->bdaddr_type, &info->direct_addr,
6708 				   info->direct_addr_type, info->rssi, NULL, 0,
6709 				   false, false, instant);
6710 	}
6711 
6712 	hci_dev_unlock(hdev);
6713 }
6714 
6715 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6716 				  struct sk_buff *skb)
6717 {
6718 	struct hci_ev_le_phy_update_complete *ev = data;
6719 	struct hci_conn *conn;
6720 
6721 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6722 
6723 	if (ev->status)
6724 		return;
6725 
6726 	hci_dev_lock(hdev);
6727 
6728 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6729 	if (!conn)
6730 		goto unlock;
6731 
6732 	conn->le_tx_phy = ev->tx_phy;
6733 	conn->le_rx_phy = ev->rx_phy;
6734 
6735 unlock:
6736 	hci_dev_unlock(hdev);
6737 }
6738 
6739 static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
6740 					struct sk_buff *skb)
6741 {
6742 	struct hci_evt_le_cis_established *ev = data;
6743 	struct hci_conn *conn;
6744 	struct bt_iso_qos *qos;
6745 	bool pending = false;
6746 	u16 handle = __le16_to_cpu(ev->handle);
6747 
6748 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6749 
6750 	hci_dev_lock(hdev);
6751 
6752 	conn = hci_conn_hash_lookup_handle(hdev, handle);
6753 	if (!conn) {
6754 		bt_dev_err(hdev,
6755 			   "Unable to find connection with handle 0x%4.4x",
6756 			   handle);
6757 		goto unlock;
6758 	}
6759 
6760 	if (conn->type != ISO_LINK) {
6761 		bt_dev_err(hdev,
6762 			   "Invalid connection link type handle 0x%4.4x",
6763 			   handle);
6764 		goto unlock;
6765 	}
6766 
6767 	qos = &conn->iso_qos;
6768 
6769 	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6770 
6771 	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
6772 	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
6773 	qos->ucast.out.interval = qos->ucast.in.interval;
6774 
6775 	switch (conn->role) {
6776 	case HCI_ROLE_SLAVE:
6777 		/* Convert Transport Latency (us) to Latency (msec) */
6778 		qos->ucast.in.latency =
6779 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6780 					  1000);
6781 		qos->ucast.out.latency =
6782 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6783 					  1000);
6784 		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6785 		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6786 		qos->ucast.in.phy = ev->c_phy;
6787 		qos->ucast.out.phy = ev->p_phy;
6788 		break;
6789 	case HCI_ROLE_MASTER:
6790 		/* Convert Transport Latency (us) to Latency (msec) */
6791 		qos->ucast.out.latency =
6792 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6793 					  1000);
6794 		qos->ucast.in.latency =
6795 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6796 					  1000);
6797 		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6798 		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6799 		qos->ucast.out.phy = ev->c_phy;
6800 		qos->ucast.in.phy = ev->p_phy;
6801 		break;
6802 	}
6803 
6804 	if (!ev->status) {
6805 		conn->state = BT_CONNECTED;
6806 		hci_debugfs_create_conn(conn);
6807 		hci_conn_add_sysfs(conn);
6808 		hci_iso_setup_path(conn);
6809 		goto unlock;
6810 	}
6811 
6812 	conn->state = BT_CLOSED;
6813 	hci_connect_cfm(conn, ev->status);
6814 	hci_conn_del(conn);
6815 
6816 unlock:
6817 	if (pending)
6818 		hci_le_create_cis_pending(hdev);
6819 
6820 	hci_dev_unlock(hdev);
6821 }
6822 
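/* Reject an incoming CIS request, using HCI_ERROR_REJ_BAD_ADDR as the
 * rejection reason.
 */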
6823 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6824 {
6825 	struct hci_cp_le_reject_cis cp;
6826 
6827 	memset(&cp, 0, sizeof(cp));
6828 	cp.handle = handle;
6829 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6830 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6831 }
6832 
6833 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6834 {
6835 	struct hci_cp_le_accept_cis cp;
6836 
6837 	memset(&cp, 0, sizeof(cp));
6838 	cp.handle = handle;
6839 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6840 }
6841 
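/* Handle an incoming CIS request: look up the owning ACL connection, ask
 * the ISO layer whether to accept, and then accept, defer or reject the
 * CIS accordingly.
 */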
6842 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6843 			       struct sk_buff *skb)
6844 {
6845 	struct hci_evt_le_cis_req *ev = data;
6846 	u16 acl_handle, cis_handle;
6847 	struct hci_conn *acl, *cis;
6848 	int mask;
6849 	__u8 flags = 0;
6850 
6851 	acl_handle = __le16_to_cpu(ev->acl_handle);
6852 	cis_handle = __le16_to_cpu(ev->cis_handle);
6853 
6854 	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6855 		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6856 
6857 	hci_dev_lock(hdev);
6858 
6859 	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6860 	if (!acl)
6861 		goto unlock;
6862 
6863 	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6864 	if (!(mask & HCI_LM_ACCEPT)) {
6865 		hci_le_reject_cis(hdev, ev->cis_handle);
6866 		goto unlock;
6867 	}
6868 
6869 	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6870 	if (!cis) {
6871 		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
6872 				   cis_handle);
6873 		if (!cis) {
6874 			hci_le_reject_cis(hdev, ev->cis_handle);
6875 			goto unlock;
6876 		}
6877 	}
6878 
6879 	cis->iso_qos.ucast.cig = ev->cig_id;
6880 	cis->iso_qos.ucast.cis = ev->cis_id;
6881 
6882 	if (!(flags & HCI_PROTO_DEFER)) {
6883 		hci_le_accept_cis(hdev, ev->cis_handle);
6884 	} else {
6885 		cis->state = BT_CONNECT2;
6886 		hci_connect_cfm(cis, 0);
6887 	}
6888 
6889 unlock:
6890 	hci_dev_unlock(hdev);
6891 }
6892 
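/* hci_cmd_sync_queue() callback used to terminate a BIG when all of its
 * bound BIS connections were closed before the BIG creation completed.
 */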
6893 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6894 {
6895 	u8 handle = PTR_UINT(data);
6896 
6897 	return hci_le_terminate_big_sync(hdev, handle,
6898 					 HCI_ERROR_LOCAL_HOST_TERM);
6899 }
6900 
6901 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6902 					   struct sk_buff *skb)
6903 {
6904 	struct hci_evt_le_create_big_complete *ev = data;
6905 	struct hci_conn *conn;
6906 	__u8 i = 0;
6907 
6908 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6909 
6910 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6911 				flex_array_size(ev, bis_handle, ev->num_bis)))
6912 		return;
6913 
6914 	hci_dev_lock(hdev);
6915 	rcu_read_lock();
6916 
6917 	/* Connect all BISes that are bound to the BIG */
6918 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6919 		if (bacmp(&conn->dst, BDADDR_ANY) ||
6920 		    conn->type != ISO_LINK ||
6921 		    conn->iso_qos.bcast.big != ev->handle)
6922 			continue;
6923 
6924 		if (hci_conn_set_handle(conn,
6925 					__le16_to_cpu(ev->bis_handle[i++])))
6926 			continue;
6927 
6928 		if (!ev->status) {
6929 			conn->state = BT_CONNECTED;
6930 			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6931 			rcu_read_unlock();
6932 			hci_debugfs_create_conn(conn);
6933 			hci_conn_add_sysfs(conn);
6934 			hci_iso_setup_path(conn);
6935 			rcu_read_lock();
6936 			continue;
6937 		}
6938 
6939 		hci_connect_cfm(conn, ev->status);
6940 		rcu_read_unlock();
6941 		hci_conn_del(conn);
6942 		rcu_read_lock();
6943 	}
6944 
6945 	rcu_read_unlock();
6946 
6947 	if (!ev->status && !i)
6948 		/* If no BISes have been connected for the BIG,
6949 		 * terminate. This is in case all bound connections
6950 		 * have been closed before the BIG creation
6951 		 * has completed.
6952 		 */
6953 		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6954 				   UINT_PTR(ev->handle), NULL);
6955 
6956 	hci_dev_unlock(hdev);
6957 }
6958 
6959 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6960 					    struct sk_buff *skb)
6961 {
6962 	struct hci_evt_le_big_sync_estabilished *ev = data;
6963 	struct hci_conn *bis;
6964 	int i;
6965 
6966 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6967 
6968 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
6969 				flex_array_size(ev, bis, ev->num_bis)))
6970 		return;
6971 
6972 	hci_dev_lock(hdev);
6973 
6974 	for (i = 0; i < ev->num_bis; i++) {
6975 		u16 handle = le16_to_cpu(ev->bis[i]);
6976 		__le32 interval;
6977 
6978 		bis = hci_conn_hash_lookup_handle(hdev, handle);
6979 		if (!bis) {
6980 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
6981 					   HCI_ROLE_SLAVE, handle);
6982 			if (!bis)
6983 				continue;
6984 		}
6985 
6986 		if (ev->status != 0x42)
6987 			/* Mark PA sync as established */
6988 			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
6989 
6990 		bis->iso_qos.bcast.big = ev->handle;
6991 		memset(&interval, 0, sizeof(interval));
6992 		memcpy(&interval, ev->latency, sizeof(ev->latency));
6993 		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
6994 		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
6995 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
6996 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
6997 
6998 		if (!ev->status) {
6999 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7000 			hci_iso_setup_path(bis);
7001 		}
7002 	}
7003 
7004 	/* In case BIG sync failed, notify each failed connection to
7005 	 * the user after all hci connections have been added
7006 	 */
7007 	if (ev->status)
7008 		for (i = 0; i < ev->num_bis; i++) {
7009 			u16 handle = le16_to_cpu(ev->bis[i]);
7010 
7011 			bis = hci_conn_hash_lookup_handle(hdev, handle);
			if (!bis)
				continue;
7012 
7013 			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7014 			hci_connect_cfm(bis, ev->status);
7015 		}
7016 
7017 	hci_dev_unlock(hdev);
7018 }
7019 
7020 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7021 					   struct sk_buff *skb)
7022 {
7023 	struct hci_evt_le_big_info_adv_report *ev = data;
7024 	int mask = hdev->link_mode;
7025 	__u8 flags = 0;
7026 	struct hci_conn *pa_sync;
7027 
7028 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7029 
7030 	hci_dev_lock(hdev);
7031 
7032 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7033 	if (!(mask & HCI_LM_ACCEPT)) {
7034 		hci_le_pa_term_sync(hdev, ev->sync_handle);
7035 		goto unlock;
7036 	}
7037 
7038 	if (!(flags & HCI_PROTO_DEFER))
7039 		goto unlock;
7040 
7041 	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
7042 						      le16_to_cpu(ev->sync_handle));
7044 
7045 	if (pa_sync)
7046 		goto unlock;
7047 
7048 	/* Add connection to indicate the PA sync event */
7049 	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
7050 				     HCI_ROLE_SLAVE);
7051 
7052 	if (!pa_sync)
7053 		goto unlock;
7054 
7055 	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
7056 	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
7057 
7058 	/* Notify iso layer */
7059 	hci_connect_cfm(pa_sync, 0x00);
7060 
7061 	/* Notify MGMT layer */
7062 	mgmt_device_connected(hdev, pa_sync, NULL, 0);
7063 
7064 unlock:
7065 	hci_dev_unlock(hdev);
7066 }
7067 
7068 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7069 [_op] = { \
7070 	.func = _func, \
7071 	.min_len = _min_len, \
7072 	.max_len = _max_len, \
7073 }
7074 
7075 #define HCI_LE_EV(_op, _func, _len) \
7076 	HCI_LE_EV_VL(_op, _func, _len, _len)
7077 
7078 #define HCI_LE_EV_STATUS(_op, _func) \
7079 	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7080 
7081 /* Entries in this table shall be positioned according to the subevent
7082  * opcode they handle, so use of the macros above is recommended since they
7083  * initialize each entry at its proper index using designated initializers;
7084  * that way events without a callback function can be omitted.
7085  */
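/* For example,
 *
 *	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
 *		  sizeof(struct hci_ev_le_conn_complete))
 *
 * expands to the designated initializer
 *
 *	[HCI_EV_LE_CONN_COMPLETE] = {
 *		.func = hci_le_conn_complete_evt,
 *		.min_len = sizeof(struct hci_ev_le_conn_complete),
 *		.max_len = sizeof(struct hci_ev_le_conn_complete),
 *	},
 */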
7086 static const struct hci_le_ev {
7087 	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7088 	u16  min_len;
7089 	u16  max_len;
7090 } hci_le_ev_table[U8_MAX + 1] = {
7091 	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7092 	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7093 		  sizeof(struct hci_ev_le_conn_complete)),
7094 	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7095 	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7096 		     sizeof(struct hci_ev_le_advertising_report),
7097 		     HCI_MAX_EVENT_SIZE),
7098 	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7099 	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7100 		  hci_le_conn_update_complete_evt,
7101 		  sizeof(struct hci_ev_le_conn_update_complete)),
7102 	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7103 	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7104 		  hci_le_remote_feat_complete_evt,
7105 		  sizeof(struct hci_ev_le_remote_feat_complete)),
7106 	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7107 	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7108 		  sizeof(struct hci_ev_le_ltk_req)),
7109 	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7110 	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7111 		  hci_le_remote_conn_param_req_evt,
7112 		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7113 	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7114 	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7115 		  hci_le_enh_conn_complete_evt,
7116 		  sizeof(struct hci_ev_le_enh_conn_complete)),
7117 	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7118 	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7119 		     sizeof(struct hci_ev_le_direct_adv_report),
7120 		     HCI_MAX_EVENT_SIZE),
7121 	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7122 	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7123 		  sizeof(struct hci_ev_le_phy_update_complete)),
7124 	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7125 	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7126 		     sizeof(struct hci_ev_le_ext_adv_report),
7127 		     HCI_MAX_EVENT_SIZE),
7128 	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7129 	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7130 		  hci_le_pa_sync_established_evt,
7131 		  sizeof(struct hci_ev_le_pa_sync_established)),
7132 	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7133 	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT, hci_le_per_adv_report_evt,
7134 		     sizeof(struct hci_ev_le_per_adv_report),
7135 		     HCI_MAX_EVENT_SIZE),
7137 	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7138 	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7139 		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7140 	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7141 	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
7142 		  sizeof(struct hci_evt_le_cis_established)),
7143 	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7144 	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7145 		  sizeof(struct hci_evt_le_cis_req)),
7146 	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7147 	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7148 		     hci_le_create_big_complete_evt,
7149 		     sizeof(struct hci_evt_le_create_big_complete),
7150 		     HCI_MAX_EVENT_SIZE),
7151 	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7152 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7153 		     hci_le_big_sync_established_evt,
7154 		     sizeof(struct hci_evt_le_big_sync_estabilished),
7155 		     HCI_MAX_EVENT_SIZE),
7156 	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7157 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7158 		     hci_le_big_info_adv_report_evt,
7159 		     sizeof(struct hci_evt_le_big_info_adv_report),
7160 		     HCI_MAX_EVENT_SIZE),
7161 };
7162 
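/* Dispatch an LE meta event to its subevent handler via hci_le_ev_table,
 * completing any pending LE command that was waiting for this subevent
 * before the handler runs.
 */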
7163 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7164 			    struct sk_buff *skb, u16 *opcode, u8 *status,
7165 			    hci_req_complete_t *req_complete,
7166 			    hci_req_complete_skb_t *req_complete_skb)
7167 {
7168 	struct hci_ev_le_meta *ev = data;
7169 	const struct hci_le_ev *subev;
7170 
7171 	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7172 
7173 	/* Only match event if command OGF is for LE */
7174 	if (hdev->req_skb &&
7175 	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
7176 	    hci_skb_event(hdev->req_skb) == ev->subevent) {
7177 		*opcode = hci_skb_opcode(hdev->req_skb);
7178 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7179 				     req_complete_skb);
7180 	}
7181 
7182 	subev = &hci_le_ev_table[ev->subevent];
7183 	if (!subev->func)
7184 		return;
7185 
7186 	if (skb->len < subev->min_len) {
7187 		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7188 			   ev->subevent, skb->len, subev->min_len);
7189 		return;
7190 	}
7191 
7192 	/* Just warn if the length is over max_len, since it may still be
7193 	 * possible to partially parse the event, so leave it to the callback
7194 	 * to decide if that is acceptable.
7195 	 */
7196 	if (skb->len > subev->max_len)
7197 		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7198 			    ev->subevent, skb->len, subev->max_len);
7199 	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7200 	if (!data)
7201 		return;
7202 
7203 	subev->func(hdev, data, skb);
7204 }
7205 
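/* Check whether skb carries the event a pending request was waiting for:
 * either the given event itself or, when event is 0, a Command Complete
 * for the given opcode.
 */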
7206 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7207 				 u8 event, struct sk_buff *skb)
7208 {
7209 	struct hci_ev_cmd_complete *ev;
7210 	struct hci_event_hdr *hdr;
7211 
7212 	if (!skb)
7213 		return false;
7214 
7215 	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7216 	if (!hdr)
7217 		return false;
7218 
7219 	if (event) {
7220 		if (hdr->evt != event)
7221 			return false;
7222 		return true;
7223 	}
7224 
7225 	/* Check if request ended in Command Status - no way to retrieve
7226 	 * any extra parameters in this case.
7227 	 */
7228 	if (hdr->evt == HCI_EV_CMD_STATUS)
7229 		return false;
7230 
7231 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7232 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7233 			   hdr->evt);
7234 		return false;
7235 	}
7236 
7237 	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7238 	if (!ev)
7239 		return false;
7240 
7241 	if (opcode != __le16_to_cpu(ev->opcode)) {
7242 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7243 		       __le16_to_cpu(ev->opcode));
7244 		return false;
7245 	}
7246 
7247 	return true;
7248 }
7249 
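/* If this is the first event seen while suspended, record the wake reason
 * and, for connection and advertising events, the address of the remote
 * device that caused the wakeup.
 */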
7250 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7251 				  struct sk_buff *skb)
7252 {
7253 	struct hci_ev_le_advertising_info *adv;
7254 	struct hci_ev_le_direct_adv_info *direct_adv;
7255 	struct hci_ev_le_ext_adv_info *ext_adv;
7256 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7257 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7258 
7259 	hci_dev_lock(hdev);
7260 
7261 	/* If we are currently suspended and this is the first BT event seen,
7262 	 * save the wake reason associated with the event.
7263 	 */
7264 	if (!hdev->suspended || hdev->wake_reason)
7265 		goto unlock;
7266 
7267 	/* Default to remote wake. Values for wake_reason are documented in the
7268 	 * BlueZ mgmt API docs.
7269 	 */
7270 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7271 
7272 	/* Once configured for remote wakeup, we should only wake up for
7273 	 * reconnections. It's useful to see which device is waking us up so
7274 	 * keep track of the bdaddr of the connection event that woke us up.
7275 	 */
7276 	if (event == HCI_EV_CONN_REQUEST) {
7277 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7278 		hdev->wake_addr_type = BDADDR_BREDR;
7279 	} else if (event == HCI_EV_CONN_COMPLETE) {
7280 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7281 		hdev->wake_addr_type = BDADDR_BREDR;
7282 	} else if (event == HCI_EV_LE_META) {
7283 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7284 		u8 subevent = le_ev->subevent;
7285 		u8 *ptr = &skb->data[sizeof(*le_ev)];
7286 		u8 num_reports = *ptr;
7287 
7288 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7289 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7290 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7291 		    num_reports) {
7292 			adv = (void *)(ptr + 1);
7293 			direct_adv = (void *)(ptr + 1);
7294 			ext_adv = (void *)(ptr + 1);
7295 
7296 			switch (subevent) {
7297 			case HCI_EV_LE_ADVERTISING_REPORT:
7298 				bacpy(&hdev->wake_addr, &adv->bdaddr);
7299 				hdev->wake_addr_type = adv->bdaddr_type;
7300 				break;
7301 			case HCI_EV_LE_DIRECT_ADV_REPORT:
7302 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7303 				hdev->wake_addr_type = direct_adv->bdaddr_type;
7304 				break;
7305 			case HCI_EV_LE_EXT_ADV_REPORT:
7306 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7307 				hdev->wake_addr_type = ext_adv->bdaddr_type;
7308 				break;
7309 			}
7310 		}
7311 	} else {
7312 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7313 	}
7314 
7315 unlock:
7316 	hci_dev_unlock(hdev);
7317 }
7318 
7319 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7320 [_op] = { \
7321 	.req = false, \
7322 	.func = _func, \
7323 	.min_len = _min_len, \
7324 	.max_len = _max_len, \
7325 }
7326 
7327 #define HCI_EV(_op, _func, _len) \
7328 	HCI_EV_VL(_op, _func, _len, _len)
7329 
7330 #define HCI_EV_STATUS(_op, _func) \
7331 	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7332 
7333 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7334 [_op] = { \
7335 	.req = true, \
7336 	.func_req = _func, \
7337 	.min_len = _min_len, \
7338 	.max_len = _max_len, \
7339 }
7340 
7341 #define HCI_EV_REQ(_op, _func, _len) \
7342 	HCI_EV_REQ_VL(_op, _func, _len, _len)
7343 
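/* HCI_EV_REQ entries differ from HCI_EV ones only in setting .req = true
 * and initializing the func_req union member, whose extended signature
 * allows the handler to complete a pending request.
 */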
7344 /* Entries in this table shall be positioned according to the event opcode
7345  * they handle, so use of the macros above is recommended since they
7346  * initialize each entry at its proper index using designated initializers;
7347  * that way events without a callback function need no entry.
7348  */
7349 static const struct hci_ev {
7350 	bool req;
7351 	union {
7352 		void (*func)(struct hci_dev *hdev, void *data,
7353 			     struct sk_buff *skb);
7354 		void (*func_req)(struct hci_dev *hdev, void *data,
7355 				 struct sk_buff *skb, u16 *opcode, u8 *status,
7356 				 hci_req_complete_t *req_complete,
7357 				 hci_req_complete_skb_t *req_complete_skb);
7358 	};
7359 	u16  min_len;
7360 	u16  max_len;
7361 } hci_ev_table[U8_MAX + 1] = {
7362 	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7363 	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7364 	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7365 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7366 		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7367 	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7368 	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7369 	       sizeof(struct hci_ev_conn_complete)),
7370 	/* [0x04 = HCI_EV_CONN_REQUEST] */
7371 	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7372 	       sizeof(struct hci_ev_conn_request)),
7373 	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7374 	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7375 	       sizeof(struct hci_ev_disconn_complete)),
7376 	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7377 	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7378 	       sizeof(struct hci_ev_auth_complete)),
7379 	/* [0x07 = HCI_EV_REMOTE_NAME] */
7380 	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7381 	       sizeof(struct hci_ev_remote_name)),
7382 	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7383 	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7384 	       sizeof(struct hci_ev_encrypt_change)),
7385 	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7386 	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7387 	       hci_change_link_key_complete_evt,
7388 	       sizeof(struct hci_ev_change_link_key_complete)),
7389 	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7390 	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7391 	       sizeof(struct hci_ev_remote_features)),
7392 	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7393 	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7394 		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7395 	/* [0x0f = HCI_EV_CMD_STATUS] */
7396 	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7397 		   sizeof(struct hci_ev_cmd_status)),
7398 	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7399 	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7400 	       sizeof(struct hci_ev_hardware_error)),
7401 	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7402 	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7403 	       sizeof(struct hci_ev_role_change)),
7404 	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7405 	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7406 		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7407 	/* [0x14 = HCI_EV_MODE_CHANGE] */
7408 	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7409 	       sizeof(struct hci_ev_mode_change)),
7410 	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7411 	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7412 	       sizeof(struct hci_ev_pin_code_req)),
7413 	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7414 	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7415 	       sizeof(struct hci_ev_link_key_req)),
7416 	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7417 	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7418 	       sizeof(struct hci_ev_link_key_notify)),
7419 	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7420 	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7421 	       sizeof(struct hci_ev_clock_offset)),
7422 	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7423 	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7424 	       sizeof(struct hci_ev_pkt_type_change)),
7425 	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7426 	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7427 	       sizeof(struct hci_ev_pscan_rep_mode)),
7428 	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7429 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7430 		  hci_inquiry_result_with_rssi_evt,
7431 		  sizeof(struct hci_ev_inquiry_result_rssi),
7432 		  HCI_MAX_EVENT_SIZE),
7433 	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7434 	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7435 	       sizeof(struct hci_ev_remote_ext_features)),
7436 	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7437 	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7438 	       sizeof(struct hci_ev_sync_conn_complete)),
7439 	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7440 	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7441 		  hci_extended_inquiry_result_evt,
7442 		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7443 	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7444 	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7445 	       sizeof(struct hci_ev_key_refresh_complete)),
7446 	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7447 	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7448 	       sizeof(struct hci_ev_io_capa_request)),
7449 	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7450 	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7451 	       sizeof(struct hci_ev_io_capa_reply)),
7452 	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7453 	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7454 	       sizeof(struct hci_ev_user_confirm_req)),
7455 	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7456 	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7457 	       sizeof(struct hci_ev_user_passkey_req)),
7458 	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7459 	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7460 	       sizeof(struct hci_ev_remote_oob_data_request)),
7461 	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7462 	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7463 	       sizeof(struct hci_ev_simple_pair_complete)),
7464 	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7465 	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7466 	       sizeof(struct hci_ev_user_passkey_notify)),
7467 	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7468 	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7469 	       sizeof(struct hci_ev_keypress_notify)),
7470 	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7471 	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7472 	       sizeof(struct hci_ev_remote_host_features)),
7473 	/* [0x3e = HCI_EV_LE_META] */
7474 	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7475 		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7476 	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7477 	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7478 	       sizeof(struct hci_ev_num_comp_blocks)),
7479 	/* [0xff = HCI_EV_VENDOR] */
7480 	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7481 };
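
/* A minimal sketch (illustrative; 0x0a merely happens to be unused in the
 * table above): array elements without a designated initializer are
 * zero-initialized by C language rules, so their callback pointer is NULL
 * and hci_event_func() below drops such events without a handler:
 *
 *	const struct hci_ev *ev = &hci_ev_table[0x0a];
 *
 *	if (!ev->func)	// true: no entry was declared for 0x0a
 *		return;	// event is silently ignored
 */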
7482 
7483 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7484 			   u16 *opcode, u8 *status,
7485 			   hci_req_complete_t *req_complete,
7486 			   hci_req_complete_skb_t *req_complete_skb)
7487 {
7488 	const struct hci_ev *ev = &hci_ev_table[event];
7489 	void *data;
7490 
7491 	if (!ev->func)
7492 		return;
7493 
7494 	if (skb->len < ev->min_len) {
7495 		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7496 			   event, skb->len, ev->min_len);
7497 		return;
7498 	}
7499 
7500 	/* Just warn if the length is over max_len, since it may still be
7501 	 * possible to partially parse the event, so leave it to the
7502 	 * callback to decide whether that is acceptable.
7503 	 */
7504 	if (skb->len > ev->max_len)
7505 		bt_dev_warn_ratelimited(hdev,
7506 					"unexpected event 0x%2.2x length: %u > %u",
7507 					event, skb->len, ev->max_len);
7508 
7509 	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7510 	if (!data)
7511 		return;
7512 
7513 	if (ev->req)
7514 		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7515 			     req_complete_skb);
7516 	else
7517 		ev->func(hdev, data, skb);
7518 }
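
/* Length-policy sketch (illustrative numbers only): for a fixed-size entry
 * with min_len == max_len == 4, hci_event_func() behaves as follows:
 *
 *	skb->len == 3  ->  bt_dev_err() and drop, too short to parse
 *	skb->len == 4  ->  exact match, the handler runs normally
 *	skb->len == 9  ->  rate-limited warning, the handler still runs on
 *			   the first 4 bytes; the excess stays in the skb
 */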
7519 
7520 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7521 {
7522 	struct hci_event_hdr *hdr = (void *) skb->data;
7523 	hci_req_complete_t req_complete = NULL;
7524 	hci_req_complete_skb_t req_complete_skb = NULL;
7525 	struct sk_buff *orig_skb = NULL;
7526 	u8 status = 0, event, req_evt = 0;
7527 	u16 opcode = HCI_OP_NOP;
7528 
7529 	if (skb->len < sizeof(*hdr)) {
7530 		bt_dev_err(hdev, "Malformed HCI Event");
7531 		goto done;
7532 	}
7533 
7534 	kfree_skb(hdev->recv_event);
7535 	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7536 
7537 	event = hdr->evt;
7538 	if (!event) {
7539 		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7540 			    event);
7541 		goto done;
7542 	}
7543 
7544 	/* Only match the event if the command's OGF is not the LE Controller group */
7545 	if (hdev->req_skb &&
7546 	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
7547 	    hci_skb_event(hdev->req_skb) == event) {
7548 		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
7549 				     status, &req_complete, &req_complete_skb);
7550 		req_evt = event;
7551 	}
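
	/* Illustrative note (values assumed from hci.h): the OGF is the top
	 * 6 bits of the 16-bit opcode, so for an LE command such as opcode
	 * 0x200c, hci_opcode_ogf() yields 0x200c >> 10 == 0x08, the LE
	 * Controller group, and the match above is skipped.
	 */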
7552 
7553 	/* If it looks like we might end up having to call
7554 	 * req_complete_skb, store a pristine copy of the skb since the
7555 	 * various handlers may modify the original one through
7556 	 * skb_pull() calls, etc.
7557 	 */
7558 	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7559 	    event == HCI_EV_CMD_COMPLETE)
7560 		orig_skb = skb_clone(skb, GFP_KERNEL);
7561 
7562 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
7563 
7564 	/* Store wake reason if we're suspended */
7565 	hci_store_wake_reason(hdev, event, skb);
7566 
7567 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
7568 
7569 	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7570 		       &req_complete_skb);
7571 
7572 	if (req_complete) {
7573 		req_complete(hdev, status, opcode);
7574 	} else if (req_complete_skb) {
7575 		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7576 			kfree_skb(orig_skb);
7577 			orig_skb = NULL;
7578 		}
7579 		req_complete_skb(hdev, status, opcode, orig_skb);
7580 	}
7581 
7582 done:
7583 	kfree_skb(orig_skb);
7584 	kfree_skb(skb);
7585 	hdev->stat.evt_rx++;
7586 }
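
/* End-to-end sketch (a simplification; the actual dispatch happens in the
 * RX work of hci_core.c): a driver queues a received event and the RX path
 * hands HCI_EVENT_PKT buffers to hci_event_packet() above, which consumes
 * and frees the skb:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	skb_put_data(skb, buf, len);
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */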
7587