xref: /linux/net/bluetooth/hci_event.c (revision 9d106c6dd81bb26ad7fc3ee89cb1d62557c8e2c9)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
41 
42 /* Handle HCI Event packets */
43 
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
45 {
46 	__u8 status = *((__u8 *) skb->data);
47 
48 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
49 
50 	if (status)
51 		return;
52 
53 	clear_bit(HCI_INQUIRY, &hdev->flags);
54 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
56 
57 	hci_dev_lock(hdev);
58 	/* Set discovery state to stopped if we're not doing LE active
59 	 * scanning.
60 	 */
61 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
63 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
64 	hci_dev_unlock(hdev);
65 
66 	hci_conn_check_pending(hdev);
67 }
68 
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
70 {
71 	__u8 status = *((__u8 *) skb->data);
72 
73 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
74 
75 	if (status)
76 		return;
77 
78 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
79 }
80 
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
82 {
83 	__u8 status = *((__u8 *) skb->data);
84 
85 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
86 
87 	if (status)
88 		return;
89 
90 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
91 
92 	hci_conn_check_pending(hdev);
93 }
94 
/* Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 *
 * Nothing to update here; this handler only logs that the command
 * completed.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
100 
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
102 {
103 	struct hci_rp_role_discovery *rp = (void *) skb->data;
104 	struct hci_conn *conn;
105 
106 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
107 
108 	if (rp->status)
109 		return;
110 
111 	hci_dev_lock(hdev);
112 
113 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
114 	if (conn)
115 		conn->role = rp->role;
116 
117 	hci_dev_unlock(hdev);
118 }
119 
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
121 {
122 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 	struct hci_conn *conn;
124 
125 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
126 
127 	if (rp->status)
128 		return;
129 
130 	hci_dev_lock(hdev);
131 
132 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 	if (conn)
134 		conn->link_policy = __le16_to_cpu(rp->policy);
135 
136 	hci_dev_unlock(hdev);
137 }
138 
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 {
141 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 	struct hci_conn *conn;
143 	void *sent;
144 
145 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
146 
147 	if (rp->status)
148 		return;
149 
150 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 	if (!sent)
152 		return;
153 
154 	hci_dev_lock(hdev);
155 
156 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
157 	if (conn)
158 		conn->link_policy = get_unaligned_le16(sent + 2);
159 
160 	hci_dev_unlock(hdev);
161 }
162 
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
164 					struct sk_buff *skb)
165 {
166 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
167 
168 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
169 
170 	if (rp->status)
171 		return;
172 
173 	hdev->link_policy = __le16_to_cpu(rp->policy);
174 }
175 
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
177 					 struct sk_buff *skb)
178 {
179 	__u8 status = *((__u8 *) skb->data);
180 	void *sent;
181 
182 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
183 
184 	if (status)
185 		return;
186 
187 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
188 	if (!sent)
189 		return;
190 
191 	hdev->link_policy = get_unaligned_le16(sent);
192 }
193 
/* Complete handler for HCI_OP_RESET.
 *
 * The controller has been reset, so drop all volatile state and return
 * the stack's cached settings to their power-on defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Cleared even on failure so that another reset may be issued */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Any advertising / scan response data programmed into the
	 * controller is gone after a reset, so drop the cached copies.
	 */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller forgets its white and resolving lists on reset */
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
226 
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
228 					struct sk_buff *skb)
229 {
230 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 	struct hci_cp_read_stored_link_key *sent;
232 
233 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
234 
235 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
236 	if (!sent)
237 		return;
238 
239 	if (!rp->status && sent->read_all == 0x01) {
240 		hdev->stored_max_keys = rp->max_keys;
241 		hdev->stored_num_keys = rp->num_keys;
242 	}
243 }
244 
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
246 					  struct sk_buff *skb)
247 {
248 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
249 
250 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
251 
252 	if (rp->status)
253 		return;
254 
255 	if (rp->num_keys <= hdev->stored_num_keys)
256 		hdev->stored_num_keys -= rp->num_keys;
257 	else
258 		hdev->stored_num_keys = 0;
259 }
260 
/* Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When mgmt controls the adapter, let mgmt report completion (success
 * or failure); otherwise, on success, mirror the name that was sent
 * into the cached dev_name.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the name parameter from the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
281 
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
283 {
284 	struct hci_rp_read_local_name *rp = (void *) skb->data;
285 
286 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
287 
288 	if (rp->status)
289 		return;
290 
291 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 	    hci_dev_test_flag(hdev, HCI_CONFIG))
293 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
294 }
295 
/* Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirrors the requested authentication setting into the
 * HCI_AUTH device flag; completion is also reported to mgmt when it is
 * managing the adapter.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* The single command parameter is the new auth setting */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
323 
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
325 {
326 	__u8 status = *((__u8 *) skb->data);
327 	__u8 param;
328 	void *sent;
329 
330 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
331 
332 	if (status)
333 		return;
334 
335 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
336 	if (!sent)
337 		return;
338 
339 	param = *((__u8 *) sent);
340 
341 	if (param)
342 		set_bit(HCI_ENCRYPT, &hdev->flags);
343 	else
344 		clear_bit(HCI_ENCRYPT, &hdev->flags);
345 }
346 
/* Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * On success, mirrors the requested inquiry/page scan bits into
 * hdev->flags; on failure only the discoverable timeout is reset.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		/* Scan state is unknown on error; drop any pending
		 * discoverable timeout.
		 */
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Inquiry scan enabled == discoverable */
	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	/* Page scan enabled == connectable */
	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
381 
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
383 {
384 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
385 
386 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
387 
388 	if (rp->status)
389 		return;
390 
391 	memcpy(hdev->dev_class, rp->dev_class, 3);
392 
393 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
395 }
396 
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
398 {
399 	__u8 status = *((__u8 *) skb->data);
400 	void *sent;
401 
402 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
403 
404 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 	if (!sent)
406 		return;
407 
408 	hci_dev_lock(hdev);
409 
410 	if (status == 0)
411 		memcpy(hdev->dev_class, sent, 3);
412 
413 	if (hci_dev_test_flag(hdev, HCI_MGMT))
414 		mgmt_set_class_of_dev_complete(hdev, sent, status);
415 
416 	hci_dev_unlock(hdev);
417 }
418 
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
420 {
421 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
422 	__u16 setting;
423 
424 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
425 
426 	if (rp->status)
427 		return;
428 
429 	setting = __le16_to_cpu(rp->voice_setting);
430 
431 	if (hdev->voice_setting == setting)
432 		return;
433 
434 	hdev->voice_setting = setting;
435 
436 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
437 
438 	if (hdev->notify)
439 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
440 }
441 
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
443 				       struct sk_buff *skb)
444 {
445 	__u8 status = *((__u8 *) skb->data);
446 	__u16 setting;
447 	void *sent;
448 
449 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
450 
451 	if (status)
452 		return;
453 
454 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
455 	if (!sent)
456 		return;
457 
458 	setting = get_unaligned_le16(sent);
459 
460 	if (hdev->voice_setting == setting)
461 		return;
462 
463 	hdev->voice_setting = setting;
464 
465 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
466 
467 	if (hdev->notify)
468 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
469 }
470 
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
472 					  struct sk_buff *skb)
473 {
474 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
475 
476 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
477 
478 	if (rp->status)
479 		return;
480 
481 	hdev->num_iac = rp->num_iac;
482 
483 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
484 }
485 
/* Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * Keeps the cached host feature bit (LMP_HOST_SSP) and, when mgmt is
 * not in control, the HCI_SSP_ENABLED flag in sync with the mode that
 * was written. Completion is reported to mgmt when it is managing the
 * adapter.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the new mode into the host features page */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
517 
/* Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * On success, mirrors the Secure Connections host support setting into
 * the cached host feature bit (LMP_HOST_SC) and, when mgmt is not in
 * control, into the HCI_SC_ENABLED device flag.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the new support setting into the host features */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
547 
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
549 {
550 	struct hci_rp_read_local_version *rp = (void *) skb->data;
551 
552 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
553 
554 	if (rp->status)
555 		return;
556 
557 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 		hdev->hci_ver = rp->hci_ver;
560 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 		hdev->lmp_ver = rp->lmp_ver;
562 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
564 	}
565 }
566 
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
568 				       struct sk_buff *skb)
569 {
570 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
571 
572 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573 
574 	if (rp->status)
575 		return;
576 
577 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 	    hci_dev_test_flag(hdev, HCI_CONFIG))
579 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
580 }
581 
582 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
583 					     struct sk_buff *skb)
584 {
585 	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
586 	struct hci_conn *conn;
587 
588 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
589 
590 	if (rp->status)
591 		return;
592 
593 	hci_dev_lock(hdev);
594 
595 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
596 	if (conn)
597 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
598 
599 	hci_dev_unlock(hdev);
600 }
601 
602 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
603 					      struct sk_buff *skb)
604 {
605 	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
606 	struct hci_conn *conn;
607 	void *sent;
608 
609 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610 
611 	if (rp->status)
612 		return;
613 
614 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
615 	if (!sent)
616 		return;
617 
618 	hci_dev_lock(hdev);
619 
620 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
621 	if (conn)
622 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
623 
624 	hci_dev_unlock(hdev);
625 }
626 
/* Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches the LMP features page 0 and derives the usable ACL packet
 * types and (e)SCO packet types from the feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
676 
677 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
678 					   struct sk_buff *skb)
679 {
680 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
681 
682 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
683 
684 	if (rp->status)
685 		return;
686 
687 	if (hdev->max_page < rp->max_page)
688 		hdev->max_page = rp->max_page;
689 
690 	if (rp->page < HCI_MAX_PAGES)
691 		memcpy(hdev->features[rp->page], rp->features, 8);
692 }
693 
694 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
695 					  struct sk_buff *skb)
696 {
697 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
698 
699 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
700 
701 	if (rp->status)
702 		return;
703 
704 	hdev->flow_ctl_mode = rp->mode;
705 }
706 
/* Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Records the controller's ACL/SCO MTUs and packet counts which drive
 * the packet-based flow control accounting.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Quirked controllers report unusable SCO buffer values; fall
	 * back to known-safe defaults.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* Start with the full transmit window available */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
732 
733 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
734 {
735 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
736 
737 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
738 
739 	if (rp->status)
740 		return;
741 
742 	if (test_bit(HCI_INIT, &hdev->flags))
743 		bacpy(&hdev->bdaddr, &rp->bdaddr);
744 
745 	if (hci_dev_test_flag(hdev, HCI_SETUP))
746 		bacpy(&hdev->setup_addr, &rp->bdaddr);
747 }
748 
749 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
750 					   struct sk_buff *skb)
751 {
752 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
753 
754 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
755 
756 	if (rp->status)
757 		return;
758 
759 	if (test_bit(HCI_INIT, &hdev->flags)) {
760 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
761 		hdev->page_scan_window = __le16_to_cpu(rp->window);
762 	}
763 }
764 
765 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
766 					    struct sk_buff *skb)
767 {
768 	u8 status = *((u8 *) skb->data);
769 	struct hci_cp_write_page_scan_activity *sent;
770 
771 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
772 
773 	if (status)
774 		return;
775 
776 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
777 	if (!sent)
778 		return;
779 
780 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
781 	hdev->page_scan_window = __le16_to_cpu(sent->window);
782 }
783 
784 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
785 					   struct sk_buff *skb)
786 {
787 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
788 
789 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
790 
791 	if (rp->status)
792 		return;
793 
794 	if (test_bit(HCI_INIT, &hdev->flags))
795 		hdev->page_scan_type = rp->type;
796 }
797 
798 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
799 					struct sk_buff *skb)
800 {
801 	u8 status = *((u8 *) skb->data);
802 	u8 *type;
803 
804 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
805 
806 	if (status)
807 		return;
808 
809 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
810 	if (type)
811 		hdev->page_scan_type = *type;
812 }
813 
814 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
815 					struct sk_buff *skb)
816 {
817 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
818 
819 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
820 
821 	if (rp->status)
822 		return;
823 
824 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
825 	hdev->block_len = __le16_to_cpu(rp->block_len);
826 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
827 
828 	hdev->block_cnt = hdev->num_blocks;
829 
830 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
831 	       hdev->block_cnt, hdev->block_len);
832 }
833 
/* Complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" parameter of the command we sent, the
 * reported clock is either the local clock, cached on hdev, or a
 * piconet clock, cached on the matching connection together with its
 * accuracy.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before reading any field */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* 0x00 requested the local clock */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
868 
/* Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * Caches the AMP controller capabilities reported by the controller:
 * bandwidth, latency, PDU and association sizes, and flush timeouts.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
890 
891 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
892 					 struct sk_buff *skb)
893 {
894 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
895 
896 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
897 
898 	if (rp->status)
899 		return;
900 
901 	hdev->inq_tx_power = rp->tx_power;
902 }
903 
904 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
905 					       struct sk_buff *skb)
906 {
907 	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
908 
909 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
910 
911 	if (rp->status)
912 		return;
913 
914 	hdev->err_data_reporting = rp->err_data_reporting;
915 }
916 
917 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
918 						struct sk_buff *skb)
919 {
920 	__u8 status = *((__u8 *)skb->data);
921 	struct hci_cp_write_def_err_data_reporting *cp;
922 
923 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
924 
925 	if (status)
926 		return;
927 
928 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
929 	if (!cp)
930 		return;
931 
932 	hdev->err_data_reporting = cp->err_data_reporting;
933 }
934 
/* Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Notifies mgmt of the command status and, on success, records the
 * PIN length on the matching connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* Recover the PIN parameters from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
962 
963 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
964 {
965 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
966 
967 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
968 
969 	hci_dev_lock(hdev);
970 
971 	if (hci_dev_test_flag(hdev, HCI_MGMT))
972 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
973 						 rp->status);
974 
975 	hci_dev_unlock(hdev);
976 }
977 
978 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
979 				       struct sk_buff *skb)
980 {
981 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
982 
983 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
984 
985 	if (rp->status)
986 		return;
987 
988 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
989 	hdev->le_pkts = rp->le_max_pkt;
990 
991 	hdev->le_cnt = hdev->le_pkts;
992 
993 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
994 }
995 
996 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
997 					  struct sk_buff *skb)
998 {
999 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1000 
1001 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1002 
1003 	if (rp->status)
1004 		return;
1005 
1006 	memcpy(hdev->le_features, rp->features, 8);
1007 }
1008 
1009 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1010 					struct sk_buff *skb)
1011 {
1012 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1013 
1014 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1015 
1016 	if (rp->status)
1017 		return;
1018 
1019 	hdev->adv_tx_power = rp->tx_power;
1020 }
1021 
1022 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1023 {
1024 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1025 
1026 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1027 
1028 	hci_dev_lock(hdev);
1029 
1030 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1031 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1032 						 rp->status);
1033 
1034 	hci_dev_unlock(hdev);
1035 }
1036 
1037 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1038 					  struct sk_buff *skb)
1039 {
1040 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1041 
1042 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1043 
1044 	hci_dev_lock(hdev);
1045 
1046 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1047 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1048 						     ACL_LINK, 0, rp->status);
1049 
1050 	hci_dev_unlock(hdev);
1051 }
1052 
1053 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1054 {
1055 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1056 
1057 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1058 
1059 	hci_dev_lock(hdev);
1060 
1061 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1062 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1063 						 0, rp->status);
1064 
1065 	hci_dev_unlock(hdev);
1066 }
1067 
1068 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1069 					  struct sk_buff *skb)
1070 {
1071 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1072 
1073 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1074 
1075 	hci_dev_lock(hdev);
1076 
1077 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1078 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1079 						     ACL_LINK, 0, rp->status);
1080 
1081 	hci_dev_unlock(hdev);
1082 }
1083 
/* Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 *
 * The reply payload is not consumed here; this handler only logs the
 * command status.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1091 
/* Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * The reply payload is not consumed here; this handler only logs the
 * command status.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1099 
1100 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1101 {
1102 	__u8 status = *((__u8 *) skb->data);
1103 	bdaddr_t *sent;
1104 
1105 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1106 
1107 	if (status)
1108 		return;
1109 
1110 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1111 	if (!sent)
1112 		return;
1113 
1114 	hci_dev_lock(hdev);
1115 
1116 	bacpy(&hdev->random_addr, sent);
1117 
1118 	hci_dev_unlock(hdev);
1119 }
1120 
1121 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1122 {
1123 	__u8 status = *((__u8 *) skb->data);
1124 	struct hci_cp_le_set_default_phy *cp;
1125 
1126 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1127 
1128 	if (status)
1129 		return;
1130 
1131 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1132 	if (!cp)
1133 		return;
1134 
1135 	hci_dev_lock(hdev);
1136 
1137 	hdev->le_tx_def_phys = cp->tx_phys;
1138 	hdev->le_rx_def_phys = cp->rx_phys;
1139 
1140 	hci_dev_unlock(hdev);
1141 }
1142 
1143 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1144                                               struct sk_buff *skb)
1145 {
1146 	__u8 status = *((__u8 *) skb->data);
1147 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1148 	struct adv_info *adv_instance;
1149 
1150 	if (status)
1151 		return;
1152 
1153 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1154 	if (!cp)
1155 		return;
1156 
1157 	hci_dev_lock(hdev);
1158 
1159 	if (!hdev->cur_adv_instance) {
1160 		/* Store in hdev for instance 0 (Set adv and Directed advs) */
1161 		bacpy(&hdev->random_addr, &cp->bdaddr);
1162 	} else {
1163 		adv_instance = hci_find_adv_instance(hdev,
1164 						     hdev->cur_adv_instance);
1165 		if (adv_instance)
1166 			bacpy(&adv_instance->random_addr, &cp->bdaddr);
1167 	}
1168 
1169 	hci_dev_unlock(hdev);
1170 }
1171 
/* Command Complete handler for LE Set Advertising Enable: track the
 * HCI_LE_ADV flag and arm a connection timeout when advertising was
 * enabled as part of a peripheral connection attempt.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1206 
/* Command Complete handler for LE Set Extended Advertising Enable.
 * Same logic as hci_cc_le_set_adv_enable but for the extended command:
 * track HCI_LE_ADV and arm the LE connection timeout if needed.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1240 
/* Command Complete handler for LE Set Scan Parameters: on success,
 * remember the scan type (active/passive) we configured.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1261 
/* Command Complete handler for LE Set Extended Scan Parameters: on
 * success, remember the scan type from the first per-PHY parameter
 * block that followed the fixed command header.
 */
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	/* Per-PHY parameters trail the fixed portion of the command */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
1286 
1287 static bool has_pending_adv_report(struct hci_dev *hdev)
1288 {
1289 	struct discovery_state *d = &hdev->discovery;
1290 
1291 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1292 }
1293 
1294 static void clear_pending_adv_report(struct hci_dev *hdev)
1295 {
1296 	struct discovery_state *d = &hdev->discovery;
1297 
1298 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1299 	d->last_adv_data_len = 0;
1300 }
1301 
/* Cache the most recent advertising report in the discovery state so
 * it can be flushed to mgmt when scanning stops (see
 * le_set_scan_enable_complete).
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1315 
/* Common completion logic for (extended) LE scan enable/disable.
 * Maintains the HCI_LE_SCAN flag, flushes any pending advertising
 * report on disable, and updates the discovery state machine.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scanning expects a scan-response merge, so any
		 * stale cached report must be discarded first.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1372 
/* Command Complete handler for LE Set Scan Enable: delegate to the
 * shared scan enable/disable completion logic on success.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1390 
/* Command Complete handler for LE Set Extended Scan Enable: delegate
 * to the shared scan enable/disable completion logic on success.
 */
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1408 
/* Command Complete handler for LE Read Number of Supported Advertising
 * Sets: cache the controller's reported count in hdev.
 */
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}
1422 
/* Command Complete handler for LE Read White List Size: cache the
 * controller's white list capacity in hdev.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}
1435 
/* Command Complete handler for LE Clear White List: on success, drop
 * the host-side mirror of the controller's white list.
 */
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
1448 
/* Command Complete handler for LE Add Device To White List: on
 * success, mirror the added entry into the host-side list.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			   sent->bdaddr_type);
}
1467 
/* Command Complete handler for LE Remove Device From White List: on
 * success, remove the matching entry from the host-side mirror.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1486 
/* Command Complete handler for LE Read Supported States: cache the
 * 8-byte LE state bitmap reported by the controller.
 */
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}
1499 
/* Command Complete handler for LE Read Suggested Default Data Length:
 * cache the default TX octets/time values (little-endian on the wire).
 */
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}
1513 
/* Command Complete handler for LE Write Suggested Default Data Length:
 * on success, mirror the values we sent into hdev.
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
1532 
/* Command Complete handler for LE Add Device To Resolving List: on
 * success, mirror the entry (including peer/local IRKs) into the
 * host-side resolving list.
 */
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				sent->bdaddr_type, sent->peer_irk,
				sent->local_irk);
}
1552 
/* Command Complete handler for LE Remove Device From Resolving List:
 * on success, remove the matching entry from the host-side mirror.
 */
static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1571 
/* Command Complete handler for LE Clear Resolving List: on success,
 * drop the host-side mirror of the controller's resolving list.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
1584 
/* Command Complete handler for LE Read Resolving List Size: cache the
 * controller's resolving list capacity in hdev.
 */
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}
1597 
/* Command Complete handler for LE Set Address Resolution Enable: track
 * the HCI_LL_RPA_RESOLUTION flag to mirror the controller state.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
1621 
/* Command Complete handler for LE Read Maximum Data Length: cache the
 * controller's supported maximum TX/RX octets and times.
 */
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}
1637 
/* Command Complete handler for Write LE Host Supported: keep the host
 * feature bits (features[1][0]) and the HCI_LE_ENABLED/HCI_ADVERTISING
 * flags in sync with what we asked the controller to enable.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		/* Disabling LE also invalidates any advertising state */
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1671 
/* Command Complete handler for LE Set Advertising Parameters: on
 * success, remember the own-address type we configured.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1690 
/* Command Complete handler for LE Set Extended Advertising Parameters:
 * store the own-address type and the selected TX power (reported in
 * the reply), then refresh the advertising data that embeds it.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}
1721 
/* Command Complete handler for Read RSSI: store the reported RSSI on
 * the connection matching the reply's handle, if it still exists.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1740 
/* Command Complete handler for Read Transmit Power Level: store the
 * current (type 0x00) or maximum (type 0x01) TX power on the matching
 * connection, depending on which level was requested.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* The request type is only present in the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1774 
/* Command Complete handler for Write SSP Debug Mode: on success,
 * mirror the mode byte we sent into hdev->ssp_debug_mode.
 */
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}
1789 
/* Command Status handler for Inquiry: on failure, kick any pending
 * connection attempts; on success, mark inquiry as in progress.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}
1801 
/* Command Status handler for Create Connection. On failure, tear down
 * or retry the matching ACL connection object; on success, make sure a
 * connection object exists for the outgoing attempt.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: retry up to two
			 * attempts before giving up on the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1839 
/* Command Status handler for Add SCO Connection. Only failures need
 * handling: close and delete the SCO link hanging off the ACL whose
 * handle was in the failed command.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1874 
/* Command Status handler for Authentication Requested. On failure
 * during connection setup (BT_CONFIG), notify upper layers and drop
 * the reference taken for the authentication attempt.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1901 
/* Command Status handler for Set Connection Encryption. On failure
 * during connection setup (BT_CONFIG), notify upper layers and drop
 * the reference taken for the encryption attempt.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1928 
/* Decide whether an outgoing connection in BT_CONFIG still needs an
 * explicit Authentication Requested command. Returns 1 if so, 0
 * otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	/* SDP-level security never requires authentication */
	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
1950 
/* Send a Remote Name Request for an inquiry cache entry, reusing the
 * page-scan parameters and clock offset learned during inquiry.
 * Returns the result of hci_send_cmd (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
				   struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
1965 
/* Kick off name resolution for the next cache entry that still needs
 * it. Returns true if a request was sent (entry moved to
 * NAME_PENDING), false if nothing is left to resolve or sending
 * failed.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}
1985 
/* Handle completion of a remote name lookup during discovery: report
 * the device as connected to mgmt if appropriate, record the resolved
 * name (or mark it unknown), and either continue resolving the next
 * entry or finish discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* NULL name means the lookup failed for this device */
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2034 
/* Command Status handler for Remote Name Request. Failures are handled
 * here: update the pending-name bookkeeping and, if the connection
 * still needs authentication, issue Authentication Requested now
 * instead of waiting for a name event that will never arrive.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2077 
/* Command Status handler for Read Remote Features. On failure during
 * connection setup (BT_CONFIG), notify upper layers and drop the
 * reference taken for the request.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2104 
/* Command Status handler for Read Remote Extended Features. Mirrors
 * hci_cs_read_remote_features: on setup failure, notify and drop.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2131 
/* Command Status handler for Setup Synchronous Connection. Only
 * failures need handling: close and delete the SCO/eSCO link hanging
 * off the ACL whose handle was in the failed command.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2166 
/* Command Status handler for Sniff Mode. On failure, clear the pending
 * mode-change flag and finish any SCO setup that was waiting for the
 * mode change.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2193 
/* Command Status handler for Exit Sniff Mode. Mirrors
 * hci_cs_sniff_mode: on failure, clear the pending mode-change flag
 * and complete any deferred SCO setup.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2220 
2221 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2222 {
2223 	struct hci_cp_disconnect *cp;
2224 	struct hci_conn *conn;
2225 
2226 	if (!status)
2227 		return;
2228 
2229 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2230 	if (!cp)
2231 		return;
2232 
2233 	hci_dev_lock(hdev);
2234 
2235 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2236 	if (conn) {
2237 		u8 type = conn->type;
2238 
2239 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2240 				       conn->dst_type, status);
2241 
2242 		/* If the disconnection failed for any reason, the upper layer
2243 		 * does not retry to disconnect in current implementation.
2244 		 * Hence, we need to do some basic cleanup here and re-enable
2245 		 * advertising if necessary.
2246 		 */
2247 		hci_conn_del(conn);
2248 		if (type == LE_LINK)
2249 			hci_req_reenable_advertising(hdev);
2250 	}
2251 
2252 	hci_dev_unlock(hdev);
2253 }
2254 
/* Common Command Status follow-up for LE (Extended) Create Connection:
 * record the initiator/responder addresses needed by SMP and arm a
 * connection timeout when not connecting via the white list.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2289 
/* Command Status handler for LE Create Connection: on success, forward
 * the sent parameters to the common connection bookkeeping helper.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}
2314 
/* Command Status handler for LE Extended Create Connection: same as
 * hci_cs_le_create_conn but for the extended command layout.
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}
2339 
2340 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2341 {
2342 	struct hci_cp_le_read_remote_features *cp;
2343 	struct hci_conn *conn;
2344 
2345 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2346 
2347 	if (!status)
2348 		return;
2349 
2350 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2351 	if (!cp)
2352 		return;
2353 
2354 	hci_dev_lock(hdev);
2355 
2356 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2357 	if (conn) {
2358 		if (conn->state == BT_CONFIG) {
2359 			hci_connect_cfm(conn, status);
2360 			hci_conn_drop(conn);
2361 		}
2362 	}
2363 
2364 	hci_dev_unlock(hdev);
2365 }
2366 
2367 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2368 {
2369 	struct hci_cp_le_start_enc *cp;
2370 	struct hci_conn *conn;
2371 
2372 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2373 
2374 	if (!status)
2375 		return;
2376 
2377 	hci_dev_lock(hdev);
2378 
2379 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2380 	if (!cp)
2381 		goto unlock;
2382 
2383 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2384 	if (!conn)
2385 		goto unlock;
2386 
2387 	if (conn->state != BT_CONNECTED)
2388 		goto unlock;
2389 
2390 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2391 	hci_conn_drop(conn);
2392 
2393 unlock:
2394 	hci_dev_unlock(hdev);
2395 }
2396 
2397 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2398 {
2399 	struct hci_cp_switch_role *cp;
2400 	struct hci_conn *conn;
2401 
2402 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2403 
2404 	if (!status)
2405 		return;
2406 
2407 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2408 	if (!cp)
2409 		return;
2410 
2411 	hci_dev_lock(hdev);
2412 
2413 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2414 	if (conn)
2415 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2416 
2417 	hci_dev_unlock(hdev);
2418 }
2419 
/* Handle the HCI Inquiry Complete event.
 *
 * Clears HCI_INQUIRY and wakes any waiter blocked on that flag bit.
 * When the mgmt interface is in use, it also advances the discovery
 * state machine: either kick off remote name resolution for cached
 * entries that still need a name, or mark discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Fire off any connection attempts that were deferred while the
	 * inquiry was in progress.
	 */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state tracking below only applies to mgmt-driven
	 * discovery; legacy ioctl-based inquiry stops here.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Entries remain that need their remote name resolved; start with
	 * the first one and enter the RESOLVING state. If starting the
	 * name request fails, fall back to finishing discovery.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2478 
2479 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2480 {
2481 	struct inquiry_data data;
2482 	struct inquiry_info *info = (void *) (skb->data + 1);
2483 	int num_rsp = *((__u8 *) skb->data);
2484 
2485 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2486 
2487 	if (!num_rsp)
2488 		return;
2489 
2490 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2491 		return;
2492 
2493 	hci_dev_lock(hdev);
2494 
2495 	for (; num_rsp; num_rsp--, info++) {
2496 		u32 flags;
2497 
2498 		bacpy(&data.bdaddr, &info->bdaddr);
2499 		data.pscan_rep_mode	= info->pscan_rep_mode;
2500 		data.pscan_period_mode	= info->pscan_period_mode;
2501 		data.pscan_mode		= info->pscan_mode;
2502 		memcpy(data.dev_class, info->dev_class, 3);
2503 		data.clock_offset	= info->clock_offset;
2504 		data.rssi		= HCI_RSSI_INVALID;
2505 		data.ssp_mode		= 0x00;
2506 
2507 		flags = hci_inquiry_cache_update(hdev, &data, false);
2508 
2509 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2510 				  info->dev_class, HCI_RSSI_INVALID,
2511 				  flags, NULL, 0, NULL, 0);
2512 	}
2513 
2514 	hci_dev_unlock(hdev);
2515 }
2516 
/* Handle the HCI Connection Complete event (BR/EDR ACL, SCO or eSCO).
 *
 * Looks up the hci_conn matching the event (creating one for incoming
 * ACL links remembered in the inquiry cache), then either finalizes the
 * connection setup on success or tears the connection down on failure.
 *
 * NOTE(review): a duplicate Connection Complete for an already set-up
 * connection would re-run this setup path — confirm the controller
 * cannot emit the event twice for the same link.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the inquiry
		 * cache to see if we've already discovered this bdaddr before.
		 * If found and link is an ACL type, create a connection class
		 * automatically.
		 */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie && ev->link_type == ACL_LINK) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		}

		/* An eSCO setup may complete as plain SCO: retry the lookup
		 * with ESCO_LINK and downgrade the link type on a match.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* An incoming legacy (non-SSP) link without a stored
			 * link key is about to pair; give it the longer
			 * pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		/* Propagate adapter-wide auth/encrypt settings to the link. */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links defer the connect confirmation until the remote
		 * features (and possibly authentication) are done.
		 */
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2617 
2618 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2619 {
2620 	struct hci_cp_reject_conn_req cp;
2621 
2622 	bacpy(&cp.bdaddr, bdaddr);
2623 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2624 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2625 }
2626 
/* Handle the HCI Connection Request event.
 *
 * Decides whether to accept or reject an incoming BR/EDR connection:
 * the protocol layers, the blacklist and (under mgmt) the connectable
 * setting / whitelist are consulted, and on acceptance the appropriate
 * accept command (ACL or synchronous) is sent, unless a protocol asked
 * to defer the decision.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto the connection or request deferral
	 * via @flags.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Blacklisted devices are always rejected. */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class while we have it. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	/* Accept ACL links directly; synchronous links are accepted with
	 * the sync variant when the controller is eSCO capable, otherwise
	 * with the plain accept command. A deferred SCO/eSCO request is
	 * left in BT_CONNECT2 for the protocol layer to decide.
	 */
	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Fixed 64 kbit/s bandwidth, no latency/retransmission
		 * constraints; voice setting comes from the adapter.
		 */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2721 
2722 static u8 hci_to_mgmt_reason(u8 err)
2723 {
2724 	switch (err) {
2725 	case HCI_ERROR_CONNECTION_TIMEOUT:
2726 		return MGMT_DEV_DISCONN_TIMEOUT;
2727 	case HCI_ERROR_REMOTE_USER_TERM:
2728 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2729 	case HCI_ERROR_REMOTE_POWER_OFF:
2730 		return MGMT_DEV_DISCONN_REMOTE;
2731 	case HCI_ERROR_LOCAL_HOST_TERM:
2732 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2733 	default:
2734 		return MGMT_DEV_DISCONN_UNKNOWN;
2735 	}
2736 }
2737 
/* Handle the HCI Disconnection Complete event.
 *
 * Reports the disconnect to mgmt, removes flushable link keys, moves
 * auto-connect parameters back onto the pending-connections list where
 * appropriate, deletes the connection, wakes a pending suspend waiter
 * once all links are gone and re-enables LE advertising if needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to userspace if the connection was
	 * previously announced as connected.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A recorded authentication failure overrides the controller's
	 * stated reason.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-arm auto-connection for devices configured to reconnect,
	 * either always or only after a link loss.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type before the conn object is freed below. */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2829 
/* Handle the HCI Authentication Complete event.
 *
 * Updates the connection's auth flags and security level, reports
 * failures to mgmt, and continues the connection flow: for SSP links
 * still in BT_CONFIG, encryption is requested next; otherwise the
 * auth result is confirmed to the upper layers. A pending encryption
 * request is also started (or failed) here.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Legacy (pre-SSP) devices cannot be re-authenticated on a
		 * live link, so keep the previous security level.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		/* Remember key-missing failures so the disconnect reason
		 * can be reported as an authentication failure later.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links must also be encrypted before they are
			 * reported as connected.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold+drop resets the disconnect timer for the link. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption change was requested while authentication was in
	 * flight: start it now, or fail it if authentication failed.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2899 
/* Handle the HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (or the failure) to the pending-name
 * machinery when mgmt is in use, then, if a connection to the peer
 * exists and needs outgoing authentication, issues the auth request.
 * Note that @conn may legitimately be NULL for the name-only path.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	/* ev->name is not guaranteed to be NUL-terminated; bound the
	 * length explicitly.
	 */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2941 
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE.
 *
 * Records the encryption key size of the link and then finishes the
 * deferred encryption-change notification: a BT_CONFIG connection is
 * promoted to BT_CONNECTED, otherwise the upper layers get an
 * encrypt-change confirmation reflecting the link's current flags.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same thing we do when this HCI command isn't
	 * supported).
	 *
	 * NOTE(review): assuming the maximum on failure means a weaker
	 * key cannot be detected in that case — confirm this is the
	 * intended policy.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		u8 encrypt;

		/* Derive the encrypt value to report: 0x00 = off,
		 * 0x01 = E0/legacy, 0x02 = AES-CCM.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2997 
/* Handle the HCI Encryption Change event.
 *
 * Updates the link's auth/encrypt/AES-CCM flags, enforces the Secure
 * Connections Only policy, reads the encryption key size for encrypted
 * ACL links (deferring the upper-layer notification to that command's
 * completion), optionally programs the authenticated payload timeout,
 * and finally confirms the change to the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link is fatal:
	 * disconnect with an authentication-failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links.
	 * On success the notification is completed asynchronously in
	 * read_enc_key_size_complete(), hence the goto unlock below.
	 */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
3122 
3123 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3124 					     struct sk_buff *skb)
3125 {
3126 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3127 	struct hci_conn *conn;
3128 
3129 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3130 
3131 	hci_dev_lock(hdev);
3132 
3133 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3134 	if (conn) {
3135 		if (!ev->status)
3136 			set_bit(HCI_CONN_SECURE, &conn->flags);
3137 
3138 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3139 
3140 		hci_key_change_cfm(conn, ev->status);
3141 	}
3142 
3143 	hci_dev_unlock(hdev);
3144 }
3145 
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote features and continues the connection
 * setup for links in BT_CONFIG: fetch extended features when both
 * sides support them, otherwise resolve the remote name (or announce
 * the device to mgmt) and, if no outgoing authentication is required,
 * complete the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both the local controller and the remote device must support
	 * extended features before page 1 can be requested.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the device, unless
	 * it was already reported as connected to mgmt.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3194 
3195 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3196 				 u16 *opcode, u8 *status,
3197 				 hci_req_complete_t *req_complete,
3198 				 hci_req_complete_skb_t *req_complete_skb)
3199 {
3200 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
3201 
3202 	*opcode = __le16_to_cpu(ev->opcode);
3203 	*status = skb->data[sizeof(*ev)];
3204 
3205 	skb_pull(skb, sizeof(*ev));
3206 
3207 	switch (*opcode) {
3208 	case HCI_OP_INQUIRY_CANCEL:
3209 		hci_cc_inquiry_cancel(hdev, skb);
3210 		break;
3211 
3212 	case HCI_OP_PERIODIC_INQ:
3213 		hci_cc_periodic_inq(hdev, skb);
3214 		break;
3215 
3216 	case HCI_OP_EXIT_PERIODIC_INQ:
3217 		hci_cc_exit_periodic_inq(hdev, skb);
3218 		break;
3219 
3220 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3221 		hci_cc_remote_name_req_cancel(hdev, skb);
3222 		break;
3223 
3224 	case HCI_OP_ROLE_DISCOVERY:
3225 		hci_cc_role_discovery(hdev, skb);
3226 		break;
3227 
3228 	case HCI_OP_READ_LINK_POLICY:
3229 		hci_cc_read_link_policy(hdev, skb);
3230 		break;
3231 
3232 	case HCI_OP_WRITE_LINK_POLICY:
3233 		hci_cc_write_link_policy(hdev, skb);
3234 		break;
3235 
3236 	case HCI_OP_READ_DEF_LINK_POLICY:
3237 		hci_cc_read_def_link_policy(hdev, skb);
3238 		break;
3239 
3240 	case HCI_OP_WRITE_DEF_LINK_POLICY:
3241 		hci_cc_write_def_link_policy(hdev, skb);
3242 		break;
3243 
3244 	case HCI_OP_RESET:
3245 		hci_cc_reset(hdev, skb);
3246 		break;
3247 
3248 	case HCI_OP_READ_STORED_LINK_KEY:
3249 		hci_cc_read_stored_link_key(hdev, skb);
3250 		break;
3251 
3252 	case HCI_OP_DELETE_STORED_LINK_KEY:
3253 		hci_cc_delete_stored_link_key(hdev, skb);
3254 		break;
3255 
3256 	case HCI_OP_WRITE_LOCAL_NAME:
3257 		hci_cc_write_local_name(hdev, skb);
3258 		break;
3259 
3260 	case HCI_OP_READ_LOCAL_NAME:
3261 		hci_cc_read_local_name(hdev, skb);
3262 		break;
3263 
3264 	case HCI_OP_WRITE_AUTH_ENABLE:
3265 		hci_cc_write_auth_enable(hdev, skb);
3266 		break;
3267 
3268 	case HCI_OP_WRITE_ENCRYPT_MODE:
3269 		hci_cc_write_encrypt_mode(hdev, skb);
3270 		break;
3271 
3272 	case HCI_OP_WRITE_SCAN_ENABLE:
3273 		hci_cc_write_scan_enable(hdev, skb);
3274 		break;
3275 
3276 	case HCI_OP_READ_CLASS_OF_DEV:
3277 		hci_cc_read_class_of_dev(hdev, skb);
3278 		break;
3279 
3280 	case HCI_OP_WRITE_CLASS_OF_DEV:
3281 		hci_cc_write_class_of_dev(hdev, skb);
3282 		break;
3283 
3284 	case HCI_OP_READ_VOICE_SETTING:
3285 		hci_cc_read_voice_setting(hdev, skb);
3286 		break;
3287 
3288 	case HCI_OP_WRITE_VOICE_SETTING:
3289 		hci_cc_write_voice_setting(hdev, skb);
3290 		break;
3291 
3292 	case HCI_OP_READ_NUM_SUPPORTED_IAC:
3293 		hci_cc_read_num_supported_iac(hdev, skb);
3294 		break;
3295 
3296 	case HCI_OP_WRITE_SSP_MODE:
3297 		hci_cc_write_ssp_mode(hdev, skb);
3298 		break;
3299 
3300 	case HCI_OP_WRITE_SC_SUPPORT:
3301 		hci_cc_write_sc_support(hdev, skb);
3302 		break;
3303 
3304 	case HCI_OP_READ_AUTH_PAYLOAD_TO:
3305 		hci_cc_read_auth_payload_timeout(hdev, skb);
3306 		break;
3307 
3308 	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3309 		hci_cc_write_auth_payload_timeout(hdev, skb);
3310 		break;
3311 
3312 	case HCI_OP_READ_LOCAL_VERSION:
3313 		hci_cc_read_local_version(hdev, skb);
3314 		break;
3315 
3316 	case HCI_OP_READ_LOCAL_COMMANDS:
3317 		hci_cc_read_local_commands(hdev, skb);
3318 		break;
3319 
3320 	case HCI_OP_READ_LOCAL_FEATURES:
3321 		hci_cc_read_local_features(hdev, skb);
3322 		break;
3323 
3324 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
3325 		hci_cc_read_local_ext_features(hdev, skb);
3326 		break;
3327 
3328 	case HCI_OP_READ_BUFFER_SIZE:
3329 		hci_cc_read_buffer_size(hdev, skb);
3330 		break;
3331 
3332 	case HCI_OP_READ_BD_ADDR:
3333 		hci_cc_read_bd_addr(hdev, skb);
3334 		break;
3335 
3336 	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3337 		hci_cc_read_page_scan_activity(hdev, skb);
3338 		break;
3339 
3340 	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3341 		hci_cc_write_page_scan_activity(hdev, skb);
3342 		break;
3343 
3344 	case HCI_OP_READ_PAGE_SCAN_TYPE:
3345 		hci_cc_read_page_scan_type(hdev, skb);
3346 		break;
3347 
3348 	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3349 		hci_cc_write_page_scan_type(hdev, skb);
3350 		break;
3351 
3352 	case HCI_OP_READ_DATA_BLOCK_SIZE:
3353 		hci_cc_read_data_block_size(hdev, skb);
3354 		break;
3355 
3356 	case HCI_OP_READ_FLOW_CONTROL_MODE:
3357 		hci_cc_read_flow_control_mode(hdev, skb);
3358 		break;
3359 
3360 	case HCI_OP_READ_LOCAL_AMP_INFO:
3361 		hci_cc_read_local_amp_info(hdev, skb);
3362 		break;
3363 
3364 	case HCI_OP_READ_CLOCK:
3365 		hci_cc_read_clock(hdev, skb);
3366 		break;
3367 
3368 	case HCI_OP_READ_INQ_RSP_TX_POWER:
3369 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
3370 		break;
3371 
3372 	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3373 		hci_cc_read_def_err_data_reporting(hdev, skb);
3374 		break;
3375 
3376 	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3377 		hci_cc_write_def_err_data_reporting(hdev, skb);
3378 		break;
3379 
3380 	case HCI_OP_PIN_CODE_REPLY:
3381 		hci_cc_pin_code_reply(hdev, skb);
3382 		break;
3383 
3384 	case HCI_OP_PIN_CODE_NEG_REPLY:
3385 		hci_cc_pin_code_neg_reply(hdev, skb);
3386 		break;
3387 
3388 	case HCI_OP_READ_LOCAL_OOB_DATA:
3389 		hci_cc_read_local_oob_data(hdev, skb);
3390 		break;
3391 
3392 	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3393 		hci_cc_read_local_oob_ext_data(hdev, skb);
3394 		break;
3395 
3396 	case HCI_OP_LE_READ_BUFFER_SIZE:
3397 		hci_cc_le_read_buffer_size(hdev, skb);
3398 		break;
3399 
3400 	case HCI_OP_LE_READ_LOCAL_FEATURES:
3401 		hci_cc_le_read_local_features(hdev, skb);
3402 		break;
3403 
3404 	case HCI_OP_LE_READ_ADV_TX_POWER:
3405 		hci_cc_le_read_adv_tx_power(hdev, skb);
3406 		break;
3407 
3408 	case HCI_OP_USER_CONFIRM_REPLY:
3409 		hci_cc_user_confirm_reply(hdev, skb);
3410 		break;
3411 
3412 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
3413 		hci_cc_user_confirm_neg_reply(hdev, skb);
3414 		break;
3415 
3416 	case HCI_OP_USER_PASSKEY_REPLY:
3417 		hci_cc_user_passkey_reply(hdev, skb);
3418 		break;
3419 
3420 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
3421 		hci_cc_user_passkey_neg_reply(hdev, skb);
3422 		break;
3423 
3424 	case HCI_OP_LE_SET_RANDOM_ADDR:
3425 		hci_cc_le_set_random_addr(hdev, skb);
3426 		break;
3427 
3428 	case HCI_OP_LE_SET_ADV_ENABLE:
3429 		hci_cc_le_set_adv_enable(hdev, skb);
3430 		break;
3431 
3432 	case HCI_OP_LE_SET_SCAN_PARAM:
3433 		hci_cc_le_set_scan_param(hdev, skb);
3434 		break;
3435 
3436 	case HCI_OP_LE_SET_SCAN_ENABLE:
3437 		hci_cc_le_set_scan_enable(hdev, skb);
3438 		break;
3439 
3440 	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3441 		hci_cc_le_read_white_list_size(hdev, skb);
3442 		break;
3443 
3444 	case HCI_OP_LE_CLEAR_WHITE_LIST:
3445 		hci_cc_le_clear_white_list(hdev, skb);
3446 		break;
3447 
3448 	case HCI_OP_LE_ADD_TO_WHITE_LIST:
3449 		hci_cc_le_add_to_white_list(hdev, skb);
3450 		break;
3451 
3452 	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3453 		hci_cc_le_del_from_white_list(hdev, skb);
3454 		break;
3455 
3456 	case HCI_OP_LE_READ_SUPPORTED_STATES:
3457 		hci_cc_le_read_supported_states(hdev, skb);
3458 		break;
3459 
3460 	case HCI_OP_LE_READ_DEF_DATA_LEN:
3461 		hci_cc_le_read_def_data_len(hdev, skb);
3462 		break;
3463 
3464 	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3465 		hci_cc_le_write_def_data_len(hdev, skb);
3466 		break;
3467 
3468 	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3469 		hci_cc_le_add_to_resolv_list(hdev, skb);
3470 		break;
3471 
3472 	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3473 		hci_cc_le_del_from_resolv_list(hdev, skb);
3474 		break;
3475 
3476 	case HCI_OP_LE_CLEAR_RESOLV_LIST:
3477 		hci_cc_le_clear_resolv_list(hdev, skb);
3478 		break;
3479 
3480 	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3481 		hci_cc_le_read_resolv_list_size(hdev, skb);
3482 		break;
3483 
3484 	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3485 		hci_cc_le_set_addr_resolution_enable(hdev, skb);
3486 		break;
3487 
3488 	case HCI_OP_LE_READ_MAX_DATA_LEN:
3489 		hci_cc_le_read_max_data_len(hdev, skb);
3490 		break;
3491 
3492 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3493 		hci_cc_write_le_host_supported(hdev, skb);
3494 		break;
3495 
3496 	case HCI_OP_LE_SET_ADV_PARAM:
3497 		hci_cc_set_adv_param(hdev, skb);
3498 		break;
3499 
3500 	case HCI_OP_READ_RSSI:
3501 		hci_cc_read_rssi(hdev, skb);
3502 		break;
3503 
3504 	case HCI_OP_READ_TX_POWER:
3505 		hci_cc_read_tx_power(hdev, skb);
3506 		break;
3507 
3508 	case HCI_OP_WRITE_SSP_DEBUG_MODE:
3509 		hci_cc_write_ssp_debug_mode(hdev, skb);
3510 		break;
3511 
3512 	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3513 		hci_cc_le_set_ext_scan_param(hdev, skb);
3514 		break;
3515 
3516 	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3517 		hci_cc_le_set_ext_scan_enable(hdev, skb);
3518 		break;
3519 
3520 	case HCI_OP_LE_SET_DEFAULT_PHY:
3521 		hci_cc_le_set_default_phy(hdev, skb);
3522 		break;
3523 
3524 	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3525 		hci_cc_le_read_num_adv_sets(hdev, skb);
3526 		break;
3527 
3528 	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3529 		hci_cc_set_ext_adv_param(hdev, skb);
3530 		break;
3531 
3532 	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3533 		hci_cc_le_set_ext_adv_enable(hdev, skb);
3534 		break;
3535 
3536 	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3537 		hci_cc_le_set_adv_set_random_addr(hdev, skb);
3538 		break;
3539 
3540 	default:
3541 		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3542 		break;
3543 	}
3544 
3545 	if (*opcode != HCI_OP_NOP)
3546 		cancel_delayed_work(&hdev->cmd_timer);
3547 
3548 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3549 		atomic_set(&hdev->cmd_cnt, 1);
3550 
3551 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3552 			     req_complete_skb);
3553 
3554 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3555 		bt_dev_err(hdev,
3556 			   "unexpected event for opcode 0x%4.4x", *opcode);
3557 		return;
3558 	}
3559 
3560 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3561 		queue_work(hdev->workqueue, &hdev->cmd_work);
3562 }
3563 
/* Handle the HCI Command Status event.
 *
 * Decodes the opcode and status of the command the controller has
 * started (or failed to start) processing, dispatches to the matching
 * hci_cs_* handler, re-arms the command timeout/credit accounting and,
 * where appropriate, flags the issuing request as complete.
 *
 * *opcode and *status are output parameters for the caller;
 * req_complete/req_complete_skb are filled in by
 * hci_req_cmd_complete() with the request's completion callbacks.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	/* Strip the event header; nothing after it is used here */
	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	/* Dispatch to the per-opcode command status handler */
	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Any real command (i.e. not a NOP) cancels the command timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-credit the command queue, unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* Do not kick the command queue while HCI_CMD_PENDING is set */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Send the next queued command if credits are available */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3676 
3677 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3678 {
3679 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3680 
3681 	hdev->hw_error_code = ev->code;
3682 
3683 	queue_work(hdev->req_workqueue, &hdev->error_reset);
3684 }
3685 
3686 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3687 {
3688 	struct hci_ev_role_change *ev = (void *) skb->data;
3689 	struct hci_conn *conn;
3690 
3691 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3692 
3693 	hci_dev_lock(hdev);
3694 
3695 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3696 	if (conn) {
3697 		if (!ev->status)
3698 			conn->role = ev->role;
3699 
3700 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3701 
3702 		hci_role_switch_cfm(conn, ev->status, ev->role);
3703 	}
3704 
3705 	hci_dev_unlock(hdev);
3706 }
3707 
/* Handle the Number of Completed Packets event.
 *
 * Returns transmit credits for each listed connection handle by
 * decrementing conn->sent and topping up the per-link-type packet
 * counters, then reschedules the TX work. Only valid in packet-based
 * flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The skb must be large enough for the fixed header plus one
	 * handle/count pair per advertised entry.
	 */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the pool for the connection's link type, never
		 * exceeding the controller-reported maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	/* Freed credits may allow more queued packets to be sent */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3774 
3775 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3776 						 __u16 handle)
3777 {
3778 	struct hci_chan *chan;
3779 
3780 	switch (hdev->dev_type) {
3781 	case HCI_PRIMARY:
3782 		return hci_conn_hash_lookup_handle(hdev, handle);
3783 	case HCI_AMP:
3784 		chan = hci_chan_lookup_handle(hdev, handle);
3785 		if (chan)
3786 			return chan->conn;
3787 		break;
3788 	default:
3789 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3790 		break;
3791 	}
3792 
3793 	return NULL;
3794 }
3795 
/* Handle the Number of Completed Data Blocks event.
 *
 * Block-based counterpart of hci_num_comp_pkts_evt(): returns transmit
 * block credits for each listed handle and reschedules the TX work.
 * Only valid in block-based flow control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The skb must be large enough for the fixed header plus one
	 * handle entry per advertised count.
	 */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may belong to an AMP logical channel, hence the
		 * dev_type-aware lookup helper.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Credit the shared block pool, clamped to the
			 * controller-reported maximum.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	/* Freed credits may allow more queued packets to be sent */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3846 
3847 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3848 {
3849 	struct hci_ev_mode_change *ev = (void *) skb->data;
3850 	struct hci_conn *conn;
3851 
3852 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3853 
3854 	hci_dev_lock(hdev);
3855 
3856 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3857 	if (conn) {
3858 		conn->mode = ev->mode;
3859 
3860 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3861 					&conn->flags)) {
3862 			if (conn->mode == HCI_CM_ACTIVE)
3863 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3864 			else
3865 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3866 		}
3867 
3868 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3869 			hci_sco_setup(conn, ev->status);
3870 	}
3871 
3872 	hci_dev_unlock(hdev);
3873 }
3874 
/* Handle the PIN Code Request event.
 *
 * Rejects the request when we are not bondable and did not initiate
 * authentication; otherwise forwards it to the management interface so
 * user space can supply the PIN.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold/drop pair extends the disconnect timeout to the pairing
	 * timeout for the duration of the PIN exchange.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* Tell user space whether a 16-digit (secure) PIN is
		 * required for the pending security level.
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3912 
3913 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3914 {
3915 	if (key_type == HCI_LK_CHANGED_COMBINATION)
3916 		return;
3917 
3918 	conn->pin_length = pin_len;
3919 	conn->key_type = key_type;
3920 
3921 	switch (key_type) {
3922 	case HCI_LK_LOCAL_UNIT:
3923 	case HCI_LK_REMOTE_UNIT:
3924 	case HCI_LK_DEBUG_COMBINATION:
3925 		return;
3926 	case HCI_LK_COMBINATION:
3927 		if (pin_len == 16)
3928 			conn->pending_sec_level = BT_SECURITY_HIGH;
3929 		else
3930 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3931 		break;
3932 	case HCI_LK_UNAUTH_COMBINATION_P192:
3933 	case HCI_LK_UNAUTH_COMBINATION_P256:
3934 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3935 		break;
3936 	case HCI_LK_AUTH_COMBINATION_P192:
3937 		conn->pending_sec_level = BT_SECURITY_HIGH;
3938 		break;
3939 	case HCI_LK_AUTH_COMBINATION_P256:
3940 		conn->pending_sec_level = BT_SECURITY_FIPS;
3941 		break;
3942 	}
3943 }
3944 
/* Handle the Link Key Request event.
 *
 * Replies with a stored link key when one exists and is strong enough
 * for the connection's pending security level; otherwise sends a
 * negative reply so the controller falls back to pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when the connection
		 * requires MITM protection (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN cannot satisfy
		 * high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4004 
/* Handle the Link Key Notification event.
 *
 * Stores the new link key, notifies the management interface, and
 * decides whether the key should persist across disconnects.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len is passed to hci_add_link_key() as 0 even
	 * though conn->pin_length is available below — confirm this is
	 * intentional.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold/drop pair resets the disconnect timeout now that the
	 * pairing phase is over.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4064 
4065 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4066 {
4067 	struct hci_ev_clock_offset *ev = (void *) skb->data;
4068 	struct hci_conn *conn;
4069 
4070 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4071 
4072 	hci_dev_lock(hdev);
4073 
4074 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4075 	if (conn && !ev->status) {
4076 		struct inquiry_entry *ie;
4077 
4078 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4079 		if (ie) {
4080 			ie->data.clock_offset = ev->clock_offset;
4081 			ie->timestamp = jiffies;
4082 		}
4083 	}
4084 
4085 	hci_dev_unlock(hdev);
4086 }
4087 
4088 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4089 {
4090 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4091 	struct hci_conn *conn;
4092 
4093 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4094 
4095 	hci_dev_lock(hdev);
4096 
4097 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4098 	if (conn && !ev->status)
4099 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4100 
4101 	hci_dev_unlock(hdev);
4102 }
4103 
4104 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4105 {
4106 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4107 	struct inquiry_entry *ie;
4108 
4109 	BT_DBG("%s", hdev->name);
4110 
4111 	hci_dev_lock(hdev);
4112 
4113 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4114 	if (ie) {
4115 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4116 		ie->timestamp = jiffies;
4117 	}
4118 
4119 	hci_dev_unlock(hdev);
4120 }
4121 
4122 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4123 					     struct sk_buff *skb)
4124 {
4125 	struct inquiry_data data;
4126 	int num_rsp = *((__u8 *) skb->data);
4127 
4128 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4129 
4130 	if (!num_rsp)
4131 		return;
4132 
4133 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4134 		return;
4135 
4136 	hci_dev_lock(hdev);
4137 
4138 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4139 		struct inquiry_info_with_rssi_and_pscan_mode *info;
4140 		info = (void *) (skb->data + 1);
4141 
4142 		for (; num_rsp; num_rsp--, info++) {
4143 			u32 flags;
4144 
4145 			bacpy(&data.bdaddr, &info->bdaddr);
4146 			data.pscan_rep_mode	= info->pscan_rep_mode;
4147 			data.pscan_period_mode	= info->pscan_period_mode;
4148 			data.pscan_mode		= info->pscan_mode;
4149 			memcpy(data.dev_class, info->dev_class, 3);
4150 			data.clock_offset	= info->clock_offset;
4151 			data.rssi		= info->rssi;
4152 			data.ssp_mode		= 0x00;
4153 
4154 			flags = hci_inquiry_cache_update(hdev, &data, false);
4155 
4156 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4157 					  info->dev_class, info->rssi,
4158 					  flags, NULL, 0, NULL, 0);
4159 		}
4160 	} else {
4161 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4162 
4163 		for (; num_rsp; num_rsp--, info++) {
4164 			u32 flags;
4165 
4166 			bacpy(&data.bdaddr, &info->bdaddr);
4167 			data.pscan_rep_mode	= info->pscan_rep_mode;
4168 			data.pscan_period_mode	= info->pscan_period_mode;
4169 			data.pscan_mode		= 0x00;
4170 			memcpy(data.dev_class, info->dev_class, 3);
4171 			data.clock_offset	= info->clock_offset;
4172 			data.rssi		= info->rssi;
4173 			data.ssp_mode		= 0x00;
4174 
4175 			flags = hci_inquiry_cache_update(hdev, &data, false);
4176 
4177 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4178 					  info->dev_class, info->rssi,
4179 					  flags, NULL, 0, NULL, 0);
4180 		}
4181 	}
4182 
4183 	hci_dev_unlock(hdev);
4184 }
4185 
/* Handle the Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page on the connection, updates the
 * SSP/SC flags from host features (page 1), and continues connection
 * setup (remote name request, mgmt notification, authentication).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before reporting the device as
	 * connected to the management interface.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is required the connection
	 * setup is complete at this point.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4249 
/* Handle the Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success the connection object is finalized and registered in
 * debugfs/sysfs; on selected failure codes an outgoing eSCO setup is
 * retried with a downgraded packet type before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		/* Success: record handle and the link type actually
		 * negotiated (may differ from the hinted eSCO).
		 */
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing connections, retry with a less demanding
		 * packet type; only fail if the retry cannot be sent.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4316 
4317 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4318 {
4319 	size_t parsed = 0;
4320 
4321 	while (parsed < eir_len) {
4322 		u8 field_len = eir[0];
4323 
4324 		if (field_len == 0)
4325 			return parsed;
4326 
4327 		parsed += field_len + 1;
4328 		eir += field_len + 1;
4329 	}
4330 
4331 	return eir_len;
4332 }
4333 
4334 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4335 					    struct sk_buff *skb)
4336 {
4337 	struct inquiry_data data;
4338 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
4339 	int num_rsp = *((__u8 *) skb->data);
4340 	size_t eir_len;
4341 
4342 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4343 
4344 	if (!num_rsp)
4345 		return;
4346 
4347 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4348 		return;
4349 
4350 	hci_dev_lock(hdev);
4351 
4352 	for (; num_rsp; num_rsp--, info++) {
4353 		u32 flags;
4354 		bool name_known;
4355 
4356 		bacpy(&data.bdaddr, &info->bdaddr);
4357 		data.pscan_rep_mode	= info->pscan_rep_mode;
4358 		data.pscan_period_mode	= info->pscan_period_mode;
4359 		data.pscan_mode		= 0x00;
4360 		memcpy(data.dev_class, info->dev_class, 3);
4361 		data.clock_offset	= info->clock_offset;
4362 		data.rssi		= info->rssi;
4363 		data.ssp_mode		= 0x01;
4364 
4365 		if (hci_dev_test_flag(hdev, HCI_MGMT))
4366 			name_known = eir_get_data(info->data,
4367 						  sizeof(info->data),
4368 						  EIR_NAME_COMPLETE, NULL);
4369 		else
4370 			name_known = true;
4371 
4372 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
4373 
4374 		eir_len = eir_get_length(info->data, sizeof(info->data));
4375 
4376 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4377 				  info->dev_class, info->rssi,
4378 				  flags, info->data, eir_len, NULL, 0);
4379 	}
4380 
4381 	hci_dev_unlock(hdev);
4382 }
4383 
/* Handle the Encryption Key Refresh Complete event.
 *
 * For LE links, promotes the pending security level on success,
 * disconnects on failure, and delivers connect/auth confirmations
 * depending on the connection state. BR/EDR links are handled via the
 * auth_complete event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established connection forces a
	 * disconnect with an authentication failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold/drop pair resets the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4433 
4434 static u8 hci_get_auth_req(struct hci_conn *conn)
4435 {
4436 	/* If remote requests no-bonding follow that lead */
4437 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
4438 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4439 		return conn->remote_auth | (conn->auth_type & 0x01);
4440 
4441 	/* If both remote and local have enough IO capabilities, require
4442 	 * MITM protection
4443 	 */
4444 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4445 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4446 		return conn->remote_auth | 0x01;
4447 
4448 	/* No MITM protection possible so ignore remote requirement */
4449 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4450 }
4451 
4452 static u8 bredr_oob_data_present(struct hci_conn *conn)
4453 {
4454 	struct hci_dev *hdev = conn->hdev;
4455 	struct oob_data *data;
4456 
4457 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4458 	if (!data)
4459 		return 0x00;
4460 
4461 	if (bredr_sc_enabled(hdev)) {
4462 		/* When Secure Connections is enabled, then just
4463 		 * return the present value stored with the OOB
4464 		 * data. The stored value contains the right present
4465 		 * information. However it can only be trusted when
4466 		 * not in Secure Connection Only mode.
4467 		 */
4468 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4469 			return data->present;
4470 
4471 		/* When Secure Connections Only mode is enabled, then
4472 		 * the P-256 values are required. If they are not
4473 		 * available, then do not declare that OOB data is
4474 		 * present.
4475 		 */
4476 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4477 		    !memcmp(data->hash256, ZERO_KEY, 16))
4478 			return 0x00;
4479 
4480 		return 0x02;
4481 	}
4482 
4483 	/* When Secure Connections is not enabled or actually
4484 	 * not supported by the hardware, then check that if
4485 	 * P-192 data values are present.
4486 	 */
4487 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4488 	    !memcmp(data->hash192, ZERO_KEY, 16))
4489 		return 0x00;
4490 
4491 	return 0x01;
4492 }
4493 
/* HCI IO Capability Request event: during Secure Simple Pairing the
 * controller asks the host for its IO capability, OOB data presence
 * and authentication (bonding/MITM) requirements for this connection.
 * Replies with either an IO Capability Reply or a negative reply when
 * pairing is not allowed.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference for the duration of the pairing procedure;
	 * the matching hci_conn_drop() is in
	 * hci_simple_pair_complete_evt().
	 */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet
		 * (remote_auth stays at its 0xff "unknown" reset value).
		 */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4562 
4563 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4564 {
4565 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4566 	struct hci_conn *conn;
4567 
4568 	BT_DBG("%s", hdev->name);
4569 
4570 	hci_dev_lock(hdev);
4571 
4572 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4573 	if (!conn)
4574 		goto unlock;
4575 
4576 	conn->remote_cap = ev->capability;
4577 	conn->remote_auth = ev->authentication;
4578 
4579 unlock:
4580 	hci_dev_unlock(hdev);
4581 }
4582 
/* HCI User Confirmation Request event: for SSP numeric comparison the
 * controller asks whether the displayed value matches. Depending on
 * the local/remote MITM requirements and IO capabilities this is
 * rejected, auto-accepted (possibly after a configurable delay), or
 * handed to user space via mgmt for confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement encodes MITM */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally defer the auto-accept so a competing request
		 * from user space still has a chance to intervene.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4667 
4668 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4669 					 struct sk_buff *skb)
4670 {
4671 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4672 
4673 	BT_DBG("%s", hdev->name);
4674 
4675 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4676 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4677 }
4678 
4679 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4680 					struct sk_buff *skb)
4681 {
4682 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4683 	struct hci_conn *conn;
4684 
4685 	BT_DBG("%s", hdev->name);
4686 
4687 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4688 	if (!conn)
4689 		return;
4690 
4691 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4692 	conn->passkey_entered = 0;
4693 
4694 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4695 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4696 					 conn->dst_type, conn->passkey_notify,
4697 					 conn->passkey_entered);
4698 }
4699 
4700 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4701 {
4702 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4703 	struct hci_conn *conn;
4704 
4705 	BT_DBG("%s", hdev->name);
4706 
4707 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4708 	if (!conn)
4709 		return;
4710 
4711 	switch (ev->type) {
4712 	case HCI_KEYPRESS_STARTED:
4713 		conn->passkey_entered = 0;
4714 		return;
4715 
4716 	case HCI_KEYPRESS_ENTERED:
4717 		conn->passkey_entered++;
4718 		break;
4719 
4720 	case HCI_KEYPRESS_ERASED:
4721 		conn->passkey_entered--;
4722 		break;
4723 
4724 	case HCI_KEYPRESS_CLEARED:
4725 		conn->passkey_entered = 0;
4726 		break;
4727 
4728 	case HCI_KEYPRESS_COMPLETED:
4729 		return;
4730 	}
4731 
4732 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4733 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4734 					 conn->dst_type, conn->passkey_notify,
4735 					 conn->passkey_entered);
4736 }
4737 
/* HCI Simple Pairing Complete event: SSP finished, with the result in
 * ev->status. Drops the reference taken in hci_io_capa_request_evt()
 * and reports failures to user space when appropriate.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Balances the hci_conn_hold() from hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4768 
4769 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4770 					 struct sk_buff *skb)
4771 {
4772 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4773 	struct inquiry_entry *ie;
4774 	struct hci_conn *conn;
4775 
4776 	BT_DBG("%s", hdev->name);
4777 
4778 	hci_dev_lock(hdev);
4779 
4780 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4781 	if (conn)
4782 		memcpy(conn->features[1], ev->features, 8);
4783 
4784 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4785 	if (ie)
4786 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4787 
4788 	hci_dev_unlock(hdev);
4789 }
4790 
/* HCI Remote OOB Data Request event: the controller needs the hash and
 * randomizer values that were exchanged out-of-band with the remote
 * device. Replies with the stored OOB data, or with a negative reply
 * when none is available.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			/* In Secure Connections Only mode the legacy
			 * P-192 values must not be used, so send them
			 * zeroed out.
			 */
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Without Secure Connections only the P-192 values are
		 * exchanged, using the shorter reply command.
		 */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4844 
4845 #if IS_ENABLED(CONFIG_BT_HS)
4846 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4847 {
4848 	struct hci_ev_channel_selected *ev = (void *)skb->data;
4849 	struct hci_conn *hcon;
4850 
4851 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4852 
4853 	skb_pull(skb, sizeof(*ev));
4854 
4855 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4856 	if (!hcon)
4857 		return;
4858 
4859 	amp_read_loc_assoc_final_data(hdev, hcon);
4860 }
4861 
/* AMP Physical Link Complete event: the AMP controller finished
 * bringing up (or failed to bring up) the physical link identified by
 * phy_handle.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		/* Link setup failed: tear the conn object down again */
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The AMP link shares its destination with the BR/EDR link that
	 * carries the associated L2CAP connection.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Hold/drop pair refreshes the reference accounting while the
	 * new disc_timeout is set — presumably (re)arming the delayed
	 * disconnect when no other users remain; see hci_conn_drop().
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
4901 
/* AMP Logical Link Complete event: a logical link came up on top of
 * the physical link identified by phy_handle; create the matching
 * hci_chan and confirm it towards L2CAP.
 *
 * NOTE(review): unlike most handlers in this file, the connection hash
 * is looked up here without holding hci_dev_lock() — confirm whether
 * that is intentional.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP data goes out in controller-sized blocks */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4939 
4940 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4941 					     struct sk_buff *skb)
4942 {
4943 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4944 	struct hci_chan *hchan;
4945 
4946 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4947 	       le16_to_cpu(ev->handle), ev->status);
4948 
4949 	if (ev->status)
4950 		return;
4951 
4952 	hci_dev_lock(hdev);
4953 
4954 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4955 	if (!hchan)
4956 		goto unlock;
4957 
4958 	amp_destroy_logical_link(hchan, ev->reason);
4959 
4960 unlock:
4961 	hci_dev_unlock(hdev);
4962 }
4963 
4964 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4965 					     struct sk_buff *skb)
4966 {
4967 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4968 	struct hci_conn *hcon;
4969 
4970 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4971 
4972 	if (ev->status)
4973 		return;
4974 
4975 	hci_dev_lock(hdev);
4976 
4977 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4978 	if (hcon) {
4979 		hcon->state = BT_CLOSED;
4980 		hci_conn_del(hcon);
4981 	}
4982 
4983 	hci_dev_unlock(hdev);
4984 }
4985 #endif
4986 
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. Looks up (or creates) the hci_conn for the peer, fixes up
 * the initiator/responder address information, resolves the identity
 * address via the IRK store and moves the connection into BT_CONFIG
 * (or straight to BT_CONNECTED when no remote feature read is done).
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
			bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
			u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connect attempt concluded, so the connect timeout
		 * no longer applies.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		/* Connection failed: clean up through the common error
		 * path and fall through to the background scan update.
		 */
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Only report the connection to mgmt once */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection attempt succeeded: drop the reference
	 * that was held on behalf of the params entry.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
5148 
5149 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5150 {
5151 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5152 
5153 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5154 
5155 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5156 			     ev->role, le16_to_cpu(ev->handle),
5157 			     le16_to_cpu(ev->interval),
5158 			     le16_to_cpu(ev->latency),
5159 			     le16_to_cpu(ev->supervision_timeout));
5160 }
5161 
5162 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5163 					 struct sk_buff *skb)
5164 {
5165 	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5166 
5167 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5168 
5169 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5170 			     ev->role, le16_to_cpu(ev->handle),
5171 			     le16_to_cpu(ev->interval),
5172 			     le16_to_cpu(ev->latency),
5173 			     le16_to_cpu(ev->supervision_timeout));
5174 }
5175 
5176 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5177 {
5178 	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5179 	struct hci_conn *conn;
5180 
5181 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5182 
5183 	if (ev->status)
5184 		return;
5185 
5186 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5187 	if (conn) {
5188 		struct adv_info *adv_instance;
5189 
5190 		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
5191 			return;
5192 
5193 		if (!hdev->cur_adv_instance) {
5194 			bacpy(&conn->resp_addr, &hdev->random_addr);
5195 			return;
5196 		}
5197 
5198 		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5199 		if (adv_instance)
5200 			bacpy(&conn->resp_addr, &adv_instance->random_addr);
5201 	}
5202 }
5203 
5204 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5205 					    struct sk_buff *skb)
5206 {
5207 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5208 	struct hci_conn *conn;
5209 
5210 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5211 
5212 	if (ev->status)
5213 		return;
5214 
5215 	hci_dev_lock(hdev);
5216 
5217 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5218 	if (conn) {
5219 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5220 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5221 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5222 	}
5223 
5224 	hci_dev_unlock(hdev);
5225 }
5226 
/* This function requires the caller holds hdev->lock.
 *
 * Decide whether an advertising report should trigger an LE connection
 * attempt and, if so, start it. Returns the new hci_conn on success or
 * NULL when no connection is (or can be) attempted.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave devices are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5315 
/* Process a single LE advertising report (or directed advertising
 * report, in which case direct_addr is our RPA from the event and is
 * NULL otherwise).
 *
 * Validates the report, resolves the advertiser's RPA to an identity
 * address, triggers pending LE connections via check_pending_le_conn()
 * and merges ADV_IND/ADV_SCAN_IND reports with their subsequent
 * SCAN_RSP before emitting mgmt Device Found events.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
								direct_addr);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5512 
5513 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5514 {
5515 	u8 num_reports = skb->data[0];
5516 	void *ptr = &skb->data[1];
5517 
5518 	hci_dev_lock(hdev);
5519 
5520 	while (num_reports--) {
5521 		struct hci_ev_le_advertising_info *ev = ptr;
5522 		s8 rssi;
5523 
5524 		if (ev->length <= HCI_MAX_AD_LENGTH) {
5525 			rssi = ev->data[ev->length];
5526 			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5527 					   ev->bdaddr_type, NULL, 0, rssi,
5528 					   ev->data, ev->length);
5529 		} else {
5530 			bt_dev_err(hdev, "Dropping invalid advertising data");
5531 		}
5532 
5533 		ptr += sizeof(*ev) + ev->length + 1;
5534 	}
5535 
5536 	hci_dev_unlock(hdev);
5537 }
5538 
5539 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5540 {
5541 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5542 		switch (evt_type) {
5543 		case LE_LEGACY_ADV_IND:
5544 			return LE_ADV_IND;
5545 		case LE_LEGACY_ADV_DIRECT_IND:
5546 			return LE_ADV_DIRECT_IND;
5547 		case LE_LEGACY_ADV_SCAN_IND:
5548 			return LE_ADV_SCAN_IND;
5549 		case LE_LEGACY_NONCONN_IND:
5550 			return LE_ADV_NONCONN_IND;
5551 		case LE_LEGACY_SCAN_RSP_ADV:
5552 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5553 			return LE_ADV_SCAN_RSP;
5554 		}
5555 
5556 		goto invalid;
5557 	}
5558 
5559 	if (evt_type & LE_EXT_ADV_CONN_IND) {
5560 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
5561 			return LE_ADV_DIRECT_IND;
5562 
5563 		return LE_ADV_IND;
5564 	}
5565 
5566 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
5567 		return LE_ADV_SCAN_RSP;
5568 
5569 	if (evt_type & LE_EXT_ADV_SCAN_IND)
5570 		return LE_ADV_SCAN_IND;
5571 
5572 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5573 	    evt_type & LE_EXT_ADV_DIRECT_IND)
5574 		return LE_ADV_NONCONN_IND;
5575 
5576 invalid:
5577 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5578 			       evt_type);
5579 
5580 	return LE_ADV_INVALID;
5581 }
5582 
5583 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5584 {
5585 	u8 num_reports = skb->data[0];
5586 	void *ptr = &skb->data[1];
5587 
5588 	hci_dev_lock(hdev);
5589 
5590 	while (num_reports--) {
5591 		struct hci_ev_le_ext_adv_report *ev = ptr;
5592 		u8 legacy_evt_type;
5593 		u16 evt_type;
5594 
5595 		evt_type = __le16_to_cpu(ev->evt_type);
5596 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5597 		if (legacy_evt_type != LE_ADV_INVALID) {
5598 			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5599 					   ev->bdaddr_type, NULL, 0, ev->rssi,
5600 					   ev->data, ev->length);
5601 		}
5602 
5603 		ptr += sizeof(*ev) + ev->length;
5604 	}
5605 
5606 	hci_dev_unlock(hdev);
5607 }
5608 
5609 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5610 					    struct sk_buff *skb)
5611 {
5612 	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5613 	struct hci_conn *conn;
5614 
5615 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5616 
5617 	hci_dev_lock(hdev);
5618 
5619 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5620 	if (conn) {
5621 		if (!ev->status)
5622 			memcpy(conn->features[0], ev->features, 8);
5623 
5624 		if (conn->state == BT_CONFIG) {
5625 			__u8 status;
5626 
5627 			/* If the local controller supports slave-initiated
5628 			 * features exchange, but the remote controller does
5629 			 * not, then it is possible that the error code 0x1a
5630 			 * for unsupported remote feature gets returned.
5631 			 *
5632 			 * In this specific case, allow the connection to
5633 			 * transition into connected state and mark it as
5634 			 * successful.
5635 			 */
5636 			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5637 			    !conn->out && ev->status == 0x1a)
5638 				status = 0x00;
5639 			else
5640 				status = ev->status;
5641 
5642 			conn->state = BT_CONNECTED;
5643 			hci_connect_cfm(conn, status);
5644 			hci_conn_drop(conn);
5645 		}
5646 	}
5647 
5648 	hci_dev_unlock(hdev);
5649 }
5650 
/* Handle the LE Long Term Key Request meta event: look up the LTK stored
 * for the connection's peer and answer with either an LTK Reply or an LTK
 * Negative Reply. A negative reply is sent when the connection or key is
 * unknown, or when the EDiv/Rand values in the request do not match the
 * stored key (for SC keys both must be zero).
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy only enc_size bytes of key material and zero-pad the rest of
	 * the reply so shortened keys never leak stale stack data.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* STKs are single-use: drop the key once consumed */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5714 
5715 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5716 				      u8 reason)
5717 {
5718 	struct hci_cp_le_conn_param_req_neg_reply cp;
5719 
5720 	cp.handle = cpu_to_le16(handle);
5721 	cp.reason = reason;
5722 
5723 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5724 		     &cp);
5725 }
5726 
5727 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5728 					     struct sk_buff *skb)
5729 {
5730 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5731 	struct hci_cp_le_conn_param_req_reply cp;
5732 	struct hci_conn *hcon;
5733 	u16 handle, min, max, latency, timeout;
5734 
5735 	handle = le16_to_cpu(ev->handle);
5736 	min = le16_to_cpu(ev->interval_min);
5737 	max = le16_to_cpu(ev->interval_max);
5738 	latency = le16_to_cpu(ev->latency);
5739 	timeout = le16_to_cpu(ev->timeout);
5740 
5741 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5742 	if (!hcon || hcon->state != BT_CONNECTED)
5743 		return send_conn_param_neg_reply(hdev, handle,
5744 						 HCI_ERROR_UNKNOWN_CONN_ID);
5745 
5746 	if (hci_check_conn_params(min, max, latency, timeout))
5747 		return send_conn_param_neg_reply(hdev, handle,
5748 						 HCI_ERROR_INVALID_LL_PARAMS);
5749 
5750 	if (hcon->role == HCI_ROLE_MASTER) {
5751 		struct hci_conn_params *params;
5752 		u8 store_hint;
5753 
5754 		hci_dev_lock(hdev);
5755 
5756 		params = hci_conn_params_lookup(hdev, &hcon->dst,
5757 						hcon->dst_type);
5758 		if (params) {
5759 			params->conn_min_interval = min;
5760 			params->conn_max_interval = max;
5761 			params->conn_latency = latency;
5762 			params->supervision_timeout = timeout;
5763 			store_hint = 0x01;
5764 		} else{
5765 			store_hint = 0x00;
5766 		}
5767 
5768 		hci_dev_unlock(hdev);
5769 
5770 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5771 				    store_hint, min, max, latency, timeout);
5772 	}
5773 
5774 	cp.handle = ev->handle;
5775 	cp.interval_min = ev->interval_min;
5776 	cp.interval_max = ev->interval_max;
5777 	cp.latency = ev->latency;
5778 	cp.timeout = ev->timeout;
5779 	cp.min_ce_len = 0;
5780 	cp.max_ce_len = 0;
5781 
5782 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5783 }
5784 
5785 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5786 					 struct sk_buff *skb)
5787 {
5788 	u8 num_reports = skb->data[0];
5789 	void *ptr = &skb->data[1];
5790 
5791 	hci_dev_lock(hdev);
5792 
5793 	while (num_reports--) {
5794 		struct hci_ev_le_direct_adv_info *ev = ptr;
5795 
5796 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5797 				   ev->bdaddr_type, &ev->direct_addr,
5798 				   ev->direct_addr_type, ev->rssi, NULL, 0);
5799 
5800 		ptr += sizeof(*ev);
5801 	}
5802 
5803 	hci_dev_unlock(hdev);
5804 }
5805 
5806 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
5807 {
5808 	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
5809 	struct hci_conn *conn;
5810 
5811 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5812 
5813 	if (!ev->status)
5814 		return;
5815 
5816 	hci_dev_lock(hdev);
5817 
5818 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5819 	if (!conn)
5820 		goto unlock;
5821 
5822 	conn->le_tx_phy = ev->tx_phy;
5823 	conn->le_rx_phy = ev->rx_phy;
5824 
5825 unlock:
5826 	hci_dev_unlock(hdev);
5827 }
5828 
/* Dispatch an LE Meta event to the handler for its subevent code. The
 * one-byte meta header is pulled off the skb first, so every subevent
 * handler sees its own parameters at skb->data. Unknown subevents are
 * silently ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}
5884 
/* Check whether @skb (a pristine copy of the last received HCI event)
 * carries completion data for the request identified by @opcode/@event.
 *
 * If @event is non-zero the request was waiting for that specific event
 * type and only the event code is compared. Otherwise the skb must be a
 * Command Complete event whose opcode matches @opcode.
 *
 * On success the event (and Command Complete) headers have been pulled
 * off @skb, leaving the return parameters at skb->data. Returns false if
 * the skb is missing, malformed, or does not match the request.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
5936 
/* Main HCI event entry point: dispatch a received event packet to its
 * handler and, if the event completes a pending request, invoke the
 * request's completion callback.
 *
 * Consumes @skb (and the pristine clone, if one was made) before
 * returning.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	/* If the last sent command was waiting for this specific event,
	 * resolve its completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status may update opcode/status and resolve the
	 * request completion callbacks for the command they acknowledge.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP/High Speed events are only handled when BT_HS is enabled */
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Run whichever request completion was resolved above. The
	 * skb-taking variant gets the pristine clone; hci_get_cmd_complete()
	 * validates it and, on failure, the callback receives NULL.
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
6167