xref: /linux/net/bluetooth/hci_event.c (revision 957e3facd147510f2cf8780e38606f1d707f0e33)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36 
37 /* Handle HCI Event packets */
38 
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success, clear the in-progress inquiry flag, wake any task
 * sleeping on that bit, mark discovery as stopped and retry any
 * connection attempts that were deferred while inquiry was active.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* First byte of a Command Complete payload is the status code */
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The barrier must sit between clearing the bit and waking the
	 * waiter; do not reorder these three statements.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58 
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 	__u8 status = *((__u8 *) skb->data);
62 
63 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
64 
65 	if (status)
66 		return;
67 
68 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70 
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 	__u8 status = *((__u8 *) skb->data);
74 
75 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
76 
77 	if (status)
78 		return;
79 
80 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81 
82 	hci_conn_check_pending(hdev);
83 }
84 
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Intentionally a no-op apart from the debug trace: this handler
 * updates no state here.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90 
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY.
 *
 * On success, look up the connection by handle and cache the reported
 * role (master/slave) on it.  The lookup and update are done under
 * hci_dev_lock so the connection cannot go away in between.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
109 
/* Command Complete handler for HCI_OP_READ_LINK_POLICY.
 *
 * On success, cache the per-connection link policy reported by the
 * controller on the matching hci_conn (looked up by handle, under
 * hci_dev_lock).
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
128 
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * The response only carries status + handle, so the policy value that
 * was actually written is recovered from the original command payload
 * via hci_sent_cmd_data() and cached on the matching connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* Policy is read at offset 2 of the sent command —
		 * presumably past a 2-byte handle field; confirm against
		 * struct hci_cp_write_link_policy.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
152 
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 					struct sk_buff *skb)
155 {
156 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157 
158 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159 
160 	if (rp->status)
161 		return;
162 
163 	hdev->link_policy = __le16_to_cpu(rp->policy);
164 }
165 
166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
167 					 struct sk_buff *skb)
168 {
169 	__u8 status = *((__u8 *) skb->data);
170 	void *sent;
171 
172 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
173 
174 	if (status)
175 		return;
176 
177 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 	if (!sent)
179 		return;
180 
181 	hdev->link_policy = get_unaligned_le16(sent);
182 }
183 
/* Command Complete handler for HCI_OP_RESET.
 *
 * HCI_RESET is cleared unconditionally (the command finished either
 * way).  On success, all non-persistent device state is dropped:
 * volatile flags, discovery state, cached TX power values, pending
 * advertising/scan-response data, LE scan type, SSP debug mode and
 * the LE white list.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Cleared even on failure — the reset attempt is over */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
214 
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * The name that was written is recovered from the sent command.  When
 * the management interface is active, completion (success or failure)
 * is reported to it; otherwise the name is cached locally only on
 * success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
235 
236 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 	struct hci_rp_read_local_name *rp = (void *) skb->data;
239 
240 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
241 
242 	if (rp->status)
243 		return;
244 
245 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
246 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
247 }
248 
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirror the requested authentication mode (taken from
 * the sent command) in the HCI_AUTH device flag; then, if the
 * management interface is active, report completion to it regardless
 * of status.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
272 
273 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
274 {
275 	__u8 status = *((__u8 *) skb->data);
276 	__u8 param;
277 	void *sent;
278 
279 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
280 
281 	if (status)
282 		return;
283 
284 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 	if (!sent)
286 		return;
287 
288 	param = *((__u8 *) sent);
289 
290 	if (param)
291 		set_bit(HCI_ENCRYPT, &hdev->flags);
292 	else
293 		clear_bit(HCI_ENCRYPT, &hdev->flags);
294 }
295 
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * On failure the pending discoverable timeout is cancelled; on success
 * the ISCAN/PSCAN device flags are updated to match the scan mode that
 * was requested in the sent command.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		/* Command failed — drop any pending discoverable timeout */
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
330 
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV: on success,
 * cache the 3-byte class of device (stored little-endian, hence the
 * reversed byte order in the debug print).
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
345 
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * On success the class written (recovered from the sent command) is
 * cached; when the management interface is active, completion is
 * additionally reported to it with the actual status.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
367 
/* Command Complete handler for HCI_OP_READ_VOICE_SETTING.
 *
 * On success, update the cached voice setting and — only if it
 * actually changed — notify the driver via the optional notify
 * callback.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* Unchanged — skip the update and driver notification */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
390 
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING.
 *
 * Mirrors hci_cc_read_voice_setting, except the new value is taken
 * from the sent command (the status event carries no payload beyond
 * the status byte).  The driver is only notified when the cached
 * value actually changes.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
419 
420 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
421 					  struct sk_buff *skb)
422 {
423 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
424 
425 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
426 
427 	if (rp->status)
428 		return;
429 
430 	hdev->num_iac = rp->num_iac;
431 
432 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
433 }
434 
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, mirror the requested Simple Pairing mode in the host
 * feature bits.  Completion is then either reported to the management
 * interface (when active, regardless of status) or, on success,
 * reflected directly in the HCI_SSP_ENABLED flag.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
462 
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * Secure Connections counterpart of hci_cc_write_ssp_mode: on success
 * the host SC feature bit is updated, and completion is reported
 * either to the management interface or via the HCI_SC_ENABLED flag.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
490 
491 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
492 {
493 	struct hci_rp_read_local_version *rp = (void *) skb->data;
494 
495 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
496 
497 	if (rp->status)
498 		return;
499 
500 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
501 		hdev->hci_ver = rp->hci_ver;
502 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
503 		hdev->lmp_ver = rp->lmp_ver;
504 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
505 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
506 	}
507 }
508 
509 static void hci_cc_read_local_commands(struct hci_dev *hdev,
510 				       struct sk_buff *skb)
511 {
512 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
513 
514 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
515 
516 	if (rp->status)
517 		return;
518 
519 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
520 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
521 }
522 
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * On success, cache page 0 of the LMP feature mask and derive the
 * default ACL packet types and (e)SCO link types from the individual
 * feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
572 
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES.
 *
 * On success, track the highest feature page the controller reports
 * and cache the returned page's feature bits (bounded by
 * HCI_MAX_PAGES to avoid indexing past the features array).
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
589 
590 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
591 					  struct sk_buff *skb)
592 {
593 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
594 
595 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596 
597 	if (rp->status)
598 		return;
599 
600 	hdev->flow_ctl_mode = rp->mode;
601 }
602 
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * On success, record the controller's ACL/SCO MTUs and buffer counts,
 * apply the fixup quirk for controllers that report bogus SCO buffer
 * sizes, and initialise the in-flight packet counters to the full
 * buffer counts.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Override broken SCO values reported by some controllers */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
628 
/* Command Complete handler for HCI_OP_READ_BD_ADDR.
 *
 * On success, the public address is cached in hdev->bdaddr while the
 * device is initialising, and additionally in hdev->setup_addr while
 * it is still in the setup stage.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
644 
645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646 					   struct sk_buff *skb)
647 {
648 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649 
650 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 
652 	if (rp->status)
653 		return;
654 
655 	if (test_bit(HCI_INIT, &hdev->flags)) {
656 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
657 		hdev->page_scan_window = __le16_to_cpu(rp->window);
658 	}
659 }
660 
661 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
662 					    struct sk_buff *skb)
663 {
664 	u8 status = *((u8 *) skb->data);
665 	struct hci_cp_write_page_scan_activity *sent;
666 
667 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
668 
669 	if (status)
670 		return;
671 
672 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
673 	if (!sent)
674 		return;
675 
676 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
677 	hdev->page_scan_window = __le16_to_cpu(sent->window);
678 }
679 
680 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
681 					   struct sk_buff *skb)
682 {
683 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
684 
685 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686 
687 	if (rp->status)
688 		return;
689 
690 	if (test_bit(HCI_INIT, &hdev->flags))
691 		hdev->page_scan_type = rp->type;
692 }
693 
694 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
695 					struct sk_buff *skb)
696 {
697 	u8 status = *((u8 *) skb->data);
698 	u8 *type;
699 
700 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
701 
702 	if (status)
703 		return;
704 
705 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
706 	if (type)
707 		hdev->page_scan_type = *type;
708 }
709 
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE.
 *
 * On success, record the block-based flow control parameters and
 * initialise the free-block counter to the full number of blocks.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
729 
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * The sent command determines whether the local clock (which == 0x00)
 * or a connection's piconet clock was requested; the result is stored
 * on hdev or the matching hci_conn accordingly.  Unlike most handlers
 * here, the response length is validated before use.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against short responses before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* Local clock was requested */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
764 
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * On success, cache the local AMP controller capabilities.  The A2MP
 * get-info response is sent in all cases — on failure it is sent
 * without updating any state.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
789 
790 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
791 					struct sk_buff *skb)
792 {
793 	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
794 	struct amp_assoc *assoc = &hdev->loc_assoc;
795 	size_t rem_len, frag_len;
796 
797 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 
799 	if (rp->status)
800 		goto a2mp_rsp;
801 
802 	frag_len = skb->len - sizeof(*rp);
803 	rem_len = __le16_to_cpu(rp->rem_len);
804 
805 	if (rem_len > frag_len) {
806 		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
807 
808 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
809 		assoc->offset += frag_len;
810 
811 		/* Read other fragments */
812 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
813 
814 		return;
815 	}
816 
817 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
818 	assoc->len = assoc->offset + rem_len;
819 	assoc->offset = 0;
820 
821 a2mp_rsp:
822 	/* Send A2MP Rsp when all fragments are received */
823 	a2mp_send_getampassoc_rsp(hdev, rp->status);
824 	a2mp_send_create_phy_link_req(hdev, rp->status);
825 }
826 
827 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
828 					 struct sk_buff *skb)
829 {
830 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
831 
832 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
833 
834 	if (rp->status)
835 		return;
836 
837 	hdev->inq_tx_power = rp->tx_power;
838 }
839 
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Completion is reported to the management interface (when active)
 * regardless of status.  On success, the PIN length from the sent
 * command is cached on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
867 
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY.
 * Only forwards the result to the management interface when active;
 * no device state is updated.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
882 
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * On success, record the LE ACL data MTU and buffer count, and
 * initialise the in-flight LE packet counter to the full buffer
 * count.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
900 
901 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
902 					  struct sk_buff *skb)
903 {
904 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
905 
906 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907 
908 	if (rp->status)
909 		return;
910 
911 	memcpy(hdev->le_features, rp->features, 8);
912 }
913 
914 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
915 					struct sk_buff *skb)
916 {
917 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
918 
919 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920 
921 	if (rp->status)
922 		return;
923 
924 	hdev->adv_tx_power = rp->tx_power;
925 }
926 
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY.
 * Forwards the result to the management interface when active; no
 * device state is updated.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
941 
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY.
 * Forwards the result to the management interface when active; no
 * device state is updated.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
957 
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY.
 * Forwards the result to the management interface when active; no
 * device state is updated.  (The reply struct is shared with the
 * user-confirm handlers — it only carries status + bdaddr.)
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
972 
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY.
 * Forwards the result to the management interface when active; no
 * device state is updated.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
988 
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 * Passes the 192-bit hash/randomizer to the management layer; the
 * 256-bit (Secure Connections) values are NULL here — see the
 * extended variant below.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
					  rp->status);
	hci_dev_unlock(hdev);
}
1001 
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 * Extended variant carrying both the P-192 and P-256 hash/randomizer
 * pairs; all four are forwarded to the management layer.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
					  rp->hash256, rp->rand256,
					  rp->status);
	hci_dev_unlock(hdev);
}
1015 
1016 
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR.
 *
 * On success, cache the random address that was set (taken from the
 * sent command, since the status event carries no payload).
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1037 
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success, mirror the requested advertising state (taken from the
 * sent command) in the HCI_LE_ADV flag.  When advertising was enabled
 * while an LE connection attempt is pending, arm the connection
 * timeout so a peripheral-role connect that never completes gets
 * cleaned up.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1072 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM.
 *
 * On success, cache the scan type (active/passive) that was requested
 * in the sent command.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1093 
1094 static bool has_pending_adv_report(struct hci_dev *hdev)
1095 {
1096 	struct discovery_state *d = &hdev->discovery;
1097 
1098 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1099 }
1100 
1101 static void clear_pending_adv_report(struct hci_dev *hdev)
1102 {
1103 	struct discovery_state *d = &hdev->discovery;
1104 
1105 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1106 	d->last_adv_data_len = 0;
1107 }
1108 
/* Buffer a single advertising report in the discovery state so it can
 * be merged with a later scan response (or flushed when scanning
 * stops).  Callers are assumed to pass len within the bounds of
 * last_adv_data — TODO confirm against the callers.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1122 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * On success, update the HCI_LE_SCAN flag according to the requested
 * enable value (from the sent command).  When scanning is disabled,
 * any buffered advertising report is flushed to the management layer,
 * the scan-disable timer is cancelled, and discovery/advertising
 * state is reconciled depending on why scanning stopped.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scanning merges ADV + scan-rsp data, so any
		 * stale buffered report must be discarded first.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1188 
1189 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1190 					   struct sk_buff *skb)
1191 {
1192 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1193 
1194 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1195 
1196 	if (rp->status)
1197 		return;
1198 
1199 	hdev->le_white_list_size = rp->size;
1200 }
1201 
1202 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1203 				       struct sk_buff *skb)
1204 {
1205 	__u8 status = *((__u8 *) skb->data);
1206 
1207 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1208 
1209 	if (status)
1210 		return;
1211 
1212 	hci_bdaddr_list_clear(&hdev->le_white_list);
1213 }
1214 
1215 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1216 					struct sk_buff *skb)
1217 {
1218 	struct hci_cp_le_add_to_white_list *sent;
1219 	__u8 status = *((__u8 *) skb->data);
1220 
1221 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1222 
1223 	if (status)
1224 		return;
1225 
1226 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1227 	if (!sent)
1228 		return;
1229 
1230 	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1231 			   sent->bdaddr_type);
1232 }
1233 
1234 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1235 					  struct sk_buff *skb)
1236 {
1237 	struct hci_cp_le_del_from_white_list *sent;
1238 	__u8 status = *((__u8 *) skb->data);
1239 
1240 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1241 
1242 	if (status)
1243 		return;
1244 
1245 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1246 	if (!sent)
1247 		return;
1248 
1249 	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1250 			    sent->bdaddr_type);
1251 }
1252 
1253 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1254 					    struct sk_buff *skb)
1255 {
1256 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1257 
1258 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1259 
1260 	if (rp->status)
1261 		return;
1262 
1263 	memcpy(hdev->le_states, rp->le_states, 8);
1264 }
1265 
1266 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1267 					   struct sk_buff *skb)
1268 {
1269 	struct hci_cp_write_le_host_supported *sent;
1270 	__u8 status = *((__u8 *) skb->data);
1271 
1272 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1273 
1274 	if (status)
1275 		return;
1276 
1277 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1278 	if (!sent)
1279 		return;
1280 
1281 	if (sent->le) {
1282 		hdev->features[1][0] |= LMP_HOST_LE;
1283 		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1284 	} else {
1285 		hdev->features[1][0] &= ~LMP_HOST_LE;
1286 		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1287 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1288 	}
1289 
1290 	if (sent->simul)
1291 		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1292 	else
1293 		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1294 }
1295 
1296 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1297 {
1298 	struct hci_cp_le_set_adv_param *cp;
1299 	u8 status = *((u8 *) skb->data);
1300 
1301 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1302 
1303 	if (status)
1304 		return;
1305 
1306 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1307 	if (!cp)
1308 		return;
1309 
1310 	hci_dev_lock(hdev);
1311 	hdev->adv_addr_type = cp->own_address_type;
1312 	hci_dev_unlock(hdev);
1313 }
1314 
1315 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1316 					  struct sk_buff *skb)
1317 {
1318 	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1319 
1320 	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1321 	       hdev->name, rp->status, rp->phy_handle);
1322 
1323 	if (rp->status)
1324 		return;
1325 
1326 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1327 }
1328 
1329 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1330 {
1331 	struct hci_rp_read_rssi *rp = (void *) skb->data;
1332 	struct hci_conn *conn;
1333 
1334 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1335 
1336 	if (rp->status)
1337 		return;
1338 
1339 	hci_dev_lock(hdev);
1340 
1341 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1342 	if (conn)
1343 		conn->rssi = rp->rssi;
1344 
1345 	hci_dev_unlock(hdev);
1346 }
1347 
1348 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1349 {
1350 	struct hci_cp_read_tx_power *sent;
1351 	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1352 	struct hci_conn *conn;
1353 
1354 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1355 
1356 	if (rp->status)
1357 		return;
1358 
1359 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1360 	if (!sent)
1361 		return;
1362 
1363 	hci_dev_lock(hdev);
1364 
1365 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1366 	if (!conn)
1367 		goto unlock;
1368 
1369 	switch (sent->type) {
1370 	case 0x00:
1371 		conn->tx_power = rp->tx_power;
1372 		break;
1373 	case 0x01:
1374 		conn->max_tx_power = rp->tx_power;
1375 		break;
1376 	}
1377 
1378 unlock:
1379 	hci_dev_unlock(hdev);
1380 }
1381 
1382 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1383 {
1384 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1385 
1386 	if (status) {
1387 		hci_conn_check_pending(hdev);
1388 		return;
1389 	}
1390 
1391 	set_bit(HCI_INQUIRY, &hdev->flags);
1392 }
1393 
/* Command status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tears down the pending connection object — except for
 * status 0x0c (Command Disallowed) on an early attempt, where the
 * connection is parked in BT_CONNECT2 for a retry. On success, makes
 * sure a connection object exists for the address being paged.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: keep the connection
			 * around for up to two retries before giving up.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Command was sent without a pre-existing conn
			 * object (e.g. raw user-space request); create one
			 * so the Connection Complete event has a target.
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1431 
1432 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1433 {
1434 	struct hci_cp_add_sco *cp;
1435 	struct hci_conn *acl, *sco;
1436 	__u16 handle;
1437 
1438 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1439 
1440 	if (!status)
1441 		return;
1442 
1443 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1444 	if (!cp)
1445 		return;
1446 
1447 	handle = __le16_to_cpu(cp->handle);
1448 
1449 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1450 
1451 	hci_dev_lock(hdev);
1452 
1453 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1454 	if (acl) {
1455 		sco = acl->link;
1456 		if (sco) {
1457 			sco->state = BT_CLOSED;
1458 
1459 			hci_proto_connect_cfm(sco, status);
1460 			hci_conn_del(sco);
1461 		}
1462 	}
1463 
1464 	hci_dev_unlock(hdev);
1465 }
1466 
1467 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1468 {
1469 	struct hci_cp_auth_requested *cp;
1470 	struct hci_conn *conn;
1471 
1472 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1473 
1474 	if (!status)
1475 		return;
1476 
1477 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1478 	if (!cp)
1479 		return;
1480 
1481 	hci_dev_lock(hdev);
1482 
1483 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1484 	if (conn) {
1485 		if (conn->state == BT_CONFIG) {
1486 			hci_proto_connect_cfm(conn, status);
1487 			hci_conn_drop(conn);
1488 		}
1489 	}
1490 
1491 	hci_dev_unlock(hdev);
1492 }
1493 
1494 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1495 {
1496 	struct hci_cp_set_conn_encrypt *cp;
1497 	struct hci_conn *conn;
1498 
1499 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1500 
1501 	if (!status)
1502 		return;
1503 
1504 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1505 	if (!cp)
1506 		return;
1507 
1508 	hci_dev_lock(hdev);
1509 
1510 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1511 	if (conn) {
1512 		if (conn->state == BT_CONFIG) {
1513 			hci_proto_connect_cfm(conn, status);
1514 			hci_conn_drop(conn);
1515 		}
1516 	}
1517 
1518 	hci_dev_unlock(hdev);
1519 }
1520 
1521 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1522 				    struct hci_conn *conn)
1523 {
1524 	if (conn->state != BT_CONFIG || !conn->out)
1525 		return 0;
1526 
1527 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1528 		return 0;
1529 
1530 	/* Only request authentication for SSP connections or non-SSP
1531 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1532 	 * is requested.
1533 	 */
1534 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1535 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1536 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1537 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1538 		return 0;
1539 
1540 	return 1;
1541 }
1542 
1543 static int hci_resolve_name(struct hci_dev *hdev,
1544 				   struct inquiry_entry *e)
1545 {
1546 	struct hci_cp_remote_name_req cp;
1547 
1548 	memset(&cp, 0, sizeof(cp));
1549 
1550 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1551 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1552 	cp.pscan_mode = e->data.pscan_mode;
1553 	cp.clock_offset = e->data.clock_offset;
1554 
1555 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1556 }
1557 
1558 static bool hci_resolve_next_name(struct hci_dev *hdev)
1559 {
1560 	struct discovery_state *discov = &hdev->discovery;
1561 	struct inquiry_entry *e;
1562 
1563 	if (list_empty(&discov->resolve))
1564 		return false;
1565 
1566 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1567 	if (!e)
1568 		return false;
1569 
1570 	if (hci_resolve_name(hdev, e) == 0) {
1571 		e->name_state = NAME_PENDING;
1572 		return true;
1573 	}
1574 
1575 	return false;
1576 }
1577 
/* Handle the completion (or failure, name == NULL) of a remote name
 * lookup: notify mgmt, update the inquiry cache entry, and drive the
 * discovery state machine to the next name resolution or to STOPPED.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* Lookup failed; remember so we don't retry this entry. */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More entries to resolve keeps discovery in RESOLVING state. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1626 
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only acts on failure: lets the discovery state machine know the
 * pending name lookup is over, and — since name resolution is often a
 * prelude to authentication on outgoing links — starts the pending
 * authentication request directly if one is needed.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed lookup (name == NULL) to the discovery
	 * state machine when mgmt is in control.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* AUTH_PEND guards against sending a duplicate request. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1669 
1670 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1671 {
1672 	struct hci_cp_read_remote_features *cp;
1673 	struct hci_conn *conn;
1674 
1675 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1676 
1677 	if (!status)
1678 		return;
1679 
1680 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1681 	if (!cp)
1682 		return;
1683 
1684 	hci_dev_lock(hdev);
1685 
1686 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1687 	if (conn) {
1688 		if (conn->state == BT_CONFIG) {
1689 			hci_proto_connect_cfm(conn, status);
1690 			hci_conn_drop(conn);
1691 		}
1692 	}
1693 
1694 	hci_dev_unlock(hdev);
1695 }
1696 
1697 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1698 {
1699 	struct hci_cp_read_remote_ext_features *cp;
1700 	struct hci_conn *conn;
1701 
1702 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1703 
1704 	if (!status)
1705 		return;
1706 
1707 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1708 	if (!cp)
1709 		return;
1710 
1711 	hci_dev_lock(hdev);
1712 
1713 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1714 	if (conn) {
1715 		if (conn->state == BT_CONFIG) {
1716 			hci_proto_connect_cfm(conn, status);
1717 			hci_conn_drop(conn);
1718 		}
1719 	}
1720 
1721 	hci_dev_unlock(hdev);
1722 }
1723 
1724 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1725 {
1726 	struct hci_cp_setup_sync_conn *cp;
1727 	struct hci_conn *acl, *sco;
1728 	__u16 handle;
1729 
1730 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1731 
1732 	if (!status)
1733 		return;
1734 
1735 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1736 	if (!cp)
1737 		return;
1738 
1739 	handle = __le16_to_cpu(cp->handle);
1740 
1741 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1742 
1743 	hci_dev_lock(hdev);
1744 
1745 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1746 	if (acl) {
1747 		sco = acl->link;
1748 		if (sco) {
1749 			sco->state = BT_CLOSED;
1750 
1751 			hci_proto_connect_cfm(sco, status);
1752 			hci_conn_del(sco);
1753 		}
1754 	}
1755 
1756 	hci_dev_unlock(hdev);
1757 }
1758 
1759 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1760 {
1761 	struct hci_cp_sniff_mode *cp;
1762 	struct hci_conn *conn;
1763 
1764 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1765 
1766 	if (!status)
1767 		return;
1768 
1769 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1770 	if (!cp)
1771 		return;
1772 
1773 	hci_dev_lock(hdev);
1774 
1775 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1776 	if (conn) {
1777 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1778 
1779 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1780 			hci_sco_setup(conn, status);
1781 	}
1782 
1783 	hci_dev_unlock(hdev);
1784 }
1785 
1786 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1787 {
1788 	struct hci_cp_exit_sniff_mode *cp;
1789 	struct hci_conn *conn;
1790 
1791 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1792 
1793 	if (!status)
1794 		return;
1795 
1796 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1797 	if (!cp)
1798 		return;
1799 
1800 	hci_dev_lock(hdev);
1801 
1802 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1803 	if (conn) {
1804 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1805 
1806 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1807 			hci_sco_setup(conn, status);
1808 	}
1809 
1810 	hci_dev_unlock(hdev);
1811 }
1812 
1813 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1814 {
1815 	struct hci_cp_disconnect *cp;
1816 	struct hci_conn *conn;
1817 
1818 	if (!status)
1819 		return;
1820 
1821 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1822 	if (!cp)
1823 		return;
1824 
1825 	hci_dev_lock(hdev);
1826 
1827 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1828 	if (conn)
1829 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1830 				       conn->dst_type, status);
1831 
1832 	hci_dev_unlock(hdev);
1833 }
1834 
1835 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1836 {
1837 	struct hci_cp_create_phy_link *cp;
1838 
1839 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1840 
1841 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1842 	if (!cp)
1843 		return;
1844 
1845 	hci_dev_lock(hdev);
1846 
1847 	if (status) {
1848 		struct hci_conn *hcon;
1849 
1850 		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1851 		if (hcon)
1852 			hci_conn_del(hcon);
1853 	} else {
1854 		amp_write_remote_assoc(hdev, cp->phy_handle);
1855 	}
1856 
1857 	hci_dev_unlock(hdev);
1858 }
1859 
1860 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1861 {
1862 	struct hci_cp_accept_phy_link *cp;
1863 
1864 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1865 
1866 	if (status)
1867 		return;
1868 
1869 	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1870 	if (!cp)
1871 		return;
1872 
1873 	amp_write_remote_assoc(hdev, cp->phy_handle);
1874 }
1875 
/* Command status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, records the initiator/responder address information that
 * SMP will later need, and arms a connection-attempt timeout for
 * direct (non-white-list) connects.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1926 
1927 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1928 {
1929 	struct hci_cp_le_start_enc *cp;
1930 	struct hci_conn *conn;
1931 
1932 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1933 
1934 	if (!status)
1935 		return;
1936 
1937 	hci_dev_lock(hdev);
1938 
1939 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1940 	if (!cp)
1941 		goto unlock;
1942 
1943 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1944 	if (!conn)
1945 		goto unlock;
1946 
1947 	if (conn->state != BT_CONNECTED)
1948 		goto unlock;
1949 
1950 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1951 	hci_conn_drop(conn);
1952 
1953 unlock:
1954 	hci_dev_unlock(hdev);
1955 }
1956 
1957 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
1958 {
1959 	struct hci_cp_switch_role *cp;
1960 	struct hci_conn *conn;
1961 
1962 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1963 
1964 	if (!status)
1965 		return;
1966 
1967 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
1968 	if (!cp)
1969 		return;
1970 
1971 	hci_dev_lock(hdev);
1972 
1973 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1974 	if (conn)
1975 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
1976 
1977 	hci_dev_unlock(hdev);
1978 }
1979 
/* Handle the Inquiry Complete event: clear the HCI_INQUIRY flag (waking
 * any waiters), then either move discovery to name resolution or mark
 * it stopped when mgmt is driving the discovery state machine.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Connections may have been deferred while inquiry was running. */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The discovery state machine below only applies under mgmt. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first pending name; if that fails,
	 * discovery is over.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2020 
2021 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2022 {
2023 	struct inquiry_data data;
2024 	struct inquiry_info *info = (void *) (skb->data + 1);
2025 	int num_rsp = *((__u8 *) skb->data);
2026 
2027 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2028 
2029 	if (!num_rsp)
2030 		return;
2031 
2032 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2033 		return;
2034 
2035 	hci_dev_lock(hdev);
2036 
2037 	for (; num_rsp; num_rsp--, info++) {
2038 		u32 flags;
2039 
2040 		bacpy(&data.bdaddr, &info->bdaddr);
2041 		data.pscan_rep_mode	= info->pscan_rep_mode;
2042 		data.pscan_period_mode	= info->pscan_period_mode;
2043 		data.pscan_mode		= info->pscan_mode;
2044 		memcpy(data.dev_class, info->dev_class, 3);
2045 		data.clock_offset	= info->clock_offset;
2046 		data.rssi		= HCI_RSSI_INVALID;
2047 		data.ssp_mode		= 0x00;
2048 
2049 		flags = hci_inquiry_cache_update(hdev, &data, false);
2050 
2051 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2052 				  info->dev_class, HCI_RSSI_INVALID,
2053 				  flags, NULL, 0, NULL, 0);
2054 	}
2055 
2056 	hci_dev_unlock(hdev);
2057 }
2058 
/* Handle the Connection Complete event: bind the new handle to the
 * matching connection object, finish link setup (auth/encrypt flags,
 * remote feature read, packet type), or tear the object down on error.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* An eSCO setup request may be answered with a plain SCO
		 * connection; retarget the pending ESCO object.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a stored
			 * key are likely about to pair; give them the
			 * longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	/* An ACL coming up (or failing) may unblock a queued SCO setup. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links confirm later, after remote features arrive. */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2142 
2143 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2144 {
2145 	struct hci_cp_reject_conn_req cp;
2146 
2147 	bacpy(&cp.bdaddr, bdaddr);
2148 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2149 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2150 }
2151 
/* Handle the Connection Request event: consult the protocol layers and
 * the black/white lists to accept or reject the request, then either
 * send the appropriate accept command or defer to the protocol.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let L2CAP/SCO decide whether to accept and whether to defer. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Blacklisted devices are always rejected. */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* When not connectable, only whitelisted devices may connect. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class for this peer. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		/* Plain ACL, or SCO on a controller without eSCO support:
		 * accept with the basic command.
		 */
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* eSCO-capable controller: accept with full sync
		 * connection parameters.
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol asked to defer; it will accept or reject later. */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2241 
2242 static u8 hci_to_mgmt_reason(u8 err)
2243 {
2244 	switch (err) {
2245 	case HCI_ERROR_CONNECTION_TIMEOUT:
2246 		return MGMT_DEV_DISCONN_TIMEOUT;
2247 	case HCI_ERROR_REMOTE_USER_TERM:
2248 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2249 	case HCI_ERROR_REMOTE_POWER_OFF:
2250 		return MGMT_DEV_DISCONN_REMOTE;
2251 	case HCI_ERROR_LOCAL_HOST_TERM:
2252 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2253 	default:
2254 		return MGMT_DEV_DISCONN_UNKNOWN;
2255 	}
2256 }
2257 
/* Handle the Disconnection Complete event: notify mgmt, clean up link
 * keys and page scan for ACL links, re-arm auto-connect parameters for
 * LE peers, delete the connection, and re-enable advertising if the
 * ended LE connection had suppressed it.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect if mgmt had seen this device as
	 * connected in the first place.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Re-queue the peer for background connect depending on its
	 * auto-connect policy (link-loss only reconnects on timeout).
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed below; remember the type for the check after. */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2335 
/* Handle the HCI Authentication Complete event: update the connection's
 * auth state and, depending on the connection phase, either kick off
 * encryption or confirm the connection/authentication to upper layers.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated on an
		 * existing link, so don't upgrade the security level here.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links encryption is mandatory, so request it before
		 * declaring the connection established.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Refresh the disconnect timer while keeping the refcount
		 * balanced (hold immediately followed by drop).
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request may have been queued behind this
	 * authentication; issue or fail it now.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2400 
/* Handle the HCI Remote Name Request Complete event: resolve any pending
 * name lookup for mgmt and, if needed, start outgoing authentication on
 * the associated ACL connection.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Without mgmt there is no pending name request to resolve */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* On failure report an empty name so the lookup still completes */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication only if none is already in flight */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2442 
/* Handle the HCI Encryption Change event: track the connection's
 * encryption/authentication flags, enforce Secure Connections Only
 * policy, and notify upper layers of the result.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* An encryption failure on an established link warrants tearing
	 * the link down rather than leaving it unencrypted.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2514 
2515 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2516 					     struct sk_buff *skb)
2517 {
2518 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2519 	struct hci_conn *conn;
2520 
2521 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2522 
2523 	hci_dev_lock(hdev);
2524 
2525 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2526 	if (conn) {
2527 		if (!ev->status)
2528 			set_bit(HCI_CONN_SECURE, &conn->flags);
2529 
2530 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2531 
2532 		hci_key_change_cfm(conn, ev->status);
2533 	}
2534 
2535 	hci_dev_unlock(hdev);
2536 }
2537 
/* Handle the HCI Read Remote Supported Features Complete event: store
 * the feature page and continue connection setup (extended features,
 * name request, or connect confirmation).
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache feature page 0 regardless of connection state */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The setup steps below only apply while still configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support SSP, fetch the extended feature page
	 * (which carries the host SSP bit) before going further.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before telling mgmt about the device,
	 * otherwise report the connection right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2585 
/* Handle the HCI Command Complete event: dispatch to the per-opcode
 * completion handler, complete any matching hci_request, and restart
 * the command queue when the controller signals it can accept more.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Strip the event header so handlers see only return parameters */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so the timeout no longer applies.
	 * HCI_OP_NOP completions are controller-generated keep-alives.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can take another command; kick
	 * the command work queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2865 
/* Handle the HCI Command Status event: dispatch to the per-opcode status
 * handler, complete failed requests, and restart the command queue when
 * the controller signals it can accept more commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header before handing skb to handlers */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged; stop the command timeout.
	 * HCI_OP_NOP statuses are controller keep-alives.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now if the command failed, or if the
	 * request is not waiting for a later event to finish it.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0 means the controller can take another command; kick
	 * the command work queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2962 
2963 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
2964 {
2965 	struct hci_ev_hardware_error *ev = (void *) skb->data;
2966 
2967 	BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
2968 }
2969 
2970 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2971 {
2972 	struct hci_ev_role_change *ev = (void *) skb->data;
2973 	struct hci_conn *conn;
2974 
2975 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2976 
2977 	hci_dev_lock(hdev);
2978 
2979 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2980 	if (conn) {
2981 		if (!ev->status)
2982 			conn->role = ev->role;
2983 
2984 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2985 
2986 		hci_role_switch_cfm(conn, ev->status, ev->role);
2987 	}
2988 
2989 	hci_dev_unlock(hdev);
2990 }
2991 
/* Handle the HCI Number of Completed Packets event: return transmit
 * credits to the per-link-type counters (packet-based flow control)
 * and restart TX processing.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the claimed number of handle entries fits in skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Refund credits to the right pool, clamped to the
		 * controller-advertised maximum for each link type.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers share
			 * the ACL buffer pool for LE traffic.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed credits may allow queued packets to be sent now */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3057 
3058 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3059 						 __u16 handle)
3060 {
3061 	struct hci_chan *chan;
3062 
3063 	switch (hdev->dev_type) {
3064 	case HCI_BREDR:
3065 		return hci_conn_hash_lookup_handle(hdev, handle);
3066 	case HCI_AMP:
3067 		chan = hci_chan_lookup_handle(hdev, handle);
3068 		if (chan)
3069 			return chan->conn;
3070 		break;
3071 	default:
3072 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3073 		break;
3074 	}
3075 
3076 	return NULL;
3077 }
3078 
/* Handle the HCI Number of Completed Data Blocks event: return buffer
 * blocks to the shared pool (block-based flow control, used by AMP
 * controllers) and restart TX processing.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the claimed number of handle entries fits in skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may refer to a logical channel on AMP devices */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Return blocks to the pool, clamped to the advertised
		 * total number of blocks.
		 */
		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed blocks may allow queued packets to be sent now */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3128 
/* Handle the HCI Mode Change event: track the connection's power mode
 * (active/sniff) and complete any deferred SCO setup.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only update the power-save flag for remote-initiated mode
		 * changes; locally requested ones clear the pending bit.
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* SCO setup may have been waiting for this mode change */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3156 
/* Handle the HCI PIN Code Request event: reject the request when the
 * adapter is not bondable and pairing was not locally initiated,
 * otherwise forward it to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress;
	 * the hold/drop pair keeps the refcount balanced.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16 digit PIN from user space */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3194 
3195 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3196 {
3197 	if (key_type == HCI_LK_CHANGED_COMBINATION)
3198 		return;
3199 
3200 	conn->pin_length = pin_len;
3201 	conn->key_type = key_type;
3202 
3203 	switch (key_type) {
3204 	case HCI_LK_LOCAL_UNIT:
3205 	case HCI_LK_REMOTE_UNIT:
3206 	case HCI_LK_DEBUG_COMBINATION:
3207 		return;
3208 	case HCI_LK_COMBINATION:
3209 		if (pin_len == 16)
3210 			conn->pending_sec_level = BT_SECURITY_HIGH;
3211 		else
3212 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3213 		break;
3214 	case HCI_LK_UNAUTH_COMBINATION_P192:
3215 	case HCI_LK_UNAUTH_COMBINATION_P256:
3216 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3217 		break;
3218 	case HCI_LK_AUTH_COMBINATION_P192:
3219 		conn->pending_sec_level = BT_SECURITY_HIGH;
3220 		break;
3221 	case HCI_LK_AUTH_COMBINATION_P256:
3222 		conn->pending_sec_level = BT_SECURITY_FIPS;
3223 		break;
3224 	}
3225 }
3226 
/* Handle the HCI Link Key Request event: look up a stored link key for
 * the remote device and reply with it, rejecting keys that do not meet
 * the security level the connection requires.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Link keys are only managed by the kernel when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Don't offer an unauthenticated key when the connection
		 * requires MITM protection (auth_type bit 0 set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is too weak for high
		 * or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3286 
/* Handle the HCI Link Key Notification event: store the new link key,
 * notify user space, and decide whether the key should persist beyond
 * this connection.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Refresh the disconnect timeout; hold/drop keeps refcount even */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Without mgmt the kernel does not store link keys */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys must be flushed when the link goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3346 
3347 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3348 {
3349 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3350 	struct hci_conn *conn;
3351 
3352 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3353 
3354 	hci_dev_lock(hdev);
3355 
3356 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3357 	if (conn && !ev->status) {
3358 		struct inquiry_entry *ie;
3359 
3360 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3361 		if (ie) {
3362 			ie->data.clock_offset = ev->clock_offset;
3363 			ie->timestamp = jiffies;
3364 		}
3365 	}
3366 
3367 	hci_dev_unlock(hdev);
3368 }
3369 
3370 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3371 {
3372 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3373 	struct hci_conn *conn;
3374 
3375 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3376 
3377 	hci_dev_lock(hdev);
3378 
3379 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3380 	if (conn && !ev->status)
3381 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3382 
3383 	hci_dev_unlock(hdev);
3384 }
3385 
3386 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3387 {
3388 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3389 	struct inquiry_entry *ie;
3390 
3391 	BT_DBG("%s", hdev->name);
3392 
3393 	hci_dev_lock(hdev);
3394 
3395 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3396 	if (ie) {
3397 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3398 		ie->timestamp = jiffies;
3399 	}
3400 
3401 	hci_dev_unlock(hdev);
3402 }
3403 
/* Handle the HCI Inquiry Result with RSSI event. The event comes in two
 * wire formats (with or without a page scan mode field); the per-entry
 * size is used to tell them apart. Each result updates the inquiry
 * cache and is reported to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	/* First byte of the event is the number of responses */
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Distinguish the two formats by the per-response entry size */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			/* This format carries no page scan mode field */
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3467 
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	/* Remote Extended Features Complete event: cache the reported
	 * feature page, track the remote host's SSP/SC support (page 1)
	 * and, for connections still in BT_CONFIG, continue connection
	 * setup with a remote name request or a mgmt connected event.
	 */
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Store the feature page. NOTE(review): this happens even when
	 * ev->status is non-zero - confirm that failed reads should still
	 * overwrite the cached page.
	 */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 0x01 carries the remote host feature bits (SSP/SC) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before reporting the connection to
	 * user space; otherwise report it connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* With no outgoing authentication pending, setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3531 
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	/* Synchronous Connection Complete event: finalize an (e)SCO
	 * connection attempt. For a known set of negotiation failures
	 * the setup is retried with a downgraded packet type before
	 * giving up.
	 */
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO request may have been answered with a plain SCO
		 * connection: look the conn up under ESCO_LINK and
		 * downgrade its type accordingly.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Retry with a less demanding packet type for outgoing
		 * connections. NOTE(review): conn->link is dereferenced
		 * without a NULL check - confirm an outgoing (e)SCO conn
		 * always has its parent ACL link set at this point.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3589 
3590 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3591 {
3592 	size_t parsed = 0;
3593 
3594 	while (parsed < eir_len) {
3595 		u8 field_len = eir[0];
3596 
3597 		if (field_len == 0)
3598 			return parsed;
3599 
3600 		parsed += field_len + 1;
3601 		eir += field_len + 1;
3602 	}
3603 
3604 	return eir_len;
3605 }
3606 
3607 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3608 					    struct sk_buff *skb)
3609 {
3610 	struct inquiry_data data;
3611 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3612 	int num_rsp = *((__u8 *) skb->data);
3613 	size_t eir_len;
3614 
3615 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3616 
3617 	if (!num_rsp)
3618 		return;
3619 
3620 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3621 		return;
3622 
3623 	hci_dev_lock(hdev);
3624 
3625 	for (; num_rsp; num_rsp--, info++) {
3626 		u32 flags;
3627 		bool name_known;
3628 
3629 		bacpy(&data.bdaddr, &info->bdaddr);
3630 		data.pscan_rep_mode	= info->pscan_rep_mode;
3631 		data.pscan_period_mode	= info->pscan_period_mode;
3632 		data.pscan_mode		= 0x00;
3633 		memcpy(data.dev_class, info->dev_class, 3);
3634 		data.clock_offset	= info->clock_offset;
3635 		data.rssi		= info->rssi;
3636 		data.ssp_mode		= 0x01;
3637 
3638 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3639 			name_known = eir_has_data_type(info->data,
3640 						       sizeof(info->data),
3641 						       EIR_NAME_COMPLETE);
3642 		else
3643 			name_known = true;
3644 
3645 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3646 
3647 		eir_len = eir_get_length(info->data, sizeof(info->data));
3648 
3649 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3650 				  info->dev_class, info->rssi,
3651 				  flags, info->data, eir_len, NULL, 0);
3652 	}
3653 
3654 	hci_dev_unlock(hdev);
3655 }
3656 
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	/* Encryption Key Refresh Complete event: for LE links, promote
	 * the pending security level on success, or tear the connection
	 * down on failure. BR/EDR links are handled elsewhere.
	 */
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established connection means the link
	 * can no longer be trusted: disconnect it.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* While still configuring, the refresh result completes the
	 * connection setup; otherwise it answers an authentication
	 * request.
	 */
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3706 
3707 static u8 hci_get_auth_req(struct hci_conn *conn)
3708 {
3709 	/* If remote requests no-bonding follow that lead */
3710 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3711 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3712 		return conn->remote_auth | (conn->auth_type & 0x01);
3713 
3714 	/* If both remote and local have enough IO capabilities, require
3715 	 * MITM protection
3716 	 */
3717 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3718 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3719 		return conn->remote_auth | 0x01;
3720 
3721 	/* No MITM protection possible so ignore remote requirement */
3722 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3723 }
3724 
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* IO Capability Request event: decide whether pairing is allowed
	 * and answer with either an IO Capability Reply (including our
	 * capability, OOB data presence and authentication requirements)
	 * or a negative reply.
	 */
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold the connection for the duration of the pairing process */
	hci_conn_hold(conn);

	/* Without mgmt support no reply is sent at all */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we have it stored and we
		 * either initiated the connection or the remote indicated
		 * OOB data of its own.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing not allowed: reject the request */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3798 
3799 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3800 {
3801 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3802 	struct hci_conn *conn;
3803 
3804 	BT_DBG("%s", hdev->name);
3805 
3806 	hci_dev_lock(hdev);
3807 
3808 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3809 	if (!conn)
3810 		goto unlock;
3811 
3812 	conn->remote_cap = ev->capability;
3813 	conn->remote_auth = ev->authentication;
3814 	if (ev->oob_data)
3815 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3816 
3817 unlock:
3818 	hci_dev_unlock(hdev);
3819 }
3820 
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	/* User Confirmation Request event: decide between rejecting the
	 * pairing, auto-accepting it (immediately or after a configured
	 * delay) or forwarding the numeric comparison to user space,
	 * based on both sides' MITM requirements and IO capabilities.
	 */
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extract each side's MITM requirement bit */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a delayed work */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Let user space confirm the numeric comparison value */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3895 
3896 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3897 					 struct sk_buff *skb)
3898 {
3899 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3900 
3901 	BT_DBG("%s", hdev->name);
3902 
3903 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3904 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3905 }
3906 
3907 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3908 					struct sk_buff *skb)
3909 {
3910 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3911 	struct hci_conn *conn;
3912 
3913 	BT_DBG("%s", hdev->name);
3914 
3915 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3916 	if (!conn)
3917 		return;
3918 
3919 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3920 	conn->passkey_entered = 0;
3921 
3922 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3923 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3924 					 conn->dst_type, conn->passkey_notify,
3925 					 conn->passkey_entered);
3926 }
3927 
3928 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3929 {
3930 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3931 	struct hci_conn *conn;
3932 
3933 	BT_DBG("%s", hdev->name);
3934 
3935 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3936 	if (!conn)
3937 		return;
3938 
3939 	switch (ev->type) {
3940 	case HCI_KEYPRESS_STARTED:
3941 		conn->passkey_entered = 0;
3942 		return;
3943 
3944 	case HCI_KEYPRESS_ENTERED:
3945 		conn->passkey_entered++;
3946 		break;
3947 
3948 	case HCI_KEYPRESS_ERASED:
3949 		conn->passkey_entered--;
3950 		break;
3951 
3952 	case HCI_KEYPRESS_CLEARED:
3953 		conn->passkey_entered = 0;
3954 		break;
3955 
3956 	case HCI_KEYPRESS_COMPLETED:
3957 		return;
3958 	}
3959 
3960 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3961 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3962 					 conn->dst_type, conn->passkey_notify,
3963 					 conn->passkey_entered);
3964 }
3965 
3966 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3967 					 struct sk_buff *skb)
3968 {
3969 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3970 	struct hci_conn *conn;
3971 
3972 	BT_DBG("%s", hdev->name);
3973 
3974 	hci_dev_lock(hdev);
3975 
3976 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3977 	if (!conn)
3978 		goto unlock;
3979 
3980 	/* Reset the authentication requirement to unknown */
3981 	conn->remote_auth = 0xff;
3982 
3983 	/* To avoid duplicate auth_failed events to user space we check
3984 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3985 	 * initiated the authentication. A traditional auth_complete
3986 	 * event gets always produced as initiator and is also mapped to
3987 	 * the mgmt_auth_failed event */
3988 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3989 		mgmt_auth_failed(conn, ev->status);
3990 
3991 	hci_conn_drop(conn);
3992 
3993 unlock:
3994 	hci_dev_unlock(hdev);
3995 }
3996 
3997 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3998 					 struct sk_buff *skb)
3999 {
4000 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4001 	struct inquiry_entry *ie;
4002 	struct hci_conn *conn;
4003 
4004 	BT_DBG("%s", hdev->name);
4005 
4006 	hci_dev_lock(hdev);
4007 
4008 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4009 	if (conn)
4010 		memcpy(conn->features[1], ev->features, 8);
4011 
4012 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4013 	if (ie)
4014 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4015 
4016 	hci_dev_unlock(hdev);
4017 }
4018 
4019 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4020 					    struct sk_buff *skb)
4021 {
4022 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4023 	struct oob_data *data;
4024 
4025 	BT_DBG("%s", hdev->name);
4026 
4027 	hci_dev_lock(hdev);
4028 
4029 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4030 		goto unlock;
4031 
4032 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4033 	if (data) {
4034 		if (bredr_sc_enabled(hdev)) {
4035 			struct hci_cp_remote_oob_ext_data_reply cp;
4036 
4037 			bacpy(&cp.bdaddr, &ev->bdaddr);
4038 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4039 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4040 			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4041 			memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4042 
4043 			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4044 				     sizeof(cp), &cp);
4045 		} else {
4046 			struct hci_cp_remote_oob_data_reply cp;
4047 
4048 			bacpy(&cp.bdaddr, &ev->bdaddr);
4049 			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4050 			memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4051 
4052 			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4053 				     sizeof(cp), &cp);
4054 		}
4055 	} else {
4056 		struct hci_cp_remote_oob_data_neg_reply cp;
4057 
4058 		bacpy(&cp.bdaddr, &ev->bdaddr);
4059 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4060 			     sizeof(cp), &cp);
4061 	}
4062 
4063 unlock:
4064 	hci_dev_unlock(hdev);
4065 }
4066 
4067 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4068 				      struct sk_buff *skb)
4069 {
4070 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4071 	struct hci_conn *hcon, *bredr_hcon;
4072 
4073 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4074 	       ev->status);
4075 
4076 	hci_dev_lock(hdev);
4077 
4078 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4079 	if (!hcon) {
4080 		hci_dev_unlock(hdev);
4081 		return;
4082 	}
4083 
4084 	if (ev->status) {
4085 		hci_conn_del(hcon);
4086 		hci_dev_unlock(hdev);
4087 		return;
4088 	}
4089 
4090 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4091 
4092 	hcon->state = BT_CONNECTED;
4093 	bacpy(&hcon->dst, &bredr_hcon->dst);
4094 
4095 	hci_conn_hold(hcon);
4096 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4097 	hci_conn_drop(hcon);
4098 
4099 	hci_conn_add_sysfs(hcon);
4100 
4101 	amp_physical_cfm(bredr_hcon, hcon);
4102 
4103 	hci_dev_unlock(hdev);
4104 }
4105 
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* AMP Logical Link Complete event: create an hci_chan for the new
	 * logical link and, when an AMP manager with a pending BR/EDR
	 * L2CAP channel exists, confirm the logical link to L2CAP.
	 *
	 * NOTE(review): unlike the surrounding event handlers this one
	 * does not take hci_dev_lock() - confirm whether that is safe
	 * for the conn hash lookup and channel creation here.
	 */
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The L2CAP MTU follows the controller's data block size */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4143 
4144 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4145 					     struct sk_buff *skb)
4146 {
4147 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4148 	struct hci_chan *hchan;
4149 
4150 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4151 	       le16_to_cpu(ev->handle), ev->status);
4152 
4153 	if (ev->status)
4154 		return;
4155 
4156 	hci_dev_lock(hdev);
4157 
4158 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4159 	if (!hchan)
4160 		goto unlock;
4161 
4162 	amp_destroy_logical_link(hchan, ev->reason);
4163 
4164 unlock:
4165 	hci_dev_unlock(hdev);
4166 }
4167 
4168 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4169 					     struct sk_buff *skb)
4170 {
4171 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4172 	struct hci_conn *hcon;
4173 
4174 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4175 
4176 	if (ev->status)
4177 		return;
4178 
4179 	hci_dev_lock(hdev);
4180 
4181 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4182 	if (hcon) {
4183 		hcon->state = BT_CLOSED;
4184 		hci_conn_del(hcon);
4185 	}
4186 
4187 	hci_dev_unlock(hdev);
4188 }
4189 
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* LE Connection Complete event: create or finalize the hci_conn,
	 * reconstruct initiator/responder addresses, resolve RPAs to
	 * identity addresses, enforce the blacklist, notify mgmt and
	 * clean up any pending auto-connect parameters.
	 */
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A tracked connection attempt completed; stop its
		 * connection timeout timer.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Record the connection parameters the controller selected */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The pending auto-connect entry triggered this connection;
	 * remove it from the action list and drop its extra conn
	 * reference.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4324 
4325 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4326 					    struct sk_buff *skb)
4327 {
4328 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4329 	struct hci_conn *conn;
4330 
4331 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4332 
4333 	if (ev->status)
4334 		return;
4335 
4336 	hci_dev_lock(hdev);
4337 
4338 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4339 	if (conn) {
4340 		conn->le_conn_interval = le16_to_cpu(ev->interval);
4341 		conn->le_conn_latency = le16_to_cpu(ev->latency);
4342 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4343 	}
4344 
4345 	hci_dev_unlock(hdev);
4346 }
4347 
/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	/* Decide, based on an incoming advertising report, whether we
	 * should initiate a connection to the advertiser. Returns the
	 * new hci_conn on success or NULL when no connection attempt is
	 * made (or the attempt failed).
	 */
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4427 
4428 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4429 			       u8 bdaddr_type, bdaddr_t *direct_addr,
4430 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4431 {
4432 	struct discovery_state *d = &hdev->discovery;
4433 	struct smp_irk *irk;
4434 	struct hci_conn *conn;
4435 	bool match;
4436 	u32 flags;
4437 
4438 	/* If the direct address is present, then this report is from
4439 	 * a LE Direct Advertising Report event. In that case it is
4440 	 * important to see if the address is matching the local
4441 	 * controller address.
4442 	 */
4443 	if (direct_addr) {
4444 		/* Only resolvable random addresses are valid for these
4445 		 * kind of reports and others can be ignored.
4446 		 */
4447 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4448 			return;
4449 
4450 		/* If the controller is not using resolvable random
4451 		 * addresses, then this report can be ignored.
4452 		 */
4453 		if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
4454 			return;
4455 
4456 		/* If the local IRK of the controller does not match
4457 		 * with the resolvable random address provided, then
4458 		 * this report can be ignored.
4459 		 */
4460 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4461 			return;
4462 	}
4463 
4464 	/* Check if we need to convert to identity address */
4465 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4466 	if (irk) {
4467 		bdaddr = &irk->bdaddr;
4468 		bdaddr_type = irk->addr_type;
4469 	}
4470 
4471 	/* Check if we have been requested to connect to this device */
4472 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4473 	if (conn && type == LE_ADV_IND) {
4474 		/* Store report for later inclusion by
4475 		 * mgmt_device_connected
4476 		 */
4477 		memcpy(conn->le_adv_data, data, len);
4478 		conn->le_adv_data_len = len;
4479 	}
4480 
4481 	/* Passive scanning shouldn't trigger any device found events,
4482 	 * except for devices marked as CONN_REPORT for which we do send
4483 	 * device found events.
4484 	 */
4485 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4486 		if (type == LE_ADV_DIRECT_IND)
4487 			return;
4488 
4489 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4490 					       bdaddr, bdaddr_type))
4491 			return;
4492 
4493 		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4494 			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4495 		else
4496 			flags = 0;
4497 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4498 				  rssi, flags, data, len, NULL, 0);
4499 		return;
4500 	}
4501 
4502 	/* When receiving non-connectable or scannable undirected
4503 	 * advertising reports, this means that the remote device is
4504 	 * not connectable and then clearly indicate this in the
4505 	 * device found event.
4506 	 *
4507 	 * When receiving a scan response, then there is no way to
4508 	 * know if the remote device is connectable or not. However
4509 	 * since scan responses are merged with a previously seen
4510 	 * advertising report, the flags field from that report
4511 	 * will be used.
4512 	 *
4513 	 * In the really unlikely case that a controller get confused
4514 	 * and just sends a scan response event, then it is marked as
4515 	 * not connectable as well.
4516 	 */
4517 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4518 	    type == LE_ADV_SCAN_RSP)
4519 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4520 	else
4521 		flags = 0;
4522 
4523 	/* If there's nothing pending either store the data from this
4524 	 * event or send an immediate device found event if the data
4525 	 * should not be stored for later.
4526 	 */
4527 	if (!has_pending_adv_report(hdev)) {
4528 		/* If the report will trigger a SCAN_REQ store it for
4529 		 * later merging.
4530 		 */
4531 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4532 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4533 						 rssi, flags, data, len);
4534 			return;
4535 		}
4536 
4537 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4538 				  rssi, flags, data, len, NULL, 0);
4539 		return;
4540 	}
4541 
4542 	/* Check if the pending report is for the same device as the new one */
4543 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4544 		 bdaddr_type == d->last_adv_addr_type);
4545 
4546 	/* If the pending data doesn't match this report or this isn't a
4547 	 * scan response (e.g. we got a duplicate ADV_IND) then force
4548 	 * sending of the pending data.
4549 	 */
4550 	if (type != LE_ADV_SCAN_RSP || !match) {
4551 		/* Send out whatever is in the cache, but skip duplicates */
4552 		if (!match)
4553 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4554 					  d->last_adv_addr_type, NULL,
4555 					  d->last_adv_rssi, d->last_adv_flags,
4556 					  d->last_adv_data,
4557 					  d->last_adv_data_len, NULL, 0);
4558 
4559 		/* If the new report will trigger a SCAN_REQ store it for
4560 		 * later merging.
4561 		 */
4562 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4563 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4564 						 rssi, flags, data, len);
4565 			return;
4566 		}
4567 
4568 		/* The advertising reports cannot be merged, so clear
4569 		 * the pending report and send out a device found event.
4570 		 */
4571 		clear_pending_adv_report(hdev);
4572 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4573 				  rssi, flags, data, len, NULL, 0);
4574 		return;
4575 	}
4576 
4577 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4578 	 * the new event is a SCAN_RSP. We can therefore proceed with
4579 	 * sending a merged device found event.
4580 	 */
4581 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4582 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4583 			  d->last_adv_data, d->last_adv_data_len, data, len);
4584 	clear_pending_adv_report(hdev);
4585 }
4586 
4587 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4588 {
4589 	u8 num_reports = skb->data[0];
4590 	void *ptr = &skb->data[1];
4591 
4592 	hci_dev_lock(hdev);
4593 
4594 	while (num_reports--) {
4595 		struct hci_ev_le_advertising_info *ev = ptr;
4596 		s8 rssi;
4597 
4598 		rssi = ev->data[ev->length];
4599 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4600 				   ev->bdaddr_type, NULL, 0, rssi,
4601 				   ev->data, ev->length);
4602 
4603 		ptr += sizeof(*ev) + ev->length + 1;
4604 	}
4605 
4606 	hci_dev_unlock(hdev);
4607 }
4608 
/* Handle the LE Long Term Key Request meta event.
 *
 * The controller asks the host for the LTK of an encrypted LE
 * connection. Look up the key for the connection's peer and reply
 * with it, or send a negative reply if no matching key is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* Security level becomes effective once encryption completes */
	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single-use: drop the key once consumed */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4671 
4672 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4673 				      u8 reason)
4674 {
4675 	struct hci_cp_le_conn_param_req_neg_reply cp;
4676 
4677 	cp.handle = cpu_to_le16(handle);
4678 	cp.reason = reason;
4679 
4680 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4681 		     &cp);
4682 }
4683 
4684 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4685 					     struct sk_buff *skb)
4686 {
4687 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4688 	struct hci_cp_le_conn_param_req_reply cp;
4689 	struct hci_conn *hcon;
4690 	u16 handle, min, max, latency, timeout;
4691 
4692 	handle = le16_to_cpu(ev->handle);
4693 	min = le16_to_cpu(ev->interval_min);
4694 	max = le16_to_cpu(ev->interval_max);
4695 	latency = le16_to_cpu(ev->latency);
4696 	timeout = le16_to_cpu(ev->timeout);
4697 
4698 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
4699 	if (!hcon || hcon->state != BT_CONNECTED)
4700 		return send_conn_param_neg_reply(hdev, handle,
4701 						 HCI_ERROR_UNKNOWN_CONN_ID);
4702 
4703 	if (hci_check_conn_params(min, max, latency, timeout))
4704 		return send_conn_param_neg_reply(hdev, handle,
4705 						 HCI_ERROR_INVALID_LL_PARAMS);
4706 
4707 	if (hcon->role == HCI_ROLE_MASTER) {
4708 		struct hci_conn_params *params;
4709 		u8 store_hint;
4710 
4711 		hci_dev_lock(hdev);
4712 
4713 		params = hci_conn_params_lookup(hdev, &hcon->dst,
4714 						hcon->dst_type);
4715 		if (params) {
4716 			params->conn_min_interval = min;
4717 			params->conn_max_interval = max;
4718 			params->conn_latency = latency;
4719 			params->supervision_timeout = timeout;
4720 			store_hint = 0x01;
4721 		} else{
4722 			store_hint = 0x00;
4723 		}
4724 
4725 		hci_dev_unlock(hdev);
4726 
4727 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4728 				    store_hint, min, max, latency, timeout);
4729 	}
4730 
4731 	cp.handle = ev->handle;
4732 	cp.interval_min = ev->interval_min;
4733 	cp.interval_max = ev->interval_max;
4734 	cp.latency = ev->latency;
4735 	cp.timeout = ev->timeout;
4736 	cp.min_ce_len = 0;
4737 	cp.max_ce_len = 0;
4738 
4739 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4740 }
4741 
4742 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4743 					 struct sk_buff *skb)
4744 {
4745 	u8 num_reports = skb->data[0];
4746 	void *ptr = &skb->data[1];
4747 
4748 	hci_dev_lock(hdev);
4749 
4750 	while (num_reports--) {
4751 		struct hci_ev_le_direct_adv_info *ev = ptr;
4752 
4753 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4754 				   ev->bdaddr_type, &ev->direct_addr,
4755 				   ev->direct_addr_type, ev->rssi, NULL, 0);
4756 
4757 		ptr += sizeof(*ev);
4758 	}
4759 
4760 	hci_dev_unlock(hdev);
4761 }
4762 
4763 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4764 {
4765 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4766 
4767 	skb_pull(skb, sizeof(*le_ev));
4768 
4769 	switch (le_ev->subevent) {
4770 	case HCI_EV_LE_CONN_COMPLETE:
4771 		hci_le_conn_complete_evt(hdev, skb);
4772 		break;
4773 
4774 	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4775 		hci_le_conn_update_complete_evt(hdev, skb);
4776 		break;
4777 
4778 	case HCI_EV_LE_ADVERTISING_REPORT:
4779 		hci_le_adv_report_evt(hdev, skb);
4780 		break;
4781 
4782 	case HCI_EV_LE_LTK_REQ:
4783 		hci_le_ltk_request_evt(hdev, skb);
4784 		break;
4785 
4786 	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4787 		hci_le_remote_conn_param_req_evt(hdev, skb);
4788 		break;
4789 
4790 	case HCI_EV_LE_DIRECT_ADV_REPORT:
4791 		hci_le_direct_adv_report_evt(hdev, skb);
4792 		break;
4793 
4794 	default:
4795 		break;
4796 	}
4797 }
4798 
4799 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4800 {
4801 	struct hci_ev_channel_selected *ev = (void *) skb->data;
4802 	struct hci_conn *hcon;
4803 
4804 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4805 
4806 	skb_pull(skb, sizeof(*ev));
4807 
4808 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4809 	if (!hcon)
4810 		return;
4811 
4812 	amp_read_loc_assoc_final_data(hdev, hcon);
4813 }
4814 
/* Main HCI event dispatcher.
 *
 * Called for every HCI event packet received from the controller.
 * Optionally clones the event for a pending request, completes the
 * matching sent command if this event is the one it was waiting for,
 * and then hands the payload to the per-event handler. Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for this specific event,
	 * complete the request now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the handler for this event code */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
5022