xref: /linux/net/bluetooth/hci_event.c (revision 6084a6e23c971ef703229ee1aec68d01688578d6)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "a2mp.h"
34 #include "amp.h"
35 
36 /* Handle HCI Event packets */
37 
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On success, clear HCI_INQUIRY and wake up anyone sleeping on that
 * bit, then resume any connection attempts that were deferred while
 * the inquiry was in progress.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_conn_check_pending(hdev);
}
53 
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55 {
56 	__u8 status = *((__u8 *) skb->data);
57 
58 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
59 
60 	if (status)
61 		return;
62 
63 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64 }
65 
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 	__u8 status = *((__u8 *) skb->data);
69 
70 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
71 
72 	if (status)
73 		return;
74 
75 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76 
77 	hci_conn_check_pending(hdev);
78 }
79 
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 *
 * No local state to update; the outcome is reported through the
 * Remote Name Request Complete event. Only log the call.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
85 
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87 {
88 	struct hci_rp_role_discovery *rp = (void *) skb->data;
89 	struct hci_conn *conn;
90 
91 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92 
93 	if (rp->status)
94 		return;
95 
96 	hci_dev_lock(hdev);
97 
98 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 	if (conn) {
100 		if (rp->role)
101 			conn->link_mode &= ~HCI_LM_MASTER;
102 		else
103 			conn->link_mode |= HCI_LM_MASTER;
104 	}
105 
106 	hci_dev_unlock(hdev);
107 }
108 
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110 {
111 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 	struct hci_conn *conn;
113 
114 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115 
116 	if (rp->status)
117 		return;
118 
119 	hci_dev_lock(hdev);
120 
121 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 	if (conn)
123 		conn->link_policy = __le16_to_cpu(rp->policy);
124 
125 	hci_dev_unlock(hdev);
126 }
127 
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * The response does not echo the policy, so on success the value is
 * taken from the command we sent and mirrored on the connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* Command params: handle (2 bytes) then policy (2 bytes) */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
151 
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 					struct sk_buff *skb)
154 {
155 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156 
157 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158 
159 	if (rp->status)
160 		return;
161 
162 	hdev->link_policy = __le16_to_cpu(rp->policy);
163 }
164 
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 					 struct sk_buff *skb)
167 {
168 	__u8 status = *((__u8 *) skb->data);
169 	void *sent;
170 
171 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
172 
173 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 	if (!sent)
175 		return;
176 
177 	if (!status)
178 		hdev->link_policy = get_unaligned_le16(sent);
179 }
180 
/* Command Complete handler for HCI_Reset.
 *
 * Note that the status byte is not checked: all volatile host-side
 * state mirroring the controller is dropped unconditionally.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising and scan response data do not survive a reset */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
206 
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * With mgmt in use, the pending set-name request is completed even on
 * failure; otherwise the cached name is updated only on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
227 
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 	struct hci_rp_read_local_name *rp = (void *) skb->data;
231 
232 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233 
234 	if (rp->status)
235 		return;
236 
237 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240 
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * On success, sync the HCI_AUTH flag with the mode we requested; with
 * mgmt in use, also complete a pending authentication-enable request
 * (mgmt is informed about failures too).
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
264 
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266 {
267 	__u8 status = *((__u8 *) skb->data);
268 	void *sent;
269 
270 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
271 
272 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 	if (!sent)
274 		return;
275 
276 	if (!status) {
277 		__u8 param = *((__u8 *) sent);
278 
279 		if (param)
280 			set_bit(HCI_ENCRYPT, &hdev->flags);
281 		else
282 			clear_bit(HCI_ENCRYPT, &hdev->flags);
283 	}
284 }
285 
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * Sync HCI_PSCAN/HCI_ISCAN with the scan mode we requested and emit
 * mgmt connectable/discoverable events on every transition. On
 * failure, mgmt is informed and the discoverable timeout dropped.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* The scan mode we asked for (SCAN_PAGE/SCAN_INQUIRY bits) */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* We need to ensure that we set this back on if someone changed
	 * the scan mode through a raw HCI socket.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333 
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337 
338 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339 
340 	if (rp->status)
341 		return;
342 
343 	memcpy(hdev->dev_class, rp->dev_class, 3);
344 
345 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348 
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * Update the cached class on success; mgmt is always informed so a
 * pending set-class request can be completed (also on failure).
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
370 
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 	__u16 setting;
375 
376 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377 
378 	if (rp->status)
379 		return;
380 
381 	setting = __le16_to_cpu(rp->voice_setting);
382 
383 	if (hdev->voice_setting == setting)
384 		return;
385 
386 	hdev->voice_setting = setting;
387 
388 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389 
390 	if (hdev->notify)
391 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393 
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 				       struct sk_buff *skb)
396 {
397 	__u8 status = *((__u8 *) skb->data);
398 	__u16 setting;
399 	void *sent;
400 
401 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
402 
403 	if (status)
404 		return;
405 
406 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 	if (!sent)
408 		return;
409 
410 	setting = get_unaligned_le16(sent);
411 
412 	if (hdev->voice_setting == setting)
413 		return;
414 
415 	hdev->voice_setting = setting;
416 
417 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418 
419 	if (hdev->notify)
420 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422 
423 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
424 					  struct sk_buff *skb)
425 {
426 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
427 
428 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 
430 	if (rp->status)
431 		return;
432 
433 	hdev->num_iac = rp->num_iac;
434 
435 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
436 }
437 
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success, sync the host SSP feature bit with the requested mode.
 * With mgmt in use, complete a pending SSP enable request (also on
 * failure); otherwise toggle HCI_SSP_ENABLED directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
465 
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success, sync the host SC feature bit with the requested value.
 * With mgmt in use, complete a pending SC enable request (also on
 * failure); otherwise toggle HCI_SC_ENABLED directly.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
493 
494 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
495 {
496 	struct hci_rp_read_local_version *rp = (void *) skb->data;
497 
498 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
499 
500 	if (rp->status)
501 		return;
502 
503 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
504 		hdev->hci_ver = rp->hci_ver;
505 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
506 		hdev->lmp_ver = rp->lmp_ver;
507 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
508 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
509 	}
510 }
511 
512 static void hci_cc_read_local_commands(struct hci_dev *hdev,
513 				       struct sk_buff *skb)
514 {
515 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
516 
517 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
518 
519 	if (rp->status)
520 		return;
521 
522 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
523 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
524 }
525 
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Cache LMP feature page 0 and derive the default ACL packet types
 * and the supported (e)SCO packet types from the reported features.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
575 
576 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
577 					   struct sk_buff *skb)
578 {
579 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
580 
581 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
582 
583 	if (rp->status)
584 		return;
585 
586 	if (hdev->max_page < rp->max_page)
587 		hdev->max_page = rp->max_page;
588 
589 	if (rp->page < HCI_MAX_PAGES)
590 		memcpy(hdev->features[rp->page], rp->features, 8);
591 }
592 
593 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
594 					  struct sk_buff *skb)
595 {
596 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
597 
598 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599 
600 	if (!rp->status)
601 		hdev->flow_ctl_mode = rp->mode;
602 }
603 
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Record the controller's ACL/SCO MTUs and packet counts, which seed
 * the flow control accounting in acl_cnt/sco_cnt.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Override the SCO values for controllers flagged as reporting
	 * unusable ones (see HCI_QUIRK_FIXUP_BUFFER_SIZE).
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
629 
630 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
631 {
632 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
633 
634 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
635 
636 	if (!rp->status)
637 		bacpy(&hdev->bdaddr, &rp->bdaddr);
638 }
639 
640 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
641 					   struct sk_buff *skb)
642 {
643 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
644 
645 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
646 
647 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
648 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
649 		hdev->page_scan_window = __le16_to_cpu(rp->window);
650 	}
651 }
652 
653 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
654 					    struct sk_buff *skb)
655 {
656 	u8 status = *((u8 *) skb->data);
657 	struct hci_cp_write_page_scan_activity *sent;
658 
659 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
660 
661 	if (status)
662 		return;
663 
664 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
665 	if (!sent)
666 		return;
667 
668 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
669 	hdev->page_scan_window = __le16_to_cpu(sent->window);
670 }
671 
672 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
673 					   struct sk_buff *skb)
674 {
675 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
676 
677 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678 
679 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
680 		hdev->page_scan_type = rp->type;
681 }
682 
683 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
684 					struct sk_buff *skb)
685 {
686 	u8 status = *((u8 *) skb->data);
687 	u8 *type;
688 
689 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
690 
691 	if (status)
692 		return;
693 
694 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
695 	if (type)
696 		hdev->page_scan_type = *type;
697 }
698 
/* Command Complete handler for HCI_Read_Data_Block_Size.
 *
 * Used with block-based flow control: cache the block MTU, block
 * length and total block count, and seed the free-block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
718 
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 *
 * Cache the AMP controller capabilities. The A2MP getinfo response is
 * sent in all cases (also on failure) to answer the pending request.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
743 
744 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
745 					struct sk_buff *skb)
746 {
747 	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
748 	struct amp_assoc *assoc = &hdev->loc_assoc;
749 	size_t rem_len, frag_len;
750 
751 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
752 
753 	if (rp->status)
754 		goto a2mp_rsp;
755 
756 	frag_len = skb->len - sizeof(*rp);
757 	rem_len = __le16_to_cpu(rp->rem_len);
758 
759 	if (rem_len > frag_len) {
760 		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
761 
762 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
763 		assoc->offset += frag_len;
764 
765 		/* Read other fragments */
766 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
767 
768 		return;
769 	}
770 
771 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
772 	assoc->len = assoc->offset + rem_len;
773 	assoc->offset = 0;
774 
775 a2mp_rsp:
776 	/* Send A2MP Rsp when all fragments are received */
777 	a2mp_send_getampassoc_rsp(hdev, rp->status);
778 	a2mp_send_create_phy_link_req(hdev, rp->status);
779 }
780 
781 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
782 					 struct sk_buff *skb)
783 {
784 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
785 
786 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
787 
788 	if (!rp->status)
789 		hdev->inq_tx_power = rp->tx_power;
790 }
791 
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * With mgmt in use, complete the pending PIN reply (also on failure).
 * On success, record the PIN length on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
819 
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply:
 * with mgmt in use, complete the pending negative PIN reply.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
834 
835 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
836 				       struct sk_buff *skb)
837 {
838 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
839 
840 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
841 
842 	if (rp->status)
843 		return;
844 
845 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
846 	hdev->le_pkts = rp->le_max_pkt;
847 
848 	hdev->le_cnt = hdev->le_pkts;
849 
850 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
851 }
852 
853 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
854 					  struct sk_buff *skb)
855 {
856 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
857 
858 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
859 
860 	if (!rp->status)
861 		memcpy(hdev->le_features, rp->features, 8);
862 }
863 
864 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
865 					struct sk_buff *skb)
866 {
867 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
868 
869 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
870 
871 	if (!rp->status)
872 		hdev->adv_tx_power = rp->tx_power;
873 }
874 
/* Command Complete handler for HCI_User_Confirmation_Request_Reply:
 * with mgmt in use, complete the pending confirmation (also on
 * failure).
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
889 
/* Command Complete handler for
 * HCI_User_Confirmation_Request_Negative_Reply: with mgmt in use,
 * complete the pending negative confirmation (also on failure).
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
905 
/* Command Complete handler for HCI_User_Passkey_Request_Reply (the
 * response layout is shared with the user-confirm reply): with mgmt
 * in use, complete the pending passkey reply (also on failure).
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
920 
/* Command Complete handler for
 * HCI_User_Passkey_Request_Negative_Reply (response layout shared
 * with the user-confirm reply): with mgmt in use, complete the
 * pending negative passkey reply (also on failure).
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
936 
/* Command Complete handler for HCI_Read_Local_OOB_Data: forward the
 * P-192 hash/randomizer to mgmt (this command carries no P-256
 * values, hence the NULL arguments).
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
					  NULL, NULL, rp->status);
	hci_dev_unlock(hdev);
}
949 
/* Command Complete handler for HCI_Read_Local_OOB_Extended_Data:
 * forward both the P-192 and P-256 hash/randomizer pairs to mgmt.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
					  rp->hash256, rp->randomizer256,
					  rp->status);
	hci_dev_unlock(hdev);
}
963 
964 
965 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
966 {
967 	__u8 status = *((__u8 *) skb->data);
968 	bdaddr_t *sent;
969 
970 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
971 
972 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
973 	if (!sent)
974 		return;
975 
976 	hci_dev_lock(hdev);
977 
978 	if (!status)
979 		bacpy(&hdev->random_addr, sent);
980 
981 	hci_dev_unlock(hdev);
982 }
983 
/* Command Complete handler for HCI_LE_Set_Advertise_Enable.
 *
 * On success, report the new advertising state to mgmt and, if
 * advertising was enabled while an LE connection attempt is pending,
 * arm the connection timeout.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	if (status)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   HCI_LE_CONN_TIMEOUT);
	}

	mgmt_advertising(hdev, *sent);

	hci_dev_unlock(hdev);
}
1016 
1017 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1018 {
1019 	struct hci_cp_le_set_scan_param *cp;
1020 	__u8 status = *((__u8 *) skb->data);
1021 
1022 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1023 
1024 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1025 	if (!cp)
1026 		return;
1027 
1028 	hci_dev_lock(hdev);
1029 
1030 	if (!status)
1031 		hdev->le_scan_type = cp->type;
1032 
1033 	hci_dev_unlock(hdev);
1034 }
1035 
1036 static bool has_pending_adv_report(struct hci_dev *hdev)
1037 {
1038 	struct discovery_state *d = &hdev->discovery;
1039 
1040 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1041 }
1042 
1043 static void clear_pending_adv_report(struct hci_dev *hdev)
1044 {
1045 	struct discovery_state *d = &hdev->discovery;
1046 
1047 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1048 	d->last_adv_data_len = 0;
1049 }
1050 
1051 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1052 				     u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1053 {
1054 	struct discovery_state *d = &hdev->discovery;
1055 
1056 	bacpy(&d->last_adv_addr, bdaddr);
1057 	d->last_adv_addr_type = bdaddr_type;
1058 	d->last_adv_rssi = rssi;
1059 	memcpy(d->last_adv_data, data, len);
1060 	d->last_adv_data_len = len;
1061 }
1062 
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Track HCI_LE_SCAN, flush any buffered advertising report when
 * scanning stops, and move discovery to STOPPED if scanning had been
 * interrupted by a connect attempt.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	if (status)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scanning buffers adv reports until the scan
		 * response arrives; start with a clean slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			/* Report the buffered advertisement even though no
			 * scan response was received for it.
			 */
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1120 
1121 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1122 					   struct sk_buff *skb)
1123 {
1124 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1125 
1126 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1127 
1128 	if (!rp->status)
1129 		hdev->le_white_list_size = rp->size;
1130 }
1131 
1132 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1133 				       struct sk_buff *skb)
1134 {
1135 	__u8 status = *((__u8 *) skb->data);
1136 
1137 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1138 
1139 	if (!status)
1140 		hci_white_list_clear(hdev);
1141 }
1142 
1143 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1144 					struct sk_buff *skb)
1145 {
1146 	struct hci_cp_le_add_to_white_list *sent;
1147 	__u8 status = *((__u8 *) skb->data);
1148 
1149 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1150 
1151 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1152 	if (!sent)
1153 		return;
1154 
1155 	if (!status)
1156 		hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1157 }
1158 
1159 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1160 					  struct sk_buff *skb)
1161 {
1162 	struct hci_cp_le_del_from_white_list *sent;
1163 	__u8 status = *((__u8 *) skb->data);
1164 
1165 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1166 
1167 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1168 	if (!sent)
1169 		return;
1170 
1171 	if (!status)
1172 		hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1173 }
1174 
1175 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1176 					    struct sk_buff *skb)
1177 {
1178 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1179 
1180 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1181 
1182 	if (!rp->status)
1183 		memcpy(hdev->le_states, rp->le_states, 8);
1184 }
1185 
1186 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1187 					   struct sk_buff *skb)
1188 {
1189 	struct hci_cp_write_le_host_supported *sent;
1190 	__u8 status = *((__u8 *) skb->data);
1191 
1192 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1193 
1194 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1195 	if (!sent)
1196 		return;
1197 
1198 	if (!status) {
1199 		if (sent->le) {
1200 			hdev->features[1][0] |= LMP_HOST_LE;
1201 			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1202 		} else {
1203 			hdev->features[1][0] &= ~LMP_HOST_LE;
1204 			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1205 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1206 		}
1207 
1208 		if (sent->simul)
1209 			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1210 		else
1211 			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1212 	}
1213 }
1214 
/* Command complete for HCI_LE_Set_Advertising_Parameters: on success,
 * remember which own-address type advertising was configured with so
 * later code can tell what address peers will see in our adverts.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Fetch the parameters of the command this event completes. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1233 
1234 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1235 					  struct sk_buff *skb)
1236 {
1237 	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1238 
1239 	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1240 	       hdev->name, rp->status, rp->phy_handle);
1241 
1242 	if (rp->status)
1243 		return;
1244 
1245 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1246 }
1247 
1248 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1249 {
1250 	struct hci_rp_read_rssi *rp = (void *) skb->data;
1251 	struct hci_conn *conn;
1252 
1253 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1254 
1255 	if (rp->status)
1256 		return;
1257 
1258 	hci_dev_lock(hdev);
1259 
1260 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1261 	if (conn)
1262 		conn->rssi = rp->rssi;
1263 
1264 	hci_dev_unlock(hdev);
1265 }
1266 
1267 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1268 {
1269 	struct hci_cp_read_tx_power *sent;
1270 	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1271 	struct hci_conn *conn;
1272 
1273 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1274 
1275 	if (rp->status)
1276 		return;
1277 
1278 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1279 	if (!sent)
1280 		return;
1281 
1282 	hci_dev_lock(hdev);
1283 
1284 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1285 	if (!conn)
1286 		goto unlock;
1287 
1288 	switch (sent->type) {
1289 	case 0x00:
1290 		conn->tx_power = rp->tx_power;
1291 		break;
1292 	case 0x01:
1293 		conn->max_tx_power = rp->tx_power;
1294 		break;
1295 	}
1296 
1297 unlock:
1298 	hci_dev_unlock(hdev);
1299 }
1300 
1301 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1302 {
1303 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1304 
1305 	if (status) {
1306 		hci_conn_check_pending(hdev);
1307 		return;
1308 	}
1309 
1310 	set_bit(HCI_INQUIRY, &hdev->flags);
1311 }
1312 
/* Command status for HCI_Create_Connection: reconcile the status with
 * the connection object that tracks this outgoing ACL attempt.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* On status 0x0c retry the attempt (up to two tries)
			 * by parking the connection in BT_CONNECT2; any
			 * other failure tears the connection down.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: make sure a connection object exists
		 * for the Connection Complete event that will follow.
		 */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1352 
1353 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1354 {
1355 	struct hci_cp_add_sco *cp;
1356 	struct hci_conn *acl, *sco;
1357 	__u16 handle;
1358 
1359 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1360 
1361 	if (!status)
1362 		return;
1363 
1364 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1365 	if (!cp)
1366 		return;
1367 
1368 	handle = __le16_to_cpu(cp->handle);
1369 
1370 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1371 
1372 	hci_dev_lock(hdev);
1373 
1374 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1375 	if (acl) {
1376 		sco = acl->link;
1377 		if (sco) {
1378 			sco->state = BT_CLOSED;
1379 
1380 			hci_proto_connect_cfm(sco, status);
1381 			hci_conn_del(sco);
1382 		}
1383 	}
1384 
1385 	hci_dev_unlock(hdev);
1386 }
1387 
1388 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1389 {
1390 	struct hci_cp_auth_requested *cp;
1391 	struct hci_conn *conn;
1392 
1393 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1394 
1395 	if (!status)
1396 		return;
1397 
1398 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1399 	if (!cp)
1400 		return;
1401 
1402 	hci_dev_lock(hdev);
1403 
1404 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1405 	if (conn) {
1406 		if (conn->state == BT_CONFIG) {
1407 			hci_proto_connect_cfm(conn, status);
1408 			hci_conn_drop(conn);
1409 		}
1410 	}
1411 
1412 	hci_dev_unlock(hdev);
1413 }
1414 
1415 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1416 {
1417 	struct hci_cp_set_conn_encrypt *cp;
1418 	struct hci_conn *conn;
1419 
1420 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1421 
1422 	if (!status)
1423 		return;
1424 
1425 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1426 	if (!cp)
1427 		return;
1428 
1429 	hci_dev_lock(hdev);
1430 
1431 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1432 	if (conn) {
1433 		if (conn->state == BT_CONFIG) {
1434 			hci_proto_connect_cfm(conn, status);
1435 			hci_conn_drop(conn);
1436 		}
1437 	}
1438 
1439 	hci_dev_unlock(hdev);
1440 }
1441 
1442 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1443 				    struct hci_conn *conn)
1444 {
1445 	if (conn->state != BT_CONFIG || !conn->out)
1446 		return 0;
1447 
1448 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1449 		return 0;
1450 
1451 	/* Only request authentication for SSP connections or non-SSP
1452 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1453 	 * is requested.
1454 	 */
1455 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1456 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1457 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1458 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1459 		return 0;
1460 
1461 	return 1;
1462 }
1463 
1464 static int hci_resolve_name(struct hci_dev *hdev,
1465 				   struct inquiry_entry *e)
1466 {
1467 	struct hci_cp_remote_name_req cp;
1468 
1469 	memset(&cp, 0, sizeof(cp));
1470 
1471 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1472 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1473 	cp.pscan_mode = e->data.pscan_mode;
1474 	cp.clock_offset = e->data.clock_offset;
1475 
1476 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1477 }
1478 
1479 static bool hci_resolve_next_name(struct hci_dev *hdev)
1480 {
1481 	struct discovery_state *discov = &hdev->discovery;
1482 	struct inquiry_entry *e;
1483 
1484 	if (list_empty(&discov->resolve))
1485 		return false;
1486 
1487 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1488 	if (!e)
1489 		return false;
1490 
1491 	if (hci_resolve_name(hdev, e) == 0) {
1492 		e->name_state = NAME_PENDING;
1493 		return true;
1494 	}
1495 
1496 	return false;
1497 }
1498 
/* Handle a resolved (or failed) remote name in the context of an
 * ongoing discovery procedure: report it to mgmt, update the inquiry
 * cache entry's name state and drive resolution of the next name.
 * A NULL name means resolution failed for this address.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Report the device as connected to mgmt exactly once, with
	 * whatever name information is available at this point.
	 */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Kick off resolution of the next pending name; when none is
	 * left, the discovery procedure is complete.
	 */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1541 
/* Command status for HCI_Remote_Name_Request. Only failures are
 * handled here: tell the discovery state machine the name could not
 * be resolved and, if the connection still needs authentication,
 * request it now instead of waiting for a name that will never come.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name signals resolution failure to the discovery code. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1582 
1583 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1584 {
1585 	struct hci_cp_read_remote_features *cp;
1586 	struct hci_conn *conn;
1587 
1588 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1589 
1590 	if (!status)
1591 		return;
1592 
1593 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1594 	if (!cp)
1595 		return;
1596 
1597 	hci_dev_lock(hdev);
1598 
1599 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1600 	if (conn) {
1601 		if (conn->state == BT_CONFIG) {
1602 			hci_proto_connect_cfm(conn, status);
1603 			hci_conn_drop(conn);
1604 		}
1605 	}
1606 
1607 	hci_dev_unlock(hdev);
1608 }
1609 
1610 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1611 {
1612 	struct hci_cp_read_remote_ext_features *cp;
1613 	struct hci_conn *conn;
1614 
1615 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1616 
1617 	if (!status)
1618 		return;
1619 
1620 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1621 	if (!cp)
1622 		return;
1623 
1624 	hci_dev_lock(hdev);
1625 
1626 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1627 	if (conn) {
1628 		if (conn->state == BT_CONFIG) {
1629 			hci_proto_connect_cfm(conn, status);
1630 			hci_conn_drop(conn);
1631 		}
1632 	}
1633 
1634 	hci_dev_unlock(hdev);
1635 }
1636 
1637 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1638 {
1639 	struct hci_cp_setup_sync_conn *cp;
1640 	struct hci_conn *acl, *sco;
1641 	__u16 handle;
1642 
1643 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1644 
1645 	if (!status)
1646 		return;
1647 
1648 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1649 	if (!cp)
1650 		return;
1651 
1652 	handle = __le16_to_cpu(cp->handle);
1653 
1654 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1655 
1656 	hci_dev_lock(hdev);
1657 
1658 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1659 	if (acl) {
1660 		sco = acl->link;
1661 		if (sco) {
1662 			sco->state = BT_CLOSED;
1663 
1664 			hci_proto_connect_cfm(sco, status);
1665 			hci_conn_del(sco);
1666 		}
1667 	}
1668 
1669 	hci_dev_unlock(hdev);
1670 }
1671 
1672 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1673 {
1674 	struct hci_cp_sniff_mode *cp;
1675 	struct hci_conn *conn;
1676 
1677 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1678 
1679 	if (!status)
1680 		return;
1681 
1682 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1683 	if (!cp)
1684 		return;
1685 
1686 	hci_dev_lock(hdev);
1687 
1688 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1689 	if (conn) {
1690 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1691 
1692 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1693 			hci_sco_setup(conn, status);
1694 	}
1695 
1696 	hci_dev_unlock(hdev);
1697 }
1698 
1699 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1700 {
1701 	struct hci_cp_exit_sniff_mode *cp;
1702 	struct hci_conn *conn;
1703 
1704 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1705 
1706 	if (!status)
1707 		return;
1708 
1709 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1710 	if (!cp)
1711 		return;
1712 
1713 	hci_dev_lock(hdev);
1714 
1715 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1716 	if (conn) {
1717 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1718 
1719 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1720 			hci_sco_setup(conn, status);
1721 	}
1722 
1723 	hci_dev_unlock(hdev);
1724 }
1725 
1726 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1727 {
1728 	struct hci_cp_disconnect *cp;
1729 	struct hci_conn *conn;
1730 
1731 	if (!status)
1732 		return;
1733 
1734 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1735 	if (!cp)
1736 		return;
1737 
1738 	hci_dev_lock(hdev);
1739 
1740 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1741 	if (conn)
1742 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1743 				       conn->dst_type, status);
1744 
1745 	hci_dev_unlock(hdev);
1746 }
1747 
1748 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1749 {
1750 	struct hci_cp_create_phy_link *cp;
1751 
1752 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1753 
1754 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1755 	if (!cp)
1756 		return;
1757 
1758 	hci_dev_lock(hdev);
1759 
1760 	if (status) {
1761 		struct hci_conn *hcon;
1762 
1763 		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1764 		if (hcon)
1765 			hci_conn_del(hcon);
1766 	} else {
1767 		amp_write_remote_assoc(hdev, cp->phy_handle);
1768 	}
1769 
1770 	hci_dev_unlock(hdev);
1771 }
1772 
1773 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1774 {
1775 	struct hci_cp_accept_phy_link *cp;
1776 
1777 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1778 
1779 	if (status)
1780 		return;
1781 
1782 	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1783 	if (!cp)
1784 		return;
1785 
1786 	amp_write_remote_assoc(hdev, cp->phy_handle);
1787 }
1788 
/* Command status for HCI_LE_Create_Connection. On success, record the
 * initiator/responder address information on the connection (needed by
 * SMP) and arm a timeout for directed connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   HCI_LE_CONN_TIMEOUT);

unlock:
	hci_dev_unlock(hdev);
}
1839 
1840 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1841 {
1842 	struct hci_cp_le_start_enc *cp;
1843 	struct hci_conn *conn;
1844 
1845 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1846 
1847 	if (!status)
1848 		return;
1849 
1850 	hci_dev_lock(hdev);
1851 
1852 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1853 	if (!cp)
1854 		goto unlock;
1855 
1856 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1857 	if (!conn)
1858 		goto unlock;
1859 
1860 	if (conn->state != BT_CONNECTED)
1861 		goto unlock;
1862 
1863 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1864 	hci_conn_drop(conn);
1865 
1866 unlock:
1867 	hci_dev_unlock(hdev);
1868 }
1869 
/* Inquiry Complete event: clear the inquiry state, wake any waiters,
 * and advance the mgmt discovery state machine - either into name
 * resolution (if cached entries still need names) or to stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state handling below only applies to mgmt-driven
	 * controllers; legacy ioctl-based inquiry stops here.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first pending name; if that fails there
	 * is nothing more to do and discovery is stopped.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1910 
1911 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1912 {
1913 	struct inquiry_data data;
1914 	struct inquiry_info *info = (void *) (skb->data + 1);
1915 	int num_rsp = *((__u8 *) skb->data);
1916 
1917 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1918 
1919 	if (!num_rsp)
1920 		return;
1921 
1922 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1923 		return;
1924 
1925 	hci_dev_lock(hdev);
1926 
1927 	for (; num_rsp; num_rsp--, info++) {
1928 		bool name_known, ssp;
1929 
1930 		bacpy(&data.bdaddr, &info->bdaddr);
1931 		data.pscan_rep_mode	= info->pscan_rep_mode;
1932 		data.pscan_period_mode	= info->pscan_period_mode;
1933 		data.pscan_mode		= info->pscan_mode;
1934 		memcpy(data.dev_class, info->dev_class, 3);
1935 		data.clock_offset	= info->clock_offset;
1936 		data.rssi		= 0x00;
1937 		data.ssp_mode		= 0x00;
1938 
1939 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1940 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1941 				  info->dev_class, 0, !name_known, ssp, NULL,
1942 				  0, NULL, 0);
1943 	}
1944 
1945 	hci_dev_unlock(hdev);
1946 }
1947 
/* Connection Complete event: finish setup of an ACL/SCO link on
 * success (handle assignment, sysfs, remote feature read, packet
 * type) or tear the connection down on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer an eSCO request; retry the
		 * lookup as ESCO and downgrade the link type to match.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a stored
			 * link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* On failure notify the upper layer and delete the connection;
	 * for successful non-ACL links confirm right away (ACL waits
	 * for the remote features before confirming).
	 */
	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2029 
/* Connection Request event: decide whether to accept or reject an
 * incoming connection based on the link policy, protocol callbacks
 * and the blacklist, then send the matching accept/reject command.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we have an inquiry
		 * cache entry for this peer.
		 */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		/* ACL links (and SCO on controllers without eSCO support,
		 * unless deferred) use the plain accept command.
		 */
		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* eSCO-capable controller: accept with explicit
			 * synchronous connection parameters.
			 */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Protocol layer asked to defer: let it decide
			 * later whether to accept.
			 */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2111 
/* Map an HCI disconnect reason code onto the coarser set of mgmt
 * disconnect reasons reported to userspace.
 */
static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}
2127 
/* Disconnection Complete event: notify mgmt and the protocol layers,
 * clean up link keys and auto-connect bookkeeping, delete the
 * connection, and re-enable LE advertising where needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if the connect was
	 * reported there in the first place.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK && conn->flush_key)
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for devices configured to reconnect,
	 * either always or only after a link loss.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
			break;

		default:
			break;
		}
	}

	/* Remember the link type; conn is gone after hci_conn_del(). */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2198 
/* Authentication Complete event: update link mode and security level,
 * continue with encryption where required, and notify the protocol
 * layers / mgmt of the outcome.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices cannot be re-authenticated; keep the
		 * previously established security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP links must also be encrypted before the connection
		 * setup is considered complete.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was requested alongside authentication, start
	 * it now on success or fail the pending request.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2264 
/* Remote Name Request Complete event: hand the resolved name (or the
 * failure) to the discovery code and, if needed, trigger the deferred
 * authentication for an outgoing connection.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* A NULL name signals resolution failure to the discovery code. */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2303 
/* HCI Encryption Change event: update the link's security state
 * (authentication, encryption, AES-CCM, FIPS link-mode bits) and
 * notify the upper layers of the new encryption status.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			conn->link_mode |= HCI_LM_AUTH;
			conn->link_mode |= HCI_LM_ENCRYPT;
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				conn->link_mode |= HCI_LM_FIPS;

			/* encrypt value 0x02 indicates AES-CCM on BR/EDR;
			 * LE links are always treated as AES-CCM here.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			/* Encryption switched off: drop the related bits */
			conn->link_mode &= ~HCI_LM_ENCRYPT;
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link is fatal:
	 * disconnect with an authentication failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		/* Connection already established: just report the new
		 * encryption state upwards.
		 */
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2369 
2370 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2371 					     struct sk_buff *skb)
2372 {
2373 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2374 	struct hci_conn *conn;
2375 
2376 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2377 
2378 	hci_dev_lock(hdev);
2379 
2380 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2381 	if (conn) {
2382 		if (!ev->status)
2383 			conn->link_mode |= HCI_LM_SECURE;
2384 
2385 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2386 
2387 		hci_key_change_cfm(conn, ev->status);
2388 	}
2389 
2390 	hci_dev_unlock(hdev);
2391 }
2392 
/* HCI Read Remote Supported Features Complete event: cache the peer's
 * LMP feature page 0 and continue the connection setup sequence
 * (extended features, remote name, authentication) while the link is
 * still in BT_CONFIG.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Store feature page 0 of the remote device */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while setup is in progress */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides are SSP capable, fetch extended feature page 1
	 * next; the setup sequence continues from that event handler.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Otherwise resolve the remote name, or notify mgmt of the new
	 * connection if the name request is not needed.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* If no outgoing authentication is required, setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2442 
/* HCI Command Complete event: dispatch the command's return parameters
 * to the matching hci_cc_* handler, complete any pending request and
 * restart command processing based on the reported credit count.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte after the event header; handlers below also read it
	 * as the command status from skb->data after the pull.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Strip the event header so handlers see only the parameters */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so the command timeout no longer
	 * applies (HCI_OP_NOP completes are credit-only updates).
	 */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept more commands; kick
	 * the command work queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2718 
/* HCI Command Status event: forward the status to the matching hci_cs_*
 * handler, complete the pending request when appropriate and restart
 * command processing based on the reported credit count.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header; handlers only receive the status */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged, so stop the command timeout
	 * (HCI_OP_NOP statuses are credit-only updates).
	 */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now on failure, or on success when the
	 * request is not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0 means the controller can accept more commands; kick
	 * the command work queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2811 
2812 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2813 {
2814 	struct hci_ev_role_change *ev = (void *) skb->data;
2815 	struct hci_conn *conn;
2816 
2817 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2818 
2819 	hci_dev_lock(hdev);
2820 
2821 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2822 	if (conn) {
2823 		if (!ev->status) {
2824 			if (ev->role)
2825 				conn->link_mode &= ~HCI_LM_MASTER;
2826 			else
2827 				conn->link_mode |= HCI_LM_MASTER;
2828 		}
2829 
2830 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2831 
2832 		hci_role_switch_cfm(conn, ev->status, ev->role);
2833 	}
2834 
2835 	hci_dev_unlock(hdev);
2836 }
2837 
/* HCI Number of Completed Packets event: return transmit credits to
 * the per-link-type counters (clamped at the controller's advertised
 * capacity) and restart the TX work queue.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* Packet-based accounting only applies in packet-based flow
	 * control mode.
	 */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Ensure the event actually carries num_hndl handle/count pairs
	 * before walking the array.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the pool for the link type, never exceeding the
		 * controller's advertised buffer count.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers share
			 * the ACL buffer pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed credits may allow queued packets to be sent */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2903 
2904 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2905 						 __u16 handle)
2906 {
2907 	struct hci_chan *chan;
2908 
2909 	switch (hdev->dev_type) {
2910 	case HCI_BREDR:
2911 		return hci_conn_hash_lookup_handle(hdev, handle);
2912 	case HCI_AMP:
2913 		chan = hci_chan_lookup_handle(hdev, handle);
2914 		if (chan)
2915 			return chan->conn;
2916 		break;
2917 	default:
2918 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2919 		break;
2920 	}
2921 
2922 	return NULL;
2923 }
2924 
/* HCI Number of Completed Data Blocks event: return block credits to
 * the shared block pool (clamped at the controller's advertised
 * capacity) and restart the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* Block-based accounting only applies in block-based flow
	 * control mode.
	 */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Ensure the event actually carries num_hndl handle/blocks pairs
	 * before walking the array.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* For AMP controllers the handle names a logical channel,
		 * so use the dev_type-aware lookup.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Both link types share one block pool */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed blocks may allow queued packets to be sent */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2974 
2975 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2976 {
2977 	struct hci_ev_mode_change *ev = (void *) skb->data;
2978 	struct hci_conn *conn;
2979 
2980 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2981 
2982 	hci_dev_lock(hdev);
2983 
2984 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2985 	if (conn) {
2986 		conn->mode = ev->mode;
2987 
2988 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2989 					&conn->flags)) {
2990 			if (conn->mode == HCI_CM_ACTIVE)
2991 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2992 			else
2993 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2994 		}
2995 
2996 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2997 			hci_sco_setup(conn, ev->status);
2998 	}
2999 
3000 	hci_dev_unlock(hdev);
3001 }
3002 
3003 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3004 {
3005 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3006 	struct hci_conn *conn;
3007 
3008 	BT_DBG("%s", hdev->name);
3009 
3010 	hci_dev_lock(hdev);
3011 
3012 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3013 	if (!conn)
3014 		goto unlock;
3015 
3016 	if (conn->state == BT_CONNECTED) {
3017 		hci_conn_hold(conn);
3018 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3019 		hci_conn_drop(conn);
3020 	}
3021 
3022 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3023 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3024 			     sizeof(ev->bdaddr), &ev->bdaddr);
3025 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3026 		u8 secure;
3027 
3028 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3029 			secure = 1;
3030 		else
3031 			secure = 0;
3032 
3033 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3034 	}
3035 
3036 unlock:
3037 	hci_dev_unlock(hdev);
3038 }
3039 
/* HCI Link Key Request event: look up a stored link key for the peer
 * and reply with it, unless security policy (debug keys, unauthenticated
 * keys, short PINs at high security) forbids its use, in which case a
 * negative reply is sent.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Stored keys are only handled when mgmt is in control */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when debug keys are explicitly
	 * allowed on this controller.
	 */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key must not satisfy a request for
		 * MITM-protected authentication (auth_type bit 0 set;
		 * 0xff means the auth type is not yet known).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A legacy combination key made from a short PIN is too
		 * weak for high/FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3104 
3105 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3106 {
3107 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
3108 	struct hci_conn *conn;
3109 	u8 pin_len = 0;
3110 
3111 	BT_DBG("%s", hdev->name);
3112 
3113 	hci_dev_lock(hdev);
3114 
3115 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3116 	if (conn) {
3117 		hci_conn_hold(conn);
3118 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3119 		pin_len = conn->pin_length;
3120 
3121 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3122 			conn->key_type = ev->key_type;
3123 
3124 		hci_conn_drop(conn);
3125 	}
3126 
3127 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3128 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
3129 				 ev->key_type, pin_len);
3130 
3131 	hci_dev_unlock(hdev);
3132 }
3133 
3134 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3135 {
3136 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3137 	struct hci_conn *conn;
3138 
3139 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3140 
3141 	hci_dev_lock(hdev);
3142 
3143 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3144 	if (conn && !ev->status) {
3145 		struct inquiry_entry *ie;
3146 
3147 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3148 		if (ie) {
3149 			ie->data.clock_offset = ev->clock_offset;
3150 			ie->timestamp = jiffies;
3151 		}
3152 	}
3153 
3154 	hci_dev_unlock(hdev);
3155 }
3156 
3157 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3158 {
3159 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3160 	struct hci_conn *conn;
3161 
3162 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3163 
3164 	hci_dev_lock(hdev);
3165 
3166 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3167 	if (conn && !ev->status)
3168 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3169 
3170 	hci_dev_unlock(hdev);
3171 }
3172 
3173 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3174 {
3175 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3176 	struct inquiry_entry *ie;
3177 
3178 	BT_DBG("%s", hdev->name);
3179 
3180 	hci_dev_lock(hdev);
3181 
3182 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3183 	if (ie) {
3184 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3185 		ie->timestamp = jiffies;
3186 	}
3187 
3188 	hci_dev_unlock(hdev);
3189 }
3190 
/* HCI Inquiry Result with RSSI event: parse the response array (in
 * either of its two wire formats), update the inquiry cache and report
 * each found device to mgmt.
 *
 * NOTE(review): num_rsp and the per-response structs come straight
 * from the controller; apart from the divisibility test below there is
 * no explicit check that skb->len covers num_rsp entries -- verify the
 * callers/controller guarantee this.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Distinguish the two result formats by entry size: the variant
	 * with a pscan_mode field versus the plain RSSI variant.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3250 
/* HCI Read Remote Extended Features Complete event: cache the remote
 * feature page, derive the peer's SSP/SC host support from page 1 and
 * continue the connection setup sequence while in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the reported page (bounds-checked against our storage) */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remaining steps only apply while setup is in progress */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name, or notify mgmt of the new connection
	 * if the name request is not needed.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* If no outgoing authentication is required, setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3316 
/* HCI Synchronous Connection Complete event: finalize a SCO/eSCO
 * connection, retrying an outgoing eSCO attempt with fallback packet
 * types on the specific rejection codes listed below.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO attempt may have been downgraded to SCO by the
		 * controller; look the connection up under ESCO_LINK and
		 * adjust its type.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	/* For these rejection/parameter errors an outgoing attempt is
	 * retried with less demanding (non-EDR eSCO) packet types.
	 */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3373 
3374 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3375 {
3376 	size_t parsed = 0;
3377 
3378 	while (parsed < eir_len) {
3379 		u8 field_len = eir[0];
3380 
3381 		if (field_len == 0)
3382 			return parsed;
3383 
3384 		parsed += field_len + 1;
3385 		eir += field_len + 1;
3386 	}
3387 
3388 	return eir_len;
3389 }
3390 
/* Handle the HCI Extended Inquiry Result event.
 *
 * Updates the inquiry cache with each response and forwards it to the
 * management interface as a device-found event, including the raw EIR
 * data trimmed to its significant length.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* During periodic inquiry results are not reported individually */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		/* An extended inquiry result implies the remote uses SSP */
		data.ssp_mode		= 0x01;

		/* With mgmt in use, only treat the name as known if the
		 * EIR data already carries the complete name; otherwise a
		 * name resolution round may be needed later.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3438 
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * For LE links this finalizes a security level change: on success the
 * pending security level becomes effective, on failure while connected
 * the link is dropped with an authentication failure reason.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link is fatal: disconnect
	 * with an authentication failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep a reference until the disconnect timeout fires */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3488 
3489 static u8 hci_get_auth_req(struct hci_conn *conn)
3490 {
3491 	/* If remote requests no-bonding follow that lead */
3492 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3493 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3494 		return conn->remote_auth | (conn->auth_type & 0x01);
3495 
3496 	/* If both remote and local have enough IO capabilities, require
3497 	 * MITM protection
3498 	 */
3499 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3500 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3501 		return conn->remote_auth | 0x01;
3502 
3503 	/* No MITM protection possible so ignore remote requirement */
3504 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3505 }
3506 
/* Handle the HCI IO Capability Request event.
 *
 * Replies with our IO capability and authentication requirements when
 * pairing is acceptable, otherwise sends a negative reply rejecting
 * the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept pairing when we are pairable, or when the remote asks
	 * for plain no-bonding (ignoring the MITM bit in the comparison).
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data only when available and usable for
		 * this direction of the pairing.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3571 
3572 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3573 {
3574 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3575 	struct hci_conn *conn;
3576 
3577 	BT_DBG("%s", hdev->name);
3578 
3579 	hci_dev_lock(hdev);
3580 
3581 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3582 	if (!conn)
3583 		goto unlock;
3584 
3585 	conn->remote_cap = ev->capability;
3586 	conn->remote_auth = ev->authentication;
3587 	if (ev->oob_data)
3588 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3589 
3590 unlock:
3591 	hci_dev_unlock(hdev);
3592 }
3593 
/* Handle the HCI User Confirmation Request event.
 *
 * Decides between rejecting the numeric comparison, auto-accepting it
 * (optionally after a configurable delay), or forwarding it to user
 * space for confirmation, based on both sides' MITM requirements.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Defer the accept if a delay is configured; the delayed
		 * work sends the confirm reply later.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3660 
3661 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3662 					 struct sk_buff *skb)
3663 {
3664 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3665 
3666 	BT_DBG("%s", hdev->name);
3667 
3668 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3669 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3670 }
3671 
3672 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3673 					struct sk_buff *skb)
3674 {
3675 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3676 	struct hci_conn *conn;
3677 
3678 	BT_DBG("%s", hdev->name);
3679 
3680 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3681 	if (!conn)
3682 		return;
3683 
3684 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3685 	conn->passkey_entered = 0;
3686 
3687 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3688 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3689 					 conn->dst_type, conn->passkey_notify,
3690 					 conn->passkey_entered);
3691 }
3692 
3693 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3694 {
3695 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3696 	struct hci_conn *conn;
3697 
3698 	BT_DBG("%s", hdev->name);
3699 
3700 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3701 	if (!conn)
3702 		return;
3703 
3704 	switch (ev->type) {
3705 	case HCI_KEYPRESS_STARTED:
3706 		conn->passkey_entered = 0;
3707 		return;
3708 
3709 	case HCI_KEYPRESS_ENTERED:
3710 		conn->passkey_entered++;
3711 		break;
3712 
3713 	case HCI_KEYPRESS_ERASED:
3714 		conn->passkey_entered--;
3715 		break;
3716 
3717 	case HCI_KEYPRESS_CLEARED:
3718 		conn->passkey_entered = 0;
3719 		break;
3720 
3721 	case HCI_KEYPRESS_COMPLETED:
3722 		return;
3723 	}
3724 
3725 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3726 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3727 					 conn->dst_type, conn->passkey_notify,
3728 					 conn->passkey_entered);
3729 }
3730 
3731 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3732 					 struct sk_buff *skb)
3733 {
3734 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3735 	struct hci_conn *conn;
3736 
3737 	BT_DBG("%s", hdev->name);
3738 
3739 	hci_dev_lock(hdev);
3740 
3741 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3742 	if (!conn)
3743 		goto unlock;
3744 
3745 	/* To avoid duplicate auth_failed events to user space we check
3746 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3747 	 * initiated the authentication. A traditional auth_complete
3748 	 * event gets always produced as initiator and is also mapped to
3749 	 * the mgmt_auth_failed event */
3750 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3751 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3752 				 ev->status);
3753 
3754 	hci_conn_drop(conn);
3755 
3756 unlock:
3757 	hci_dev_unlock(hdev);
3758 }
3759 
/* Handle the HCI Remote Host Supported Features Notification event.
 *
 * Stores the remote host features on the connection (if one exists)
 * and refreshes the cached SSP mode in the inquiry cache.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Host features occupy page 1 of the connection's feature set */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
3781 
/* Handle the HCI Remote OOB Data Request event.
 *
 * Replies with the locally stored out-of-band pairing data for the
 * remote device. When Secure Connections is enabled, the extended
 * reply carrying both the P-192 and P-256 values is used; otherwise
 * only the P-192 values are sent. Without stored data a negative
 * reply is issued.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3832 
3833 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3834 				      struct sk_buff *skb)
3835 {
3836 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3837 	struct hci_conn *hcon, *bredr_hcon;
3838 
3839 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3840 	       ev->status);
3841 
3842 	hci_dev_lock(hdev);
3843 
3844 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3845 	if (!hcon) {
3846 		hci_dev_unlock(hdev);
3847 		return;
3848 	}
3849 
3850 	if (ev->status) {
3851 		hci_conn_del(hcon);
3852 		hci_dev_unlock(hdev);
3853 		return;
3854 	}
3855 
3856 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3857 
3858 	hcon->state = BT_CONNECTED;
3859 	bacpy(&hcon->dst, &bredr_hcon->dst);
3860 
3861 	hci_conn_hold(hcon);
3862 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3863 	hci_conn_drop(hcon);
3864 
3865 	hci_conn_add_sysfs(hcon);
3866 
3867 	amp_physical_cfm(bredr_hcon, hcon);
3868 
3869 	hci_dev_unlock(hdev);
3870 }
3871 
/* Handle the AMP Logical Link Complete event.
 *
 * Creates an hci_chan for the new logical link and, if a BR/EDR
 * L2CAP channel is waiting on the AMP manager, confirms the logical
 * link to it.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3909 
3910 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3911 					     struct sk_buff *skb)
3912 {
3913 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3914 	struct hci_chan *hchan;
3915 
3916 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3917 	       le16_to_cpu(ev->handle), ev->status);
3918 
3919 	if (ev->status)
3920 		return;
3921 
3922 	hci_dev_lock(hdev);
3923 
3924 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3925 	if (!hchan)
3926 		goto unlock;
3927 
3928 	amp_destroy_logical_link(hchan, ev->reason);
3929 
3930 unlock:
3931 	hci_dev_unlock(hdev);
3932 }
3933 
3934 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3935 					     struct sk_buff *skb)
3936 {
3937 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3938 	struct hci_conn *hcon;
3939 
3940 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3941 
3942 	if (ev->status)
3943 		return;
3944 
3945 	hci_dev_lock(hdev);
3946 
3947 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3948 	if (hcon) {
3949 		hcon->state = BT_CLOSED;
3950 		hci_conn_del(hcon);
3951 	}
3952 
3953 	hci_dev_unlock(hdev);
3954 }
3955 
/* Handle the HCI LE Connection Complete event.
 *
 * Locates (or, for white-list initiated connections, creates) the
 * hci_conn for the new LE link, records the initiator/responder
 * addresses, resolves RPAs back to identity addresses, and completes
 * or fails the connection based on the event status.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct smp_irk *irk;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection attempt succeeded or failed; either way the
		 * connection-timeout guard is no longer needed.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Notify user space only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		set_bit(HCI_CONN_6LOWPAN, &conn->flags);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is now connected; drop any pending auto-connect entry */
	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);

unlock:
	hci_dev_unlock(hdev);
}
4059 
/* This function requires the caller holds hdev->lock.
 *
 * If the advertiser at the given address is on the pending-connection
 * list, initiate an LE connection to it. Resolvable addresses are
 * first translated to their identity address via the IRK store.
 */
static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type)
{
	struct hci_conn *conn;
	struct smp_irk *irk;

	/* If this is a resolvable address, we should resolve it and then
	 * update address and address type variables.
	 */
	irk = hci_get_irk(hdev, addr, addr_type);
	if (irk) {
		addr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
		return;

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_AT_NO_BONDING);
	if (!IS_ERR(conn))
		return;

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}
}
4096 
/* Process a single LE advertising report.
 *
 * During passive scanning only pending-connection triggers are acted
 * on. During active scanning, ADV_IND/ADV_SCAN_IND reports are cached
 * so they can later be merged with their SCAN_RSP into one combined
 * device-found event; anything that cannot be merged is reported
 * immediately.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	bool match;

	/* Passive scanning shouldn't trigger any device found events */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
			check_pending_le_conn(hdev, bdaddr, bdaddr_type);
		return;
	}

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
			  d->last_adv_data, d->last_adv_data_len);
	clear_pending_adv_report(hdev);
}
4173 
4174 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4175 {
4176 	u8 num_reports = skb->data[0];
4177 	void *ptr = &skb->data[1];
4178 
4179 	hci_dev_lock(hdev);
4180 
4181 	while (num_reports--) {
4182 		struct hci_ev_le_advertising_info *ev = ptr;
4183 		s8 rssi;
4184 
4185 		rssi = ev->data[ev->length];
4186 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4187 				   ev->bdaddr_type, rssi, ev->data, ev->length);
4188 
4189 		ptr += sizeof(*ev) + ev->length + 1;
4190 	}
4191 
4192 	hci_dev_unlock(hdev);
4193 }
4194 
/* Handle the HCI LE Long Term Key Request event.
 *
 * Looks up the LTK matching the event's EDIV/Rand for the connection
 * and replies with it, adjusting the pending security level based on
 * whether the key is authenticated. Without a matching key a negative
 * reply is sent.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* An authenticated key was created with MITM protection */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == HCI_SMP_STK_SLAVE) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4247 
4248 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4249 {
4250 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4251 
4252 	skb_pull(skb, sizeof(*le_ev));
4253 
4254 	switch (le_ev->subevent) {
4255 	case HCI_EV_LE_CONN_COMPLETE:
4256 		hci_le_conn_complete_evt(hdev, skb);
4257 		break;
4258 
4259 	case HCI_EV_LE_ADVERTISING_REPORT:
4260 		hci_le_adv_report_evt(hdev, skb);
4261 		break;
4262 
4263 	case HCI_EV_LE_LTK_REQ:
4264 		hci_le_ltk_request_evt(hdev, skb);
4265 		break;
4266 
4267 	default:
4268 		break;
4269 	}
4270 }
4271 
4272 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4273 {
4274 	struct hci_ev_channel_selected *ev = (void *) skb->data;
4275 	struct hci_conn *hcon;
4276 
4277 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4278 
4279 	skb_pull(skb, sizeof(*ev));
4280 
4281 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4282 	if (!hcon)
4283 		return;
4284 
4285 	amp_read_loc_assoc_final_data(hdev, hcon);
4286 }
4287 
/* Main HCI event dispatcher.
 *
 * Caches the raw event for a pending synchronous request, completes a
 * sent command that was waiting for this event, then dispatches the
 * event to its handler. Consumes the skb in all cases.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Complete the outstanding command if it was waiting for
	 * exactly this event rather than a Command Complete/Status.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
4491