xref: /linux/net/bluetooth/hci_event.c (revision c75c5ab575af7db707689cdbb5a5c458e9a034bb)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34 
35 /* Handle HCI Event packets */
36 
37 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
38 {
39 	__u8 status = *((__u8 *) skb->data);
40 
41 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
42 
43 	if (status) {
44 		hci_dev_lock(hdev);
45 		mgmt_stop_discovery_failed(hdev, status);
46 		hci_dev_unlock(hdev);
47 		return;
48 	}
49 
50 	clear_bit(HCI_INQUIRY, &hdev->flags);
51 
52 	hci_dev_lock(hdev);
53 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 	hci_dev_unlock(hdev);
55 
56 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
57 
58 	hci_conn_check_pending(hdev);
59 }
60 
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 	__u8 status = *((__u8 *) skb->data);
64 
65 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
66 
67 	if (status)
68 		return;
69 
70 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72 
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 	__u8 status = *((__u8 *) skb->data);
76 
77 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
78 
79 	if (status)
80 		return;
81 
82 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83 
84 	hci_conn_check_pending(hdev);
85 }
86 
87 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 					  struct sk_buff *skb)
89 {
90 	BT_DBG("%s", hdev->name);
91 }
92 
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 	struct hci_rp_role_discovery *rp = (void *) skb->data;
96 	struct hci_conn *conn;
97 
98 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99 
100 	if (rp->status)
101 		return;
102 
103 	hci_dev_lock(hdev);
104 
105 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 	if (conn) {
107 		if (rp->role)
108 			conn->link_mode &= ~HCI_LM_MASTER;
109 		else
110 			conn->link_mode |= HCI_LM_MASTER;
111 	}
112 
113 	hci_dev_unlock(hdev);
114 }
115 
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 	struct hci_conn *conn;
120 
121 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122 
123 	if (rp->status)
124 		return;
125 
126 	hci_dev_lock(hdev);
127 
128 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 	if (conn)
130 		conn->link_policy = __le16_to_cpu(rp->policy);
131 
132 	hci_dev_unlock(hdev);
133 }
134 
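/* The Write Link Policy Settings reply carries only status and handle, so
 * the new policy value is recovered from the command that was sent (the
 * two bytes following the connection handle).
 */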
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 	struct hci_conn *conn;
139 	void *sent;
140 
141 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142 
143 	if (rp->status)
144 		return;
145 
146 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 	if (!sent)
148 		return;
149 
150 	hci_dev_lock(hdev);
151 
152 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 	if (conn)
154 		conn->link_policy = get_unaligned_le16(sent + 2);
155 
156 	hci_dev_unlock(hdev);
157 }
158 
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 					struct sk_buff *skb)
161 {
162 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163 
164 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 
166 	if (rp->status)
167 		return;
168 
169 	hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171 
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 					 struct sk_buff *skb)
174 {
175 	__u8 status = *((__u8 *) skb->data);
176 	void *sent;
177 
178 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
179 
180 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 	if (!sent)
182 		return;
183 
184 	if (!status)
185 		hdev->link_policy = get_unaligned_le16(sent);
186 
187 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
188 }
189 
190 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191 {
192 	__u8 status = *((__u8 *) skb->data);
193 
194 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
195 
196 	clear_bit(HCI_RESET, &hdev->flags);
197 
198 	hci_req_complete(hdev, HCI_OP_RESET, status);
199 
200 	/* Reset all non-persistent flags */
201 	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
202 			     BIT(HCI_PERIODIC_INQ));
203 
204 	hdev->discovery.state = DISCOVERY_STOPPED;
205 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
207 
208 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 	hdev->adv_data_len = 0;
210 }
211 
212 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
213 {
214 	__u8 status = *((__u8 *) skb->data);
215 	void *sent;
216 
217 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
218 
219 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
220 	if (!sent)
221 		return;
222 
223 	hci_dev_lock(hdev);
224 
225 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
226 		mgmt_set_local_name_complete(hdev, sent, status);
227 	else if (!status)
228 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
229 
230 	hci_dev_unlock(hdev);
231 
232 	if (!status && !test_bit(HCI_INIT, &hdev->flags))
233 		hci_update_ad(hdev);
234 
235 	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
236 }
237 
238 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
239 {
240 	struct hci_rp_read_local_name *rp = (void *) skb->data;
241 
242 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
243 
244 	if (rp->status)
245 		return;
246 
247 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
248 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
249 }
250 
251 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
252 {
253 	__u8 status = *((__u8 *) skb->data);
254 	void *sent;
255 
256 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
257 
258 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
259 	if (!sent)
260 		return;
261 
262 	if (!status) {
263 		__u8 param = *((__u8 *) sent);
264 
265 		if (param == AUTH_ENABLED)
266 			set_bit(HCI_AUTH, &hdev->flags);
267 		else
268 			clear_bit(HCI_AUTH, &hdev->flags);
269 	}
270 
271 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
272 		mgmt_auth_enable_complete(hdev, status);
273 
274 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
275 }
276 
277 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
278 {
279 	__u8 status = *((__u8 *) skb->data);
280 	void *sent;
281 
282 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 
284 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 	if (!sent)
286 		return;
287 
288 	if (!status) {
289 		__u8 param = *((__u8 *) sent);
290 
291 		if (param)
292 			set_bit(HCI_ENCRYPT, &hdev->flags);
293 		else
294 			clear_bit(HCI_ENCRYPT, &hdev->flags);
295 	}
296 
297 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
298 }
299 
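/* Write Scan Enable complete: SCAN_INQUIRY (inquiry scan) corresponds to
 * the discoverable setting and SCAN_PAGE (page scan) to the connectable
 * setting; mgmt is only notified when the corresponding flag actually
 * changed.
 */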
300 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
301 {
302 	__u8 param, status = *((__u8 *) skb->data);
303 	int old_pscan, old_iscan;
304 	void *sent;
305 
306 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
307 
308 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
309 	if (!sent)
310 		return;
311 
312 	param = *((__u8 *) sent);
313 
314 	hci_dev_lock(hdev);
315 
316 	if (status) {
317 		mgmt_write_scan_failed(hdev, param, status);
318 		hdev->discov_timeout = 0;
319 		goto done;
320 	}
321 
322 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
323 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
324 
325 	if (param & SCAN_INQUIRY) {
326 		set_bit(HCI_ISCAN, &hdev->flags);
327 		if (!old_iscan)
328 			mgmt_discoverable(hdev, 1);
329 		if (hdev->discov_timeout > 0) {
330 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
331 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
332 					   to);
333 		}
334 	} else if (old_iscan)
335 		mgmt_discoverable(hdev, 0);
336 
337 	if (param & SCAN_PAGE) {
338 		set_bit(HCI_PSCAN, &hdev->flags);
339 		if (!old_pscan)
340 			mgmt_connectable(hdev, 1);
341 	} else if (old_pscan)
342 		mgmt_connectable(hdev, 0);
343 
344 done:
345 	hci_dev_unlock(hdev);
346 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
347 }
348 
349 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
352 
353 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
354 
355 	if (rp->status)
356 		return;
357 
358 	memcpy(hdev->dev_class, rp->dev_class, 3);
359 
360 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
361 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
362 }
363 
364 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
365 {
366 	__u8 status = *((__u8 *) skb->data);
367 	void *sent;
368 
369 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
370 
371 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
372 	if (!sent)
373 		return;
374 
375 	hci_dev_lock(hdev);
376 
377 	if (status == 0)
378 		memcpy(hdev->dev_class, sent, 3);
379 
380 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
381 		mgmt_set_class_of_dev_complete(hdev, sent, status);
382 
383 	hci_dev_unlock(hdev);
384 }
385 
386 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
387 {
388 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
389 	__u16 setting;
390 
391 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
392 
393 	if (rp->status)
394 		return;
395 
396 	setting = __le16_to_cpu(rp->voice_setting);
397 
398 	if (hdev->voice_setting == setting)
399 		return;
400 
401 	hdev->voice_setting = setting;
402 
403 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
404 
405 	if (hdev->notify)
406 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407 }
408 
409 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
410 				       struct sk_buff *skb)
411 {
412 	__u8 status = *((__u8 *) skb->data);
413 	__u16 setting;
414 	void *sent;
415 
416 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
417 
418 	if (status)
419 		return;
420 
421 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
422 	if (!sent)
423 		return;
424 
425 	setting = get_unaligned_le16(sent);
426 
427 	if (hdev->voice_setting == setting)
428 		return;
429 
430 	hdev->voice_setting = setting;
431 
432 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
433 
434 	if (hdev->notify)
435 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436 }
437 
438 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
439 {
440 	__u8 status = *((__u8 *) skb->data);
441 
442 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
443 
444 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
445 }
446 
447 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
448 {
449 	__u8 status = *((__u8 *) skb->data);
450 	struct hci_cp_write_ssp_mode *sent;
451 
452 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
453 
454 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
455 	if (!sent)
456 		return;
457 
458 	if (!status) {
459 		if (sent->mode)
460 			hdev->host_features[0] |= LMP_HOST_SSP;
461 		else
462 			hdev->host_features[0] &= ~LMP_HOST_SSP;
463 	}
464 
465 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
466 		mgmt_ssp_enable_complete(hdev, sent->mode, status);
467 	else if (!status) {
468 		if (sent->mode)
469 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
470 		else
471 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
472 	}
473 }
474 
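/* Pick the inquiry mode to configure: 2 = Inquiry Result with RSSI or
 * Extended Inquiry Result, 1 = Inquiry Result with RSSI, 0 = standard
 * Inquiry Result. The manufacturer/revision checks below cover a few
 * controllers that handle RSSI inquiry results without advertising the
 * feature in their LMP features.
 */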
475 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
476 {
477 	if (lmp_ext_inq_capable(hdev))
478 		return 2;
479 
480 	if (lmp_inq_rssi_capable(hdev))
481 		return 1;
482 
483 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
484 	    hdev->lmp_subver == 0x0757)
485 		return 1;
486 
487 	if (hdev->manufacturer == 15) {
488 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
489 			return 1;
490 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
491 			return 1;
492 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
493 			return 1;
494 	}
495 
496 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
497 	    hdev->lmp_subver == 0x1805)
498 		return 1;
499 
500 	return 0;
501 }
502 
503 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
504 {
505 	u8 mode;
506 
507 	mode = hci_get_inquiry_mode(hdev);
508 
509 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
510 }
511 
512 static void hci_setup_event_mask(struct hci_dev *hdev)
513 {
514 	/* The second byte is 0xff instead of 0x9f (two reserved bits
515 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
516 	 * command otherwise */
517 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
518 
519 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
520 	 * any event mask for pre-1.2 devices */
521 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
522 		return;
523 
524 	if (lmp_bredr_capable(hdev)) {
525 		events[4] |= 0x01; /* Flow Specification Complete */
526 		events[4] |= 0x02; /* Inquiry Result with RSSI */
527 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
528 		events[5] |= 0x08; /* Synchronous Connection Complete */
529 		events[5] |= 0x10; /* Synchronous Connection Changed */
530 	}
531 
532 	if (lmp_inq_rssi_capable(hdev))
533 		events[4] |= 0x02; /* Inquiry Result with RSSI */
534 
535 	if (lmp_sniffsubr_capable(hdev))
536 		events[5] |= 0x20; /* Sniff Subrating */
537 
538 	if (lmp_pause_enc_capable(hdev))
539 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
540 
541 	if (lmp_ext_inq_capable(hdev))
542 		events[5] |= 0x40; /* Extended Inquiry Result */
543 
544 	if (lmp_no_flush_capable(hdev))
545 		events[7] |= 0x01; /* Enhanced Flush Complete */
546 
547 	if (lmp_lsto_capable(hdev))
548 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
549 
550 	if (lmp_ssp_capable(hdev)) {
551 		events[6] |= 0x01;	/* IO Capability Request */
552 		events[6] |= 0x02;	/* IO Capability Response */
553 		events[6] |= 0x04;	/* User Confirmation Request */
554 		events[6] |= 0x08;	/* User Passkey Request */
555 		events[6] |= 0x10;	/* Remote OOB Data Request */
556 		events[6] |= 0x20;	/* Simple Pairing Complete */
557 		events[7] |= 0x04;	/* User Passkey Notification */
558 		events[7] |= 0x08;	/* Keypress Notification */
559 		events[7] |= 0x10;	/* Remote Host Supported
560 					 * Features Notification */
561 	}
562 
563 	if (lmp_le_capable(hdev))
564 		events[7] |= 0x20;	/* LE Meta-Event */
565 
566 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
567 
568 	if (lmp_le_capable(hdev)) {
569 		memset(events, 0, sizeof(events));
570 		events[0] = 0x1f;
571 		hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
572 			     sizeof(events), events);
573 	}
574 }
575 
576 static void bredr_setup(struct hci_dev *hdev)
577 {
578 	struct hci_cp_delete_stored_link_key cp;
579 	__le16 param;
580 	__u8 flt_type;
581 
582 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
583 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
584 
585 	/* Read Class of Device */
586 	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
587 
588 	/* Read Local Name */
589 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
590 
591 	/* Read Voice Setting */
592 	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
593 
594 	/* Clear Event Filters */
595 	flt_type = HCI_FLT_CLEAR_ALL;
596 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
597 
598 	/* Connection accept timeout ~20 secs */
599 	param = __constant_cpu_to_le16(0x7d00);
600 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
601 
602 	bacpy(&cp.bdaddr, BDADDR_ANY);
603 	cp.delete_all = 1;
604 	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
605 }
606 
607 static void le_setup(struct hci_dev *hdev)
608 {
609 	/* Read LE Buffer Size */
610 	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
611 
612 	/* Read LE Local Supported Features */
613 	hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
614 
615 	/* Read LE Advertising Channel TX Power */
616 	hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
617 
618 	/* Read LE White List Size */
619 	hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
620 
621 	/* Read LE Supported States */
622 	hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
623 }
624 
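/* Controller initialization sequence, run from the Read Local Version
 * complete handler while the HCI_INIT flag is still set.
 */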
625 static void hci_setup(struct hci_dev *hdev)
626 {
627 	if (hdev->dev_type != HCI_BREDR)
628 		return;
629 
630 	/* Read BD Address */
631 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
632 
633 	if (lmp_bredr_capable(hdev))
634 		bredr_setup(hdev);
635 
636 	if (lmp_le_capable(hdev))
637 		le_setup(hdev);
638 
639 	hci_setup_event_mask(hdev);
640 
641 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
642 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
643 
644 	if (lmp_ssp_capable(hdev)) {
645 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
646 			u8 mode = 0x01;
647 			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
648 				     sizeof(mode), &mode);
649 		} else {
650 			struct hci_cp_write_eir cp;
651 
652 			memset(hdev->eir, 0, sizeof(hdev->eir));
653 			memset(&cp, 0, sizeof(cp));
654 
655 			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
656 		}
657 	}
658 
659 	if (lmp_inq_rssi_capable(hdev))
660 		hci_setup_inquiry_mode(hdev);
661 
662 	if (lmp_inq_tx_pwr_capable(hdev))
663 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
664 
665 	if (lmp_ext_feat_capable(hdev)) {
666 		struct hci_cp_read_local_ext_features cp;
667 
668 		cp.page = 0x01;
669 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
670 			     &cp);
671 	}
672 
673 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
674 		u8 enable = 1;
675 		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
676 			     &enable);
677 	}
678 }
679 
680 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
681 {
682 	struct hci_rp_read_local_version *rp = (void *) skb->data;
683 
684 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
685 
686 	if (rp->status)
687 		goto done;
688 
689 	hdev->hci_ver = rp->hci_ver;
690 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
691 	hdev->lmp_ver = rp->lmp_ver;
692 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
693 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
694 
695 	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
696 	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
697 
698 	if (test_bit(HCI_INIT, &hdev->flags))
699 		hci_setup(hdev);
700 
701 done:
702 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
703 }
704 
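/* Enable every link policy mode (role switch, hold, sniff, park) that the
 * controller's LMP features advertise as supported.
 */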
705 static void hci_setup_link_policy(struct hci_dev *hdev)
706 {
707 	struct hci_cp_write_def_link_policy cp;
708 	u16 link_policy = 0;
709 
710 	if (lmp_rswitch_capable(hdev))
711 		link_policy |= HCI_LP_RSWITCH;
712 	if (lmp_hold_capable(hdev))
713 		link_policy |= HCI_LP_HOLD;
714 	if (lmp_sniff_capable(hdev))
715 		link_policy |= HCI_LP_SNIFF;
716 	if (lmp_park_capable(hdev))
717 		link_policy |= HCI_LP_PARK;
718 
719 	cp.policy = cpu_to_le16(link_policy);
720 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
721 }
722 
723 static void hci_cc_read_local_commands(struct hci_dev *hdev,
724 				       struct sk_buff *skb)
725 {
726 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
727 
728 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 
730 	if (rp->status)
731 		goto done;
732 
733 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
734 
735 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
736 		hci_setup_link_policy(hdev);
737 
738 done:
739 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
740 }
741 
742 static void hci_cc_read_local_features(struct hci_dev *hdev,
743 				       struct sk_buff *skb)
744 {
745 	struct hci_rp_read_local_features *rp = (void *) skb->data;
746 
747 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
748 
749 	if (rp->status)
750 		return;
751 
752 	memcpy(hdev->features, rp->features, 8);
753 
754 	/* Adjust default settings according to the features
755 	 * supported by the device. */
756 
757 	if (hdev->features[0] & LMP_3SLOT)
758 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
759 
760 	if (hdev->features[0] & LMP_5SLOT)
761 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
762 
763 	if (hdev->features[1] & LMP_HV2) {
764 		hdev->pkt_type  |= (HCI_HV2);
765 		hdev->esco_type |= (ESCO_HV2);
766 	}
767 
768 	if (hdev->features[1] & LMP_HV3) {
769 		hdev->pkt_type  |= (HCI_HV3);
770 		hdev->esco_type |= (ESCO_HV3);
771 	}
772 
773 	if (lmp_esco_capable(hdev))
774 		hdev->esco_type |= (ESCO_EV3);
775 
776 	if (hdev->features[4] & LMP_EV4)
777 		hdev->esco_type |= (ESCO_EV4);
778 
779 	if (hdev->features[4] & LMP_EV5)
780 		hdev->esco_type |= (ESCO_EV5);
781 
782 	if (hdev->features[5] & LMP_EDR_ESCO_2M)
783 		hdev->esco_type |= (ESCO_2EV3);
784 
785 	if (hdev->features[5] & LMP_EDR_ESCO_3M)
786 		hdev->esco_type |= (ESCO_3EV3);
787 
788 	if (hdev->features[5] & LMP_EDR_3S_ESCO)
789 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
790 
791 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
792 	       hdev->features[0], hdev->features[1],
793 	       hdev->features[2], hdev->features[3],
794 	       hdev->features[4], hdev->features[5],
795 	       hdev->features[6], hdev->features[7]);
796 }
797 
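/* Write LE Host Supported only if the desired LE host flag differs from
 * what the controller currently reports, to avoid a redundant command.
 */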
798 static void hci_set_le_support(struct hci_dev *hdev)
799 {
800 	struct hci_cp_write_le_host_supported cp;
801 
802 	memset(&cp, 0, sizeof(cp));
803 
804 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
805 		cp.le = 1;
806 		cp.simul = lmp_le_br_capable(hdev);
807 	}
808 
809 	if (cp.le != lmp_host_le_capable(hdev))
810 		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
811 			     &cp);
812 }
813 
814 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
815 					   struct sk_buff *skb)
816 {
817 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
818 
819 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
820 
821 	if (rp->status)
822 		goto done;
823 
824 	switch (rp->page) {
825 	case 0:
826 		memcpy(hdev->features, rp->features, 8);
827 		break;
828 	case 1:
829 		memcpy(hdev->host_features, rp->features, 8);
830 		break;
831 	}
832 
833 	if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
834 		hci_set_le_support(hdev);
835 
836 done:
837 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
838 }
839 
840 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
841 					  struct sk_buff *skb)
842 {
843 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
844 
845 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846 
847 	if (rp->status)
848 		return;
849 
850 	hdev->flow_ctl_mode = rp->mode;
851 
852 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
853 }
854 
855 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
856 {
857 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
858 
859 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
860 
861 	if (rp->status)
862 		return;
863 
864 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
865 	hdev->sco_mtu  = rp->sco_mtu;
866 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
867 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
868 
869 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
870 		hdev->sco_mtu  = 64;
871 		hdev->sco_pkts = 8;
872 	}
873 
874 	hdev->acl_cnt = hdev->acl_pkts;
875 	hdev->sco_cnt = hdev->sco_pkts;
876 
877 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
878 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
879 }
880 
881 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
882 {
883 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
884 
885 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
886 
887 	if (!rp->status)
888 		bacpy(&hdev->bdaddr, &rp->bdaddr);
889 
890 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
891 }
892 
893 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
894 					struct sk_buff *skb)
895 {
896 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
897 
898 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899 
900 	if (rp->status)
901 		return;
902 
903 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
904 	hdev->block_len = __le16_to_cpu(rp->block_len);
905 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
906 
907 	hdev->block_cnt = hdev->num_blocks;
908 
909 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
910 	       hdev->block_cnt, hdev->block_len);
911 
912 	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
913 }
914 
915 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
916 {
917 	__u8 status = *((__u8 *) skb->data);
918 
919 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
920 
921 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
922 }
923 
924 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
925 				       struct sk_buff *skb)
926 {
927 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
928 
929 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 
931 	if (rp->status)
932 		goto a2mp_rsp;
933 
934 	hdev->amp_status = rp->amp_status;
935 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
936 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
937 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
938 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
939 	hdev->amp_type = rp->amp_type;
940 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
941 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
942 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
943 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
944 
945 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
946 
947 a2mp_rsp:
948 	a2mp_send_getinfo_rsp(hdev);
949 }
950 
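/* The local AMP assoc data may span several Read Local AMP Assoc replies;
 * fragments are accumulated until rem_len fits in the current frame, at
 * which point the complete assoc is handed to A2MP.
 */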
951 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
952 					struct sk_buff *skb)
953 {
954 	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
955 	struct amp_assoc *assoc = &hdev->loc_assoc;
956 	size_t rem_len, frag_len;
957 
958 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
959 
960 	if (rp->status)
961 		goto a2mp_rsp;
962 
963 	frag_len = skb->len - sizeof(*rp);
964 	rem_len = __le16_to_cpu(rp->rem_len);
965 
966 	if (rem_len > frag_len) {
967 		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
968 
969 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
970 		assoc->offset += frag_len;
971 
972 		/* Read other fragments */
973 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
974 
975 		return;
976 	}
977 
978 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
979 	assoc->len = assoc->offset + rem_len;
980 	assoc->offset = 0;
981 
982 a2mp_rsp:
983 	/* Send A2MP Rsp when all fragments are received */
984 	a2mp_send_getampassoc_rsp(hdev, rp->status);
985 	a2mp_send_create_phy_link_req(hdev, rp->status);
986 }
987 
988 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
989 					  struct sk_buff *skb)
990 {
991 	__u8 status = *((__u8 *) skb->data);
992 
993 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
994 
995 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
996 }
997 
998 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
999 {
1000 	__u8 status = *((__u8 *) skb->data);
1001 
1002 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1003 
1004 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
1005 }
1006 
1007 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
1008 				      struct sk_buff *skb)
1009 {
1010 	__u8 status = *((__u8 *) skb->data);
1011 
1012 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1013 
1014 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
1015 }
1016 
1017 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
1018 					 struct sk_buff *skb)
1019 {
1020 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
1021 
1022 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1023 
1024 	if (!rp->status)
1025 		hdev->inq_tx_power = rp->tx_power;
1026 
1027 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
1028 }
1029 
1030 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
1031 {
1032 	__u8 status = *((__u8 *) skb->data);
1033 
1034 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1035 
1036 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
1037 }
1038 
1039 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
1040 {
1041 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
1042 	struct hci_cp_pin_code_reply *cp;
1043 	struct hci_conn *conn;
1044 
1045 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046 
1047 	hci_dev_lock(hdev);
1048 
1049 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1050 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1051 
1052 	if (rp->status)
1053 		goto unlock;
1054 
1055 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1056 	if (!cp)
1057 		goto unlock;
1058 
1059 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1060 	if (conn)
1061 		conn->pin_length = cp->pin_len;
1062 
1063 unlock:
1064 	hci_dev_unlock(hdev);
1065 }
1066 
1067 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1068 {
1069 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1070 
1071 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1072 
1073 	hci_dev_lock(hdev);
1074 
1075 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1076 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1077 						 rp->status);
1078 
1079 	hci_dev_unlock(hdev);
1080 }
1081 
1082 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1083 				       struct sk_buff *skb)
1084 {
1085 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1086 
1087 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1088 
1089 	if (rp->status)
1090 		return;
1091 
1092 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1093 	hdev->le_pkts = rp->le_max_pkt;
1094 
1095 	hdev->le_cnt = hdev->le_pkts;
1096 
1097 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1098 
1099 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
1100 }
1101 
1102 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1103 					  struct sk_buff *skb)
1104 {
1105 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1106 
1107 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1108 
1109 	if (!rp->status)
1110 		memcpy(hdev->le_features, rp->features, 8);
1111 
1112 	hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status);
1113 }
1114 
1115 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1116 					struct sk_buff *skb)
1117 {
1118 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1119 
1120 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1121 
1122 	if (!rp->status) {
1123 		hdev->adv_tx_power = rp->tx_power;
1124 		if (!test_bit(HCI_INIT, &hdev->flags))
1125 			hci_update_ad(hdev);
1126 	}
1127 
1128 	hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
1129 }
1130 
1131 static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
1132 {
1133 	__u8 status = *((__u8 *) skb->data);
1134 
1135 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1136 
1137 	hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
1138 }
1139 
1140 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1141 {
1142 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1143 
1144 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1145 
1146 	hci_dev_lock(hdev);
1147 
1148 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1149 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1150 						 rp->status);
1151 
1152 	hci_dev_unlock(hdev);
1153 }
1154 
1155 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1156 					  struct sk_buff *skb)
1157 {
1158 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1159 
1160 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1161 
1162 	hci_dev_lock(hdev);
1163 
1164 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1165 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1166 						     ACL_LINK, 0, rp->status);
1167 
1168 	hci_dev_unlock(hdev);
1169 }
1170 
1171 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1172 {
1173 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1174 
1175 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1176 
1177 	hci_dev_lock(hdev);
1178 
1179 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1180 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1181 						 0, rp->status);
1182 
1183 	hci_dev_unlock(hdev);
1184 }
1185 
1186 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1187 					  struct sk_buff *skb)
1188 {
1189 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1190 
1191 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1192 
1193 	hci_dev_lock(hdev);
1194 
1195 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1196 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1197 						     ACL_LINK, 0, rp->status);
1198 
1199 	hci_dev_unlock(hdev);
1200 }
1201 
1202 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1203 					     struct sk_buff *skb)
1204 {
1205 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1206 
1207 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1208 
1209 	hci_dev_lock(hdev);
1210 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1211 						rp->randomizer, rp->status);
1212 	hci_dev_unlock(hdev);
1213 }
1214 
1215 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1216 {
1217 	__u8 *sent, status = *((__u8 *) skb->data);
1218 
1219 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1220 
1221 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1222 	if (!sent)
1223 		return;
1224 
1225 	hci_dev_lock(hdev);
1226 
1227 	if (!status) {
1228 		if (*sent)
1229 			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1230 		else
1231 			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1232 	}
1233 
1234 	hci_dev_unlock(hdev);
1235 
1236 	if (!test_bit(HCI_INIT, &hdev->flags))
1237 		hci_update_ad(hdev);
1238 
1239 	hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status);
1240 }
1241 
1242 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1243 {
1244 	__u8 status = *((__u8 *) skb->data);
1245 
1246 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1247 
1248 	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1249 
1250 	if (status) {
1251 		hci_dev_lock(hdev);
1252 		mgmt_start_discovery_failed(hdev, status);
1253 		hci_dev_unlock(hdev);
1254 		return;
1255 	}
1256 }
1257 
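/* LE Set Scan Enable complete: track the HCI_LE_SCAN flag and drive the
 * mgmt discovery state machine, including the BR/EDR phase of an
 * interleaved discovery once LE scanning has been disabled.
 */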
1258 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1259 				      struct sk_buff *skb)
1260 {
1261 	struct hci_cp_le_set_scan_enable *cp;
1262 	__u8 status = *((__u8 *) skb->data);
1263 
1264 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1265 
1266 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1267 	if (!cp)
1268 		return;
1269 
1270 	switch (cp->enable) {
1271 	case LE_SCANNING_ENABLED:
1272 		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1273 
1274 		if (status) {
1275 			hci_dev_lock(hdev);
1276 			mgmt_start_discovery_failed(hdev, status);
1277 			hci_dev_unlock(hdev);
1278 			return;
1279 		}
1280 
1281 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1282 
1283 		hci_dev_lock(hdev);
1284 		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1285 		hci_dev_unlock(hdev);
1286 		break;
1287 
1288 	case LE_SCANNING_DISABLED:
1289 		if (status) {
1290 			hci_dev_lock(hdev);
1291 			mgmt_stop_discovery_failed(hdev, status);
1292 			hci_dev_unlock(hdev);
1293 			return;
1294 		}
1295 
1296 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1297 
1298 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1299 		    hdev->discovery.state == DISCOVERY_FINDING) {
1300 			mgmt_interleaved_discovery(hdev);
1301 		} else {
1302 			hci_dev_lock(hdev);
1303 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1304 			hci_dev_unlock(hdev);
1305 		}
1306 
1307 		break;
1308 
1309 	default:
1310 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1311 		break;
1312 	}
1313 }
1314 
1315 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1316 					   struct sk_buff *skb)
1317 {
1318 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1319 
1320 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1321 
1322 	if (!rp->status)
1323 		hdev->le_white_list_size = rp->size;
1324 
1325 	hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
1326 }
1327 
1328 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1329 {
1330 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1331 
1332 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1333 
1334 	if (rp->status)
1335 		return;
1336 
1337 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1338 }
1339 
1340 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1341 {
1342 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1343 
1344 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1345 
1346 	if (rp->status)
1347 		return;
1348 
1349 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1350 }
1351 
1352 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1353 					    struct sk_buff *skb)
1354 {
1355 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1356 
1357 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1358 
1359 	if (!rp->status)
1360 		memcpy(hdev->le_states, rp->le_states, 8);
1361 
1362 	hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status);
1363 }
1364 
1365 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1366 					   struct sk_buff *skb)
1367 {
1368 	struct hci_cp_write_le_host_supported *sent;
1369 	__u8 status = *((__u8 *) skb->data);
1370 
1371 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1372 
1373 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1374 	if (!sent)
1375 		return;
1376 
1377 	if (!status) {
1378 		if (sent->le)
1379 			hdev->host_features[0] |= LMP_HOST_LE;
1380 		else
1381 			hdev->host_features[0] &= ~LMP_HOST_LE;
1382 
1383 		if (sent->simul)
1384 			hdev->host_features[0] |= LMP_HOST_LE_BREDR;
1385 		else
1386 			hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
1387 	}
1388 
1389 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1390 	    !test_bit(HCI_INIT, &hdev->flags))
1391 		mgmt_le_enable_complete(hdev, sent->le, status);
1392 
1393 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1394 }
1395 
1396 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1397 					  struct sk_buff *skb)
1398 {
1399 	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1400 
1401 	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1402 	       hdev->name, rp->status, rp->phy_handle);
1403 
1404 	if (rp->status)
1405 		return;
1406 
1407 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1408 }
1409 
1410 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1411 {
1412 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1413 
1414 	if (status) {
1415 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1416 		hci_conn_check_pending(hdev);
1417 		hci_dev_lock(hdev);
1418 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1419 			mgmt_start_discovery_failed(hdev, status);
1420 		hci_dev_unlock(hdev);
1421 		return;
1422 	}
1423 
1424 	set_bit(HCI_INQUIRY, &hdev->flags);
1425 
1426 	hci_dev_lock(hdev);
1427 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1428 	hci_dev_unlock(hdev);
1429 }
1430 
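/* Command Status for Create Connection. On failure the pending connection
 * is normally torn down; status 0x0c (Command Disallowed) on an early
 * attempt keeps it around for a retry. On success a connection object is
 * created if one does not exist yet.
 */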
1431 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1432 {
1433 	struct hci_cp_create_conn *cp;
1434 	struct hci_conn *conn;
1435 
1436 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1437 
1438 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1439 	if (!cp)
1440 		return;
1441 
1442 	hci_dev_lock(hdev);
1443 
1444 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1445 
1446 	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1447 
1448 	if (status) {
1449 		if (conn && conn->state == BT_CONNECT) {
1450 			if (status != 0x0c || conn->attempt > 2) {
1451 				conn->state = BT_CLOSED;
1452 				hci_proto_connect_cfm(conn, status);
1453 				hci_conn_del(conn);
1454 			} else
1455 				conn->state = BT_CONNECT2;
1456 		}
1457 	} else {
1458 		if (!conn) {
1459 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1460 			if (conn) {
1461 				conn->out = true;
1462 				conn->link_mode |= HCI_LM_MASTER;
1463 			} else
1464 				BT_ERR("No memory for new connection");
1465 		}
1466 	}
1467 
1468 	hci_dev_unlock(hdev);
1469 }
1470 
1471 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1472 {
1473 	struct hci_cp_add_sco *cp;
1474 	struct hci_conn *acl, *sco;
1475 	__u16 handle;
1476 
1477 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1478 
1479 	if (!status)
1480 		return;
1481 
1482 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1483 	if (!cp)
1484 		return;
1485 
1486 	handle = __le16_to_cpu(cp->handle);
1487 
1488 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1489 
1490 	hci_dev_lock(hdev);
1491 
1492 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1493 	if (acl) {
1494 		sco = acl->link;
1495 		if (sco) {
1496 			sco->state = BT_CLOSED;
1497 
1498 			hci_proto_connect_cfm(sco, status);
1499 			hci_conn_del(sco);
1500 		}
1501 	}
1502 
1503 	hci_dev_unlock(hdev);
1504 }
1505 
1506 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1507 {
1508 	struct hci_cp_auth_requested *cp;
1509 	struct hci_conn *conn;
1510 
1511 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1512 
1513 	if (!status)
1514 		return;
1515 
1516 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1517 	if (!cp)
1518 		return;
1519 
1520 	hci_dev_lock(hdev);
1521 
1522 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1523 	if (conn) {
1524 		if (conn->state == BT_CONFIG) {
1525 			hci_proto_connect_cfm(conn, status);
1526 			hci_conn_put(conn);
1527 		}
1528 	}
1529 
1530 	hci_dev_unlock(hdev);
1531 }
1532 
1533 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1534 {
1535 	struct hci_cp_set_conn_encrypt *cp;
1536 	struct hci_conn *conn;
1537 
1538 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1539 
1540 	if (!status)
1541 		return;
1542 
1543 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1544 	if (!cp)
1545 		return;
1546 
1547 	hci_dev_lock(hdev);
1548 
1549 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1550 	if (conn) {
1551 		if (conn->state == BT_CONFIG) {
1552 			hci_proto_connect_cfm(conn, status);
1553 			hci_conn_put(conn);
1554 		}
1555 	}
1556 
1557 	hci_dev_unlock(hdev);
1558 }
1559 
1560 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1561 				    struct hci_conn *conn)
1562 {
1563 	if (conn->state != BT_CONFIG || !conn->out)
1564 		return 0;
1565 
1566 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1567 		return 0;
1568 
1569 	/* Only request authentication for SSP connections, for non-SSP
1570 	 * devices with sec_level HIGH, or when MITM protection is requested */
1571 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1572 	    conn->pending_sec_level != BT_SECURITY_HIGH)
1573 		return 0;
1574 
1575 	return 1;
1576 }
1577 
1578 static int hci_resolve_name(struct hci_dev *hdev,
1579 				   struct inquiry_entry *e)
1580 {
1581 	struct hci_cp_remote_name_req cp;
1582 
1583 	memset(&cp, 0, sizeof(cp));
1584 
1585 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1586 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1587 	cp.pscan_mode = e->data.pscan_mode;
1588 	cp.clock_offset = e->data.clock_offset;
1589 
1590 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1591 }
1592 
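/* Find the next inquiry cache entry still needing name resolution and
 * issue a Remote Name Request for it; returns true if a request was sent.
 */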
1593 static bool hci_resolve_next_name(struct hci_dev *hdev)
1594 {
1595 	struct discovery_state *discov = &hdev->discovery;
1596 	struct inquiry_entry *e;
1597 
1598 	if (list_empty(&discov->resolve))
1599 		return false;
1600 
1601 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1602 	if (!e)
1603 		return false;
1604 
1605 	if (hci_resolve_name(hdev, e) == 0) {
1606 		e->name_state = NAME_PENDING;
1607 		return true;
1608 	}
1609 
1610 	return false;
1611 }
1612 
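/* Called when a Remote Name Request completes (or fails): report the name
 * to mgmt if known, update the matching inquiry cache entry and move on to
 * the next pending name, stopping discovery when none remain.
 */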
1613 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1614 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1615 {
1616 	struct discovery_state *discov = &hdev->discovery;
1617 	struct inquiry_entry *e;
1618 
1619 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1620 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1621 				      name_len, conn->dev_class);
1622 
1623 	if (discov->state == DISCOVERY_STOPPED)
1624 		return;
1625 
1626 	if (discov->state == DISCOVERY_STOPPING)
1627 		goto discov_complete;
1628 
1629 	if (discov->state != DISCOVERY_RESOLVING)
1630 		return;
1631 
1632 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1633 	/* If the device was not found in the list of devices whose names
1634 	 * are pending resolution, there is no need to continue resolving the
1635 	 * next name, as that will be done upon receiving another Remote Name
1636 	 * Request Complete event */
1637 	if (!e)
1638 		return;
1639 
1640 	list_del(&e->list);
1641 	if (name) {
1642 		e->name_state = NAME_KNOWN;
1643 		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1644 				 e->data.rssi, name, name_len);
1645 	} else {
1646 		e->name_state = NAME_NOT_KNOWN;
1647 	}
1648 
1649 	if (hci_resolve_next_name(hdev))
1650 		return;
1651 
1652 discov_complete:
1653 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1654 }
1655 
1656 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1657 {
1658 	struct hci_cp_remote_name_req *cp;
1659 	struct hci_conn *conn;
1660 
1661 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1662 
1663 	/* If successful, wait for the Remote Name Request Complete event
1664 	 * before checking whether authentication is needed */
1665 	if (!status)
1666 		return;
1667 
1668 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1669 	if (!cp)
1670 		return;
1671 
1672 	hci_dev_lock(hdev);
1673 
1674 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1675 
1676 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1677 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1678 
1679 	if (!conn)
1680 		goto unlock;
1681 
1682 	if (!hci_outgoing_auth_needed(hdev, conn))
1683 		goto unlock;
1684 
1685 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1686 		struct hci_cp_auth_requested cp;
1687 		cp.handle = __cpu_to_le16(conn->handle);
1688 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1689 	}
1690 
1691 unlock:
1692 	hci_dev_unlock(hdev);
1693 }
1694 
1695 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1696 {
1697 	struct hci_cp_read_remote_features *cp;
1698 	struct hci_conn *conn;
1699 
1700 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1701 
1702 	if (!status)
1703 		return;
1704 
1705 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1706 	if (!cp)
1707 		return;
1708 
1709 	hci_dev_lock(hdev);
1710 
1711 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1712 	if (conn) {
1713 		if (conn->state == BT_CONFIG) {
1714 			hci_proto_connect_cfm(conn, status);
1715 			hci_conn_put(conn);
1716 		}
1717 	}
1718 
1719 	hci_dev_unlock(hdev);
1720 }
1721 
1722 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1723 {
1724 	struct hci_cp_read_remote_ext_features *cp;
1725 	struct hci_conn *conn;
1726 
1727 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1728 
1729 	if (!status)
1730 		return;
1731 
1732 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1733 	if (!cp)
1734 		return;
1735 
1736 	hci_dev_lock(hdev);
1737 
1738 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1739 	if (conn) {
1740 		if (conn->state == BT_CONFIG) {
1741 			hci_proto_connect_cfm(conn, status);
1742 			hci_conn_put(conn);
1743 		}
1744 	}
1745 
1746 	hci_dev_unlock(hdev);
1747 }
1748 
1749 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1750 {
1751 	struct hci_cp_setup_sync_conn *cp;
1752 	struct hci_conn *acl, *sco;
1753 	__u16 handle;
1754 
1755 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1756 
1757 	if (!status)
1758 		return;
1759 
1760 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1761 	if (!cp)
1762 		return;
1763 
1764 	handle = __le16_to_cpu(cp->handle);
1765 
1766 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1767 
1768 	hci_dev_lock(hdev);
1769 
1770 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1771 	if (acl) {
1772 		sco = acl->link;
1773 		if (sco) {
1774 			sco->state = BT_CLOSED;
1775 
1776 			hci_proto_connect_cfm(sco, status);
1777 			hci_conn_del(sco);
1778 		}
1779 	}
1780 
1781 	hci_dev_unlock(hdev);
1782 }
1783 
1784 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1785 {
1786 	struct hci_cp_sniff_mode *cp;
1787 	struct hci_conn *conn;
1788 
1789 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1790 
1791 	if (!status)
1792 		return;
1793 
1794 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1795 	if (!cp)
1796 		return;
1797 
1798 	hci_dev_lock(hdev);
1799 
1800 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1801 	if (conn) {
1802 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1803 
1804 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1805 			hci_sco_setup(conn, status);
1806 	}
1807 
1808 	hci_dev_unlock(hdev);
1809 }
1810 
1811 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1812 {
1813 	struct hci_cp_exit_sniff_mode *cp;
1814 	struct hci_conn *conn;
1815 
1816 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1817 
1818 	if (!status)
1819 		return;
1820 
1821 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1822 	if (!cp)
1823 		return;
1824 
1825 	hci_dev_lock(hdev);
1826 
1827 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1828 	if (conn) {
1829 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1830 
1831 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1832 			hci_sco_setup(conn, status);
1833 	}
1834 
1835 	hci_dev_unlock(hdev);
1836 }
1837 
1838 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1839 {
1840 	struct hci_cp_disconnect *cp;
1841 	struct hci_conn *conn;
1842 
1843 	if (!status)
1844 		return;
1845 
1846 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1847 	if (!cp)
1848 		return;
1849 
1850 	hci_dev_lock(hdev);
1851 
1852 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1853 	if (conn)
1854 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1855 				       conn->dst_type, status);
1856 
1857 	hci_dev_unlock(hdev);
1858 }
1859 
1860 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1861 {
1862 	struct hci_conn *conn;
1863 
1864 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1865 
1866 	if (status) {
1867 		hci_dev_lock(hdev);
1868 
1869 		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1870 		if (!conn) {
1871 			hci_dev_unlock(hdev);
1872 			return;
1873 		}
1874 
1875 		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1876 
1877 		conn->state = BT_CLOSED;
1878 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1879 				    conn->dst_type, status);
1880 		hci_proto_connect_cfm(conn, status);
1881 		hci_conn_del(conn);
1882 
1883 		hci_dev_unlock(hdev);
1884 	}
1885 }
1886 
1887 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1888 {
1889 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1890 }
1891 
1892 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1893 {
1894 	struct hci_cp_create_phy_link *cp;
1895 
1896 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1897 
1898 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1899 	if (!cp)
1900 		return;
1901 
1902 	hci_dev_lock(hdev);
1903 
1904 	if (status) {
1905 		struct hci_conn *hcon;
1906 
1907 		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1908 		if (hcon)
1909 			hci_conn_del(hcon);
1910 	} else {
1911 		amp_write_remote_assoc(hdev, cp->phy_handle);
1912 	}
1913 
1914 	hci_dev_unlock(hdev);
1915 }
1916 
1917 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1918 {
1919 	struct hci_cp_accept_phy_link *cp;
1920 
1921 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1922 
1923 	if (status)
1924 		return;
1925 
1926 	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1927 	if (!cp)
1928 		return;
1929 
1930 	amp_write_remote_assoc(hdev, cp->phy_handle);
1931 }
1932 
1933 static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status)
1934 {
1935 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1936 }
1937 
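/* Inquiry Complete event: clear HCI_INQUIRY and, for mgmt-driven
 * discovery, either start resolving names of cached entries or mark the
 * discovery as stopped.
 */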
1938 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1939 {
1940 	__u8 status = *((__u8 *) skb->data);
1941 	struct discovery_state *discov = &hdev->discovery;
1942 	struct inquiry_entry *e;
1943 
1944 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1945 
1946 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1947 
1948 	hci_conn_check_pending(hdev);
1949 
1950 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1951 		return;
1952 
1953 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1954 		return;
1955 
1956 	hci_dev_lock(hdev);
1957 
1958 	if (discov->state != DISCOVERY_FINDING)
1959 		goto unlock;
1960 
1961 	if (list_empty(&discov->resolve)) {
1962 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1963 		goto unlock;
1964 	}
1965 
1966 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1967 	if (e && hci_resolve_name(hdev, e) == 0) {
1968 		e->name_state = NAME_PENDING;
1969 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1970 	} else {
1971 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1972 	}
1973 
1974 unlock:
1975 	hci_dev_unlock(hdev);
1976 }
1977 
1978 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1979 {
1980 	struct inquiry_data data;
1981 	struct inquiry_info *info = (void *) (skb->data + 1);
1982 	int num_rsp = *((__u8 *) skb->data);
1983 
1984 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1985 
1986 	if (!num_rsp)
1987 		return;
1988 
1989 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1990 		return;
1991 
1992 	hci_dev_lock(hdev);
1993 
1994 	for (; num_rsp; num_rsp--, info++) {
1995 		bool name_known, ssp;
1996 
1997 		bacpy(&data.bdaddr, &info->bdaddr);
1998 		data.pscan_rep_mode	= info->pscan_rep_mode;
1999 		data.pscan_period_mode	= info->pscan_period_mode;
2000 		data.pscan_mode		= info->pscan_mode;
2001 		memcpy(data.dev_class, info->dev_class, 3);
2002 		data.clock_offset	= info->clock_offset;
2003 		data.rssi		= 0x00;
2004 		data.ssp_mode		= 0x00;
2005 
2006 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
2007 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2008 				  info->dev_class, 0, !name_known, ssp, NULL,
2009 				  0);
2010 	}
2011 
2012 	hci_dev_unlock(hdev);
2013 }
2014 
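/* Connection Complete: finish setting up a newly established ACL/SCO link
 * (remote features, packet type, sysfs entry) or clean up on failure. */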
2015 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2016 {
2017 	struct hci_ev_conn_complete *ev = (void *) skb->data;
2018 	struct hci_conn *conn;
2019 
2020 	BT_DBG("%s", hdev->name);
2021 
2022 	hci_dev_lock(hdev);
2023 
2024 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2025 	if (!conn) {
2026 		if (ev->link_type != SCO_LINK)
2027 			goto unlock;
2028 
2029 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2030 		if (!conn)
2031 			goto unlock;
2032 
2033 		conn->type = SCO_LINK;
2034 	}
2035 
2036 	if (!ev->status) {
2037 		conn->handle = __le16_to_cpu(ev->handle);
2038 
2039 		if (conn->type == ACL_LINK) {
2040 			conn->state = BT_CONFIG;
2041 			hci_conn_hold(conn);
2042 
2043 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2044 			    !hci_find_link_key(hdev, &ev->bdaddr))
2045 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2046 			else
2047 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2048 		} else
2049 			conn->state = BT_CONNECTED;
2050 
2051 		hci_conn_hold_device(conn);
2052 		hci_conn_add_sysfs(conn);
2053 
2054 		if (test_bit(HCI_AUTH, &hdev->flags))
2055 			conn->link_mode |= HCI_LM_AUTH;
2056 
2057 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
2058 			conn->link_mode |= HCI_LM_ENCRYPT;
2059 
2060 		/* Get remote features */
2061 		if (conn->type == ACL_LINK) {
2062 			struct hci_cp_read_remote_features cp;
2063 			cp.handle = ev->handle;
2064 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2065 				     sizeof(cp), &cp);
2066 		}
2067 
2068 		/* Set packet type for incoming connection */
2069 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2070 			struct hci_cp_change_conn_ptype cp;
2071 			cp.handle = ev->handle;
2072 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
2073 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2074 				     &cp);
2075 		}
2076 	} else {
2077 		conn->state = BT_CLOSED;
2078 		if (conn->type == ACL_LINK)
2079 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
2080 					    conn->dst_type, ev->status);
2081 	}
2082 
2083 	if (conn->type == ACL_LINK)
2084 		hci_sco_setup(conn, ev->status);
2085 
2086 	if (ev->status) {
2087 		hci_proto_connect_cfm(conn, ev->status);
2088 		hci_conn_del(conn);
2089 	} else if (ev->link_type != ACL_LINK)
2090 		hci_proto_connect_cfm(conn, ev->status);
2091 
2092 unlock:
2093 	hci_dev_unlock(hdev);
2094 
2095 	hci_conn_check_pending(hdev);
2096 }
2097 
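/* Accept a deferred incoming connection, using the synchronous variant of
 * the accept command when the controller is eSCO capable. */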
2098 void hci_conn_accept(struct hci_conn *conn, int mask)
2099 {
2100 	struct hci_dev *hdev = conn->hdev;
2101 
2102 	BT_DBG("conn %p", conn);
2103 
2104 	conn->state = BT_CONFIG;
2105 
2106 	if (!lmp_esco_capable(hdev)) {
2107 		struct hci_cp_accept_conn_req cp;
2108 
2109 		bacpy(&cp.bdaddr, &conn->dst);
2110 
2111 		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2112 			cp.role = 0x00; /* Become master */
2113 		else
2114 			cp.role = 0x01; /* Remain slave */
2115 
2116 		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2117 	} else /* lmp_esco_capable(hdev) */ {
2118 		struct hci_cp_accept_sync_conn_req cp;
2119 
2120 		bacpy(&cp.bdaddr, &conn->dst);
2121 		cp.pkt_type = cpu_to_le16(conn->pkt_type);
2122 
2123 		cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
2124 		cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
2125 		cp.max_latency    = __constant_cpu_to_le16(0xffff);
2126 		cp.content_format = cpu_to_le16(hdev->voice_setting);
2127 		cp.retrans_effort = 0xff;
2128 
2129 		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
2130 			     sizeof(cp), &cp);
2131 	}
2132 }
2133 
2134 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2135 {
2136 	struct hci_ev_conn_request *ev = (void *) skb->data;
2137 	int mask = hdev->link_mode;
2138 	__u8 flags = 0;
2139 
2140 	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2141 	       ev->link_type);
2142 
2143 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2144 				      &flags);
2145 
2146 	if ((mask & HCI_LM_ACCEPT) &&
2147 	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
2148 		/* Connection accepted */
2149 		struct inquiry_entry *ie;
2150 		struct hci_conn *conn;
2151 
2152 		hci_dev_lock(hdev);
2153 
2154 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2155 		if (ie)
2156 			memcpy(ie->data.dev_class, ev->dev_class, 3);
2157 
2158 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2159 					       &ev->bdaddr);
2160 		if (!conn) {
2161 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2162 			if (!conn) {
2163 				BT_ERR("No memory for new connection");
2164 				hci_dev_unlock(hdev);
2165 				return;
2166 			}
2167 		}
2168 
2169 		memcpy(conn->dev_class, ev->dev_class, 3);
2170 
2171 		hci_dev_unlock(hdev);
2172 
2173 		if (ev->link_type == ACL_LINK ||
2174 		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2175 			struct hci_cp_accept_conn_req cp;
2176 			conn->state = BT_CONNECT;
2177 
2178 			bacpy(&cp.bdaddr, &ev->bdaddr);
2179 
2180 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2181 				cp.role = 0x00; /* Become master */
2182 			else
2183 				cp.role = 0x01; /* Remain slave */
2184 
2185 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
2186 				     &cp);
2187 		} else if (!(flags & HCI_PROTO_DEFER)) {
2188 			struct hci_cp_accept_sync_conn_req cp;
2189 			conn->state = BT_CONNECT;
2190 
2191 			bacpy(&cp.bdaddr, &ev->bdaddr);
2192 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
2193 
2194 			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
2195 			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
2196 			cp.max_latency    = __constant_cpu_to_le16(0xffff);
2197 			cp.content_format = cpu_to_le16(hdev->voice_setting);
2198 			cp.retrans_effort = 0xff;
2199 
2200 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
2201 				     sizeof(cp), &cp);
2202 		} else {
2203 			conn->state = BT_CONNECT2;
2204 			hci_proto_connect_cfm(conn, 0);
2205 			hci_conn_put(conn);
2206 		}
2207 	} else {
2208 		/* Connection rejected */
2209 		struct hci_cp_reject_conn_req cp;
2210 
2211 		bacpy(&cp.bdaddr, &ev->bdaddr);
2212 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2213 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2214 	}
2215 }
2216 
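/* Map an HCI disconnection error code to its mgmt disconnect reason. */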
2217 static u8 hci_to_mgmt_reason(u8 err)
2218 {
2219 	switch (err) {
2220 	case HCI_ERROR_CONNECTION_TIMEOUT:
2221 		return MGMT_DEV_DISCONN_TIMEOUT;
2222 	case HCI_ERROR_REMOTE_USER_TERM:
2223 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2224 	case HCI_ERROR_REMOTE_POWER_OFF:
2225 		return MGMT_DEV_DISCONN_REMOTE;
2226 	case HCI_ERROR_LOCAL_HOST_TERM:
2227 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2228 	default:
2229 		return MGMT_DEV_DISCONN_UNKNOWN;
2230 	}
2231 }
2232 
2233 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2234 {
2235 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
2236 	struct hci_conn *conn;
2237 
2238 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2239 
2240 	hci_dev_lock(hdev);
2241 
2242 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2243 	if (!conn)
2244 		goto unlock;
2245 
2246 	if (ev->status == 0)
2247 		conn->state = BT_CLOSED;
2248 
2249 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
2250 	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
2251 		if (ev->status) {
2252 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2253 					       conn->dst_type, ev->status);
2254 		} else {
2255 			u8 reason = hci_to_mgmt_reason(ev->reason);
2256 
2257 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
2258 						 conn->dst_type, reason);
2259 		}
2260 	}
2261 
2262 	if (ev->status == 0) {
2263 		if (conn->type == ACL_LINK && conn->flush_key)
2264 			hci_remove_link_key(hdev, &conn->dst);
2265 		hci_proto_disconn_cfm(conn, ev->reason);
2266 		hci_conn_del(conn);
2267 	}
2268 
2269 unlock:
2270 	hci_dev_unlock(hdev);
2271 }
2272 
2273 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2274 {
2275 	struct hci_ev_auth_complete *ev = (void *) skb->data;
2276 	struct hci_conn *conn;
2277 
2278 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2279 
2280 	hci_dev_lock(hdev);
2281 
2282 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2283 	if (!conn)
2284 		goto unlock;
2285 
2286 	if (!ev->status) {
2287 		if (!hci_conn_ssp_enabled(conn) &&
2288 		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2289 			BT_INFO("re-auth of legacy device is not possible.");
2290 		} else {
2291 			conn->link_mode |= HCI_LM_AUTH;
2292 			conn->sec_level = conn->pending_sec_level;
2293 		}
2294 	} else {
2295 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2296 				 ev->status);
2297 	}
2298 
2299 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2300 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2301 
2302 	if (conn->state == BT_CONFIG) {
2303 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
2304 			struct hci_cp_set_conn_encrypt cp;
2305 			cp.handle  = ev->handle;
2306 			cp.encrypt = 0x01;
2307 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2308 				     &cp);
2309 		} else {
2310 			conn->state = BT_CONNECTED;
2311 			hci_proto_connect_cfm(conn, ev->status);
2312 			hci_conn_put(conn);
2313 		}
2314 	} else {
2315 		hci_auth_cfm(conn, ev->status);
2316 
2317 		hci_conn_hold(conn);
2318 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2319 		hci_conn_put(conn);
2320 	}
2321 
2322 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2323 		if (!ev->status) {
2324 			struct hci_cp_set_conn_encrypt cp;
2325 			cp.handle  = ev->handle;
2326 			cp.encrypt = 0x01;
2327 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2328 				     &cp);
2329 		} else {
2330 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2331 			hci_encrypt_cfm(conn, ev->status, 0x00);
2332 		}
2333 	}
2334 
2335 unlock:
2336 	hci_dev_unlock(hdev);
2337 }
2338 
2339 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2340 {
2341 	struct hci_ev_remote_name *ev = (void *) skb->data;
2342 	struct hci_conn *conn;
2343 
2344 	BT_DBG("%s", hdev->name);
2345 
2346 	hci_conn_check_pending(hdev);
2347 
2348 	hci_dev_lock(hdev);
2349 
2350 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2351 
2352 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2353 		goto check_auth;
2354 
2355 	if (ev->status == 0)
2356 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2357 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2358 	else
2359 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2360 
2361 check_auth:
2362 	if (!conn)
2363 		goto unlock;
2364 
2365 	if (!hci_outgoing_auth_needed(hdev, conn))
2366 		goto unlock;
2367 
2368 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2369 		struct hci_cp_auth_requested cp;
2370 		cp.handle = __cpu_to_le16(conn->handle);
2371 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2372 	}
2373 
2374 unlock:
2375 	hci_dev_unlock(hdev);
2376 }
2377 
2378 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2379 {
2380 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2381 	struct hci_conn *conn;
2382 
2383 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2384 
2385 	hci_dev_lock(hdev);
2386 
2387 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2388 	if (conn) {
2389 		if (!ev->status) {
2390 			if (ev->encrypt) {
2391 				/* Encryption implies authentication */
2392 				conn->link_mode |= HCI_LM_AUTH;
2393 				conn->link_mode |= HCI_LM_ENCRYPT;
2394 				conn->sec_level = conn->pending_sec_level;
2395 			} else
2396 				conn->link_mode &= ~HCI_LM_ENCRYPT;
2397 		}
2398 
2399 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2400 
2401 		if (ev->status && conn->state == BT_CONNECTED) {
2402 			hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2403 			hci_conn_put(conn);
2404 			goto unlock;
2405 		}
2406 
2407 		if (conn->state == BT_CONFIG) {
2408 			if (!ev->status)
2409 				conn->state = BT_CONNECTED;
2410 
2411 			hci_proto_connect_cfm(conn, ev->status);
2412 			hci_conn_put(conn);
2413 		} else
2414 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2415 	}
2416 
2417 unlock:
2418 	hci_dev_unlock(hdev);
2419 }
2420 
2421 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2422 					     struct sk_buff *skb)
2423 {
2424 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2425 	struct hci_conn *conn;
2426 
2427 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2428 
2429 	hci_dev_lock(hdev);
2430 
2431 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2432 	if (conn) {
2433 		if (!ev->status)
2434 			conn->link_mode |= HCI_LM_SECURE;
2435 
2436 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2437 
2438 		hci_key_change_cfm(conn, ev->status);
2439 	}
2440 
2441 	hci_dev_unlock(hdev);
2442 }
2443 
2444 static void hci_remote_features_evt(struct hci_dev *hdev,
2445 				    struct sk_buff *skb)
2446 {
2447 	struct hci_ev_remote_features *ev = (void *) skb->data;
2448 	struct hci_conn *conn;
2449 
2450 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2451 
2452 	hci_dev_lock(hdev);
2453 
2454 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2455 	if (!conn)
2456 		goto unlock;
2457 
2458 	if (!ev->status)
2459 		memcpy(conn->features, ev->features, 8);
2460 
2461 	if (conn->state != BT_CONFIG)
2462 		goto unlock;
2463 
2464 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2465 		struct hci_cp_read_remote_ext_features cp;
2466 		cp.handle = ev->handle;
2467 		cp.page = 0x01;
2468 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2469 			     sizeof(cp), &cp);
2470 		goto unlock;
2471 	}
2472 
2473 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2474 		struct hci_cp_remote_name_req cp;
2475 		memset(&cp, 0, sizeof(cp));
2476 		bacpy(&cp.bdaddr, &conn->dst);
2477 		cp.pscan_rep_mode = 0x02;
2478 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2479 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2480 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2481 				      conn->dst_type, 0, NULL, 0,
2482 				      conn->dev_class);
2483 
2484 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2485 		conn->state = BT_CONNECTED;
2486 		hci_proto_connect_cfm(conn, ev->status);
2487 		hci_conn_put(conn);
2488 	}
2489 
2490 unlock:
2491 	hci_dev_unlock(hdev);
2492 }
2493 
2494 static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495 {
2496 	BT_DBG("%s", hdev->name);
2497 }
2498 
2499 static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2500 				       struct sk_buff *skb)
2501 {
2502 	BT_DBG("%s", hdev->name);
2503 }
2504 
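/* Command Complete: dispatch to the matching hci_cc_* handler by opcode and
 * restart the command queue if the controller reports free command slots. */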
2505 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2506 {
2507 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2508 	__u16 opcode;
2509 
2510 	skb_pull(skb, sizeof(*ev));
2511 
2512 	opcode = __le16_to_cpu(ev->opcode);
2513 
2514 	switch (opcode) {
2515 	case HCI_OP_INQUIRY_CANCEL:
2516 		hci_cc_inquiry_cancel(hdev, skb);
2517 		break;
2518 
2519 	case HCI_OP_PERIODIC_INQ:
2520 		hci_cc_periodic_inq(hdev, skb);
2521 		break;
2522 
2523 	case HCI_OP_EXIT_PERIODIC_INQ:
2524 		hci_cc_exit_periodic_inq(hdev, skb);
2525 		break;
2526 
2527 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2528 		hci_cc_remote_name_req_cancel(hdev, skb);
2529 		break;
2530 
2531 	case HCI_OP_ROLE_DISCOVERY:
2532 		hci_cc_role_discovery(hdev, skb);
2533 		break;
2534 
2535 	case HCI_OP_READ_LINK_POLICY:
2536 		hci_cc_read_link_policy(hdev, skb);
2537 		break;
2538 
2539 	case HCI_OP_WRITE_LINK_POLICY:
2540 		hci_cc_write_link_policy(hdev, skb);
2541 		break;
2542 
2543 	case HCI_OP_READ_DEF_LINK_POLICY:
2544 		hci_cc_read_def_link_policy(hdev, skb);
2545 		break;
2546 
2547 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2548 		hci_cc_write_def_link_policy(hdev, skb);
2549 		break;
2550 
2551 	case HCI_OP_RESET:
2552 		hci_cc_reset(hdev, skb);
2553 		break;
2554 
2555 	case HCI_OP_WRITE_LOCAL_NAME:
2556 		hci_cc_write_local_name(hdev, skb);
2557 		break;
2558 
2559 	case HCI_OP_READ_LOCAL_NAME:
2560 		hci_cc_read_local_name(hdev, skb);
2561 		break;
2562 
2563 	case HCI_OP_WRITE_AUTH_ENABLE:
2564 		hci_cc_write_auth_enable(hdev, skb);
2565 		break;
2566 
2567 	case HCI_OP_WRITE_ENCRYPT_MODE:
2568 		hci_cc_write_encrypt_mode(hdev, skb);
2569 		break;
2570 
2571 	case HCI_OP_WRITE_SCAN_ENABLE:
2572 		hci_cc_write_scan_enable(hdev, skb);
2573 		break;
2574 
2575 	case HCI_OP_READ_CLASS_OF_DEV:
2576 		hci_cc_read_class_of_dev(hdev, skb);
2577 		break;
2578 
2579 	case HCI_OP_WRITE_CLASS_OF_DEV:
2580 		hci_cc_write_class_of_dev(hdev, skb);
2581 		break;
2582 
2583 	case HCI_OP_READ_VOICE_SETTING:
2584 		hci_cc_read_voice_setting(hdev, skb);
2585 		break;
2586 
2587 	case HCI_OP_WRITE_VOICE_SETTING:
2588 		hci_cc_write_voice_setting(hdev, skb);
2589 		break;
2590 
2591 	case HCI_OP_HOST_BUFFER_SIZE:
2592 		hci_cc_host_buffer_size(hdev, skb);
2593 		break;
2594 
2595 	case HCI_OP_WRITE_SSP_MODE:
2596 		hci_cc_write_ssp_mode(hdev, skb);
2597 		break;
2598 
2599 	case HCI_OP_READ_LOCAL_VERSION:
2600 		hci_cc_read_local_version(hdev, skb);
2601 		break;
2602 
2603 	case HCI_OP_READ_LOCAL_COMMANDS:
2604 		hci_cc_read_local_commands(hdev, skb);
2605 		break;
2606 
2607 	case HCI_OP_READ_LOCAL_FEATURES:
2608 		hci_cc_read_local_features(hdev, skb);
2609 		break;
2610 
2611 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2612 		hci_cc_read_local_ext_features(hdev, skb);
2613 		break;
2614 
2615 	case HCI_OP_READ_BUFFER_SIZE:
2616 		hci_cc_read_buffer_size(hdev, skb);
2617 		break;
2618 
2619 	case HCI_OP_READ_BD_ADDR:
2620 		hci_cc_read_bd_addr(hdev, skb);
2621 		break;
2622 
2623 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2624 		hci_cc_read_data_block_size(hdev, skb);
2625 		break;
2626 
2627 	case HCI_OP_WRITE_CA_TIMEOUT:
2628 		hci_cc_write_ca_timeout(hdev, skb);
2629 		break;
2630 
2631 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2632 		hci_cc_read_flow_control_mode(hdev, skb);
2633 		break;
2634 
2635 	case HCI_OP_READ_LOCAL_AMP_INFO:
2636 		hci_cc_read_local_amp_info(hdev, skb);
2637 		break;
2638 
2639 	case HCI_OP_READ_LOCAL_AMP_ASSOC:
2640 		hci_cc_read_local_amp_assoc(hdev, skb);
2641 		break;
2642 
2643 	case HCI_OP_DELETE_STORED_LINK_KEY:
2644 		hci_cc_delete_stored_link_key(hdev, skb);
2645 		break;
2646 
2647 	case HCI_OP_SET_EVENT_MASK:
2648 		hci_cc_set_event_mask(hdev, skb);
2649 		break;
2650 
2651 	case HCI_OP_WRITE_INQUIRY_MODE:
2652 		hci_cc_write_inquiry_mode(hdev, skb);
2653 		break;
2654 
2655 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2656 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2657 		break;
2658 
2659 	case HCI_OP_SET_EVENT_FLT:
2660 		hci_cc_set_event_flt(hdev, skb);
2661 		break;
2662 
2663 	case HCI_OP_PIN_CODE_REPLY:
2664 		hci_cc_pin_code_reply(hdev, skb);
2665 		break;
2666 
2667 	case HCI_OP_PIN_CODE_NEG_REPLY:
2668 		hci_cc_pin_code_neg_reply(hdev, skb);
2669 		break;
2670 
2671 	case HCI_OP_READ_LOCAL_OOB_DATA:
2672 		hci_cc_read_local_oob_data_reply(hdev, skb);
2673 		break;
2674 
2675 	case HCI_OP_LE_READ_BUFFER_SIZE:
2676 		hci_cc_le_read_buffer_size(hdev, skb);
2677 		break;
2678 
2679 	case HCI_OP_LE_READ_LOCAL_FEATURES:
2680 		hci_cc_le_read_local_features(hdev, skb);
2681 		break;
2682 
2683 	case HCI_OP_LE_READ_ADV_TX_POWER:
2684 		hci_cc_le_read_adv_tx_power(hdev, skb);
2685 		break;
2686 
2687 	case HCI_OP_LE_SET_EVENT_MASK:
2688 		hci_cc_le_set_event_mask(hdev, skb);
2689 		break;
2690 
2691 	case HCI_OP_USER_CONFIRM_REPLY:
2692 		hci_cc_user_confirm_reply(hdev, skb);
2693 		break;
2694 
2695 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2696 		hci_cc_user_confirm_neg_reply(hdev, skb);
2697 		break;
2698 
2699 	case HCI_OP_USER_PASSKEY_REPLY:
2700 		hci_cc_user_passkey_reply(hdev, skb);
2701 		break;
2702 
2703 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2704 		hci_cc_user_passkey_neg_reply(hdev, skb);
2705 		break;
2706 
2707 	case HCI_OP_LE_SET_SCAN_PARAM:
2708 		hci_cc_le_set_scan_param(hdev, skb);
2709 		break;
2710 
2711 	case HCI_OP_LE_SET_ADV_ENABLE:
2712 		hci_cc_le_set_adv_enable(hdev, skb);
2713 		break;
2714 
2715 	case HCI_OP_LE_SET_SCAN_ENABLE:
2716 		hci_cc_le_set_scan_enable(hdev, skb);
2717 		break;
2718 
2719 	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2720 		hci_cc_le_read_white_list_size(hdev, skb);
2721 		break;
2722 
2723 	case HCI_OP_LE_LTK_REPLY:
2724 		hci_cc_le_ltk_reply(hdev, skb);
2725 		break;
2726 
2727 	case HCI_OP_LE_LTK_NEG_REPLY:
2728 		hci_cc_le_ltk_neg_reply(hdev, skb);
2729 		break;
2730 
2731 	case HCI_OP_LE_READ_SUPPORTED_STATES:
2732 		hci_cc_le_read_supported_states(hdev, skb);
2733 		break;
2734 
2735 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2736 		hci_cc_write_le_host_supported(hdev, skb);
2737 		break;
2738 
2739 	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2740 		hci_cc_write_remote_amp_assoc(hdev, skb);
2741 		break;
2742 
2743 	default:
2744 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2745 		break;
2746 	}
2747 
2748 	if (ev->opcode != HCI_OP_NOP)
2749 		del_timer(&hdev->cmd_timer);
2750 
2751 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2752 		atomic_set(&hdev->cmd_cnt, 1);
2753 		if (!skb_queue_empty(&hdev->cmd_q))
2754 			queue_work(hdev->workqueue, &hdev->cmd_work);
2755 	}
2756 }
2757 
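/* Command Status: dispatch to the matching hci_cs_* handler by opcode. */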
2758 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2759 {
2760 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2761 	__u16 opcode;
2762 
2763 	skb_pull(skb, sizeof(*ev));
2764 
2765 	opcode = __le16_to_cpu(ev->opcode);
2766 
2767 	switch (opcode) {
2768 	case HCI_OP_INQUIRY:
2769 		hci_cs_inquiry(hdev, ev->status);
2770 		break;
2771 
2772 	case HCI_OP_CREATE_CONN:
2773 		hci_cs_create_conn(hdev, ev->status);
2774 		break;
2775 
2776 	case HCI_OP_ADD_SCO:
2777 		hci_cs_add_sco(hdev, ev->status);
2778 		break;
2779 
2780 	case HCI_OP_AUTH_REQUESTED:
2781 		hci_cs_auth_requested(hdev, ev->status);
2782 		break;
2783 
2784 	case HCI_OP_SET_CONN_ENCRYPT:
2785 		hci_cs_set_conn_encrypt(hdev, ev->status);
2786 		break;
2787 
2788 	case HCI_OP_REMOTE_NAME_REQ:
2789 		hci_cs_remote_name_req(hdev, ev->status);
2790 		break;
2791 
2792 	case HCI_OP_READ_REMOTE_FEATURES:
2793 		hci_cs_read_remote_features(hdev, ev->status);
2794 		break;
2795 
2796 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2797 		hci_cs_read_remote_ext_features(hdev, ev->status);
2798 		break;
2799 
2800 	case HCI_OP_SETUP_SYNC_CONN:
2801 		hci_cs_setup_sync_conn(hdev, ev->status);
2802 		break;
2803 
2804 	case HCI_OP_SNIFF_MODE:
2805 		hci_cs_sniff_mode(hdev, ev->status);
2806 		break;
2807 
2808 	case HCI_OP_EXIT_SNIFF_MODE:
2809 		hci_cs_exit_sniff_mode(hdev, ev->status);
2810 		break;
2811 
2812 	case HCI_OP_DISCONNECT:
2813 		hci_cs_disconnect(hdev, ev->status);
2814 		break;
2815 
2816 	case HCI_OP_LE_CREATE_CONN:
2817 		hci_cs_le_create_conn(hdev, ev->status);
2818 		break;
2819 
2820 	case HCI_OP_LE_START_ENC:
2821 		hci_cs_le_start_enc(hdev, ev->status);
2822 		break;
2823 
2824 	case HCI_OP_CREATE_PHY_LINK:
2825 		hci_cs_create_phylink(hdev, ev->status);
2826 		break;
2827 
2828 	case HCI_OP_ACCEPT_PHY_LINK:
2829 		hci_cs_accept_phylink(hdev, ev->status);
2830 		break;
2831 
2832 	case HCI_OP_CREATE_LOGICAL_LINK:
2833 		hci_cs_create_logical_link(hdev, ev->status);
2834 		break;
2835 
2836 	default:
2837 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2838 		break;
2839 	}
2840 
2841 	if (ev->opcode != HCI_OP_NOP)
2842 		del_timer(&hdev->cmd_timer);
2843 
2844 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2845 		atomic_set(&hdev->cmd_cnt, 1);
2846 		if (!skb_queue_empty(&hdev->cmd_q))
2847 			queue_work(hdev->workqueue, &hdev->cmd_work);
2848 	}
2849 }
2850 
2851 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2852 {
2853 	struct hci_ev_role_change *ev = (void *) skb->data;
2854 	struct hci_conn *conn;
2855 
2856 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2857 
2858 	hci_dev_lock(hdev);
2859 
2860 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2861 	if (conn) {
2862 		if (!ev->status) {
2863 			if (ev->role)
2864 				conn->link_mode &= ~HCI_LM_MASTER;
2865 			else
2866 				conn->link_mode |= HCI_LM_MASTER;
2867 		}
2868 
2869 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2870 
2871 		hci_role_switch_cfm(conn, ev->status, ev->role);
2872 	}
2873 
2874 	hci_dev_unlock(hdev);
2875 }
2876 
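/* Packet-based flow control: return the completed-packet credits to the
 * per-link-type counters and kick the TX work queue. */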
2877 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2878 {
2879 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2880 	int i;
2881 
2882 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2883 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2884 		return;
2885 	}
2886 
2887 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2888 	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2889 		BT_DBG("%s bad parameters", hdev->name);
2890 		return;
2891 	}
2892 
2893 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2894 
2895 	for (i = 0; i < ev->num_hndl; i++) {
2896 		struct hci_comp_pkts_info *info = &ev->handles[i];
2897 		struct hci_conn *conn;
2898 		__u16  handle, count;
2899 
2900 		handle = __le16_to_cpu(info->handle);
2901 		count  = __le16_to_cpu(info->count);
2902 
2903 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2904 		if (!conn)
2905 			continue;
2906 
2907 		conn->sent -= count;
2908 
2909 		switch (conn->type) {
2910 		case ACL_LINK:
2911 			hdev->acl_cnt += count;
2912 			if (hdev->acl_cnt > hdev->acl_pkts)
2913 				hdev->acl_cnt = hdev->acl_pkts;
2914 			break;
2915 
2916 		case LE_LINK:
2917 			if (hdev->le_pkts) {
2918 				hdev->le_cnt += count;
2919 				if (hdev->le_cnt > hdev->le_pkts)
2920 					hdev->le_cnt = hdev->le_pkts;
2921 			} else {
2922 				hdev->acl_cnt += count;
2923 				if (hdev->acl_cnt > hdev->acl_pkts)
2924 					hdev->acl_cnt = hdev->acl_pkts;
2925 			}
2926 			break;
2927 
2928 		case SCO_LINK:
2929 			hdev->sco_cnt += count;
2930 			if (hdev->sco_cnt > hdev->sco_pkts)
2931 				hdev->sco_cnt = hdev->sco_pkts;
2932 			break;
2933 
2934 		default:
2935 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2936 			break;
2937 		}
2938 	}
2939 
2940 	queue_work(hdev->workqueue, &hdev->tx_work);
2941 }
2942 
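/* Resolve a handle to a connection; on AMP controllers the handle names a
 * logical channel, so go through the channel lookup instead. */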
2943 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2944 						 __u16 handle)
2945 {
2946 	struct hci_chan *chan;
2947 
2948 	switch (hdev->dev_type) {
2949 	case HCI_BREDR:
2950 		return hci_conn_hash_lookup_handle(hdev, handle);
2951 	case HCI_AMP:
2952 		chan = hci_chan_lookup_handle(hdev, handle);
2953 		if (chan)
2954 			return chan->conn;
2955 		break;
2956 	default:
2957 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2958 		break;
2959 	}
2960 
2961 	return NULL;
2962 }
2963 
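/* Block-based flow control counterpart of hci_num_comp_pkts_evt(). */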
2964 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2965 {
2966 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2967 	int i;
2968 
2969 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2970 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2971 		return;
2972 	}
2973 
2974 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2975 	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2976 		BT_DBG("%s bad parameters", hdev->name);
2977 		return;
2978 	}
2979 
2980 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2981 	       ev->num_hndl);
2982 
2983 	for (i = 0; i < ev->num_hndl; i++) {
2984 		struct hci_comp_blocks_info *info = &ev->handles[i];
2985 		struct hci_conn *conn = NULL;
2986 		__u16  handle, block_count;
2987 
2988 		handle = __le16_to_cpu(info->handle);
2989 		block_count = __le16_to_cpu(info->blocks);
2990 
2991 		conn = __hci_conn_lookup_handle(hdev, handle);
2992 		if (!conn)
2993 			continue;
2994 
2995 		conn->sent -= block_count;
2996 
2997 		switch (conn->type) {
2998 		case ACL_LINK:
2999 		case AMP_LINK:
3000 			hdev->block_cnt += block_count;
3001 			if (hdev->block_cnt > hdev->num_blocks)
3002 				hdev->block_cnt = hdev->num_blocks;
3003 			break;
3004 
3005 		default:
3006 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3007 			break;
3008 		}
3009 	}
3010 
3011 	queue_work(hdev->workqueue, &hdev->tx_work);
3012 }
3013 
3014 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3015 {
3016 	struct hci_ev_mode_change *ev = (void *) skb->data;
3017 	struct hci_conn *conn;
3018 
3019 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3020 
3021 	hci_dev_lock(hdev);
3022 
3023 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3024 	if (conn) {
3025 		conn->mode = ev->mode;
3026 		conn->interval = __le16_to_cpu(ev->interval);
3027 
3028 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3029 					&conn->flags)) {
3030 			if (conn->mode == HCI_CM_ACTIVE)
3031 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3032 			else
3033 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3034 		}
3035 
3036 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3037 			hci_sco_setup(conn, ev->status);
3038 	}
3039 
3040 	hci_dev_unlock(hdev);
3041 }
3042 
3043 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3044 {
3045 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3046 	struct hci_conn *conn;
3047 
3048 	BT_DBG("%s", hdev->name);
3049 
3050 	hci_dev_lock(hdev);
3051 
3052 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3053 	if (!conn)
3054 		goto unlock;
3055 
3056 	if (conn->state == BT_CONNECTED) {
3057 		hci_conn_hold(conn);
3058 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3059 		hci_conn_put(conn);
3060 	}
3061 
3062 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3063 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3064 			     sizeof(ev->bdaddr), &ev->bdaddr);
3065 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3066 		u8 secure;
3067 
3068 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3069 			secure = 1;
3070 		else
3071 			secure = 0;
3072 
3073 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3074 	}
3075 
3076 unlock:
3077 	hci_dev_unlock(hdev);
3078 }
3079 
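/* Link Key Request: reply with a stored key if one exists and is strong
 * enough for the pending security level, otherwise send a negative reply. */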
3080 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3081 {
3082 	struct hci_ev_link_key_req *ev = (void *) skb->data;
3083 	struct hci_cp_link_key_reply cp;
3084 	struct hci_conn *conn;
3085 	struct link_key *key;
3086 
3087 	BT_DBG("%s", hdev->name);
3088 
3089 	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
3090 		return;
3091 
3092 	hci_dev_lock(hdev);
3093 
3094 	key = hci_find_link_key(hdev, &ev->bdaddr);
3095 	if (!key) {
3096 		BT_DBG("%s link key not found for %pMR", hdev->name,
3097 		       &ev->bdaddr);
3098 		goto not_found;
3099 	}
3100 
3101 	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3102 	       &ev->bdaddr);
3103 
3104 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3105 	    key->type == HCI_LK_DEBUG_COMBINATION) {
3106 		BT_DBG("%s ignoring debug key", hdev->name);
3107 		goto not_found;
3108 	}
3109 
3110 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3111 	if (conn) {
3112 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
3113 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3114 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
3115 			goto not_found;
3116 		}
3117 
3118 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3119 		    conn->pending_sec_level == BT_SECURITY_HIGH) {
3120 			BT_DBG("%s ignoring key not strong enough for high security",
3121 			       hdev->name);
3122 			goto not_found;
3123 		}
3124 
3125 		conn->key_type = key->type;
3126 		conn->pin_length = key->pin_len;
3127 	}
3128 
3129 	bacpy(&cp.bdaddr, &ev->bdaddr);
3130 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3131 
3132 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3133 
3134 	hci_dev_unlock(hdev);
3135 
3136 	return;
3137 
3138 not_found:
3139 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3140 	hci_dev_unlock(hdev);
3141 }
3142 
3143 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3144 {
3145 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
3146 	struct hci_conn *conn;
3147 	u8 pin_len = 0;
3148 
3149 	BT_DBG("%s", hdev->name);
3150 
3151 	hci_dev_lock(hdev);
3152 
3153 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3154 	if (conn) {
3155 		hci_conn_hold(conn);
3156 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3157 		pin_len = conn->pin_length;
3158 
3159 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3160 			conn->key_type = ev->key_type;
3161 
3162 		hci_conn_put(conn);
3163 	}
3164 
3165 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
3166 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
3167 				 ev->key_type, pin_len);
3168 
3169 	hci_dev_unlock(hdev);
3170 }
3171 
3172 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3173 {
3174 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3175 	struct hci_conn *conn;
3176 
3177 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3178 
3179 	hci_dev_lock(hdev);
3180 
3181 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3182 	if (conn && !ev->status) {
3183 		struct inquiry_entry *ie;
3184 
3185 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3186 		if (ie) {
3187 			ie->data.clock_offset = ev->clock_offset;
3188 			ie->timestamp = jiffies;
3189 		}
3190 	}
3191 
3192 	hci_dev_unlock(hdev);
3193 }
3194 
3195 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3196 {
3197 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3198 	struct hci_conn *conn;
3199 
3200 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3201 
3202 	hci_dev_lock(hdev);
3203 
3204 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3205 	if (conn && !ev->status)
3206 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3207 
3208 	hci_dev_unlock(hdev);
3209 }
3210 
3211 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3212 {
3213 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3214 	struct inquiry_entry *ie;
3215 
3216 	BT_DBG("%s", hdev->name);
3217 
3218 	hci_dev_lock(hdev);
3219 
3220 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3221 	if (ie) {
3222 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3223 		ie->timestamp = jiffies;
3224 	}
3225 
3226 	hci_dev_unlock(hdev);
3227 }
3228 
3229 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3230 					     struct sk_buff *skb)
3231 {
3232 	struct inquiry_data data;
3233 	int num_rsp = *((__u8 *) skb->data);
3234 	bool name_known, ssp;
3235 
3236 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3237 
3238 	if (!num_rsp)
3239 		return;
3240 
3241 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3242 		return;
3243 
3244 	hci_dev_lock(hdev);
3245 
3246 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3247 		struct inquiry_info_with_rssi_and_pscan_mode *info;
3248 		info = (void *) (skb->data + 1);
3249 
3250 		for (; num_rsp; num_rsp--, info++) {
3251 			bacpy(&data.bdaddr, &info->bdaddr);
3252 			data.pscan_rep_mode	= info->pscan_rep_mode;
3253 			data.pscan_period_mode	= info->pscan_period_mode;
3254 			data.pscan_mode		= info->pscan_mode;
3255 			memcpy(data.dev_class, info->dev_class, 3);
3256 			data.clock_offset	= info->clock_offset;
3257 			data.rssi		= info->rssi;
3258 			data.ssp_mode		= 0x00;
3259 
3260 			name_known = hci_inquiry_cache_update(hdev, &data,
3261 							      false, &ssp);
3262 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3263 					  info->dev_class, info->rssi,
3264 					  !name_known, ssp, NULL, 0);
3265 		}
3266 	} else {
3267 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3268 
3269 		for (; num_rsp; num_rsp--, info++) {
3270 			bacpy(&data.bdaddr, &info->bdaddr);
3271 			data.pscan_rep_mode	= info->pscan_rep_mode;
3272 			data.pscan_period_mode	= info->pscan_period_mode;
3273 			data.pscan_mode		= 0x00;
3274 			memcpy(data.dev_class, info->dev_class, 3);
3275 			data.clock_offset	= info->clock_offset;
3276 			data.rssi		= info->rssi;
3277 			data.ssp_mode		= 0x00;
3278 			name_known = hci_inquiry_cache_update(hdev, &data,
3279 							      false, &ssp);
3280 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3281 					  info->dev_class, info->rssi,
3282 					  !name_known, ssp, NULL, 0);
3283 		}
3284 	}
3285 
3286 	hci_dev_unlock(hdev);
3287 }
3288 
3289 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3290 					struct sk_buff *skb)
3291 {
3292 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3293 	struct hci_conn *conn;
3294 
3295 	BT_DBG("%s", hdev->name);
3296 
3297 	hci_dev_lock(hdev);
3298 
3299 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3300 	if (!conn)
3301 		goto unlock;
3302 
3303 	if (!ev->status && ev->page == 0x01) {
3304 		struct inquiry_entry *ie;
3305 
3306 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3307 		if (ie)
3308 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3309 
3310 		if (ev->features[0] & LMP_HOST_SSP)
3311 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3312 	}
3313 
3314 	if (conn->state != BT_CONFIG)
3315 		goto unlock;
3316 
3317 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3318 		struct hci_cp_remote_name_req cp;
3319 		memset(&cp, 0, sizeof(cp));
3320 		bacpy(&cp.bdaddr, &conn->dst);
3321 		cp.pscan_rep_mode = 0x02;
3322 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3323 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3324 		mgmt_device_connected(hdev, &conn->dst, conn->type,
3325 				      conn->dst_type, 0, NULL, 0,
3326 				      conn->dev_class);
3327 
3328 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3329 		conn->state = BT_CONNECTED;
3330 		hci_proto_connect_cfm(conn, ev->status);
3331 		hci_conn_put(conn);
3332 	}
3333 
3334 unlock:
3335 	hci_dev_unlock(hdev);
3336 }
3337 
3338 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3339 				       struct sk_buff *skb)
3340 {
3341 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3342 	struct hci_conn *conn;
3343 
3344 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3345 
3346 	hci_dev_lock(hdev);
3347 
3348 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3349 	if (!conn) {
3350 		if (ev->link_type == ESCO_LINK)
3351 			goto unlock;
3352 
3353 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3354 		if (!conn)
3355 			goto unlock;
3356 
3357 		conn->type = SCO_LINK;
3358 	}
3359 
3360 	switch (ev->status) {
3361 	case 0x00:
3362 		conn->handle = __le16_to_cpu(ev->handle);
3363 		conn->state  = BT_CONNECTED;
3364 
3365 		hci_conn_hold_device(conn);
3366 		hci_conn_add_sysfs(conn);
3367 		break;
3368 
3369 	case 0x11:	/* Unsupported Feature or Parameter Value */
3370 	case 0x1c:	/* SCO interval rejected */
3371 	case 0x1a:	/* Unsupported Remote Feature */
3372 	case 0x1f:	/* Unspecified error */
3373 		if (conn->out && conn->attempt < 2) {
3374 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3375 					(hdev->esco_type & EDR_ESCO_MASK);
3376 			hci_setup_sync(conn, conn->link->handle);
3377 			goto unlock;
3378 		}
3379 		/* fall through */
3380 
3381 	default:
3382 		conn->state = BT_CLOSED;
3383 		break;
3384 	}
3385 
3386 	hci_proto_connect_cfm(conn, ev->status);
3387 	if (ev->status)
3388 		hci_conn_del(conn);
3389 
3390 unlock:
3391 	hci_dev_unlock(hdev);
3392 }
3393 
3394 static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
3395 {
3396 	BT_DBG("%s", hdev->name);
3397 }
3398 
3399 static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
3400 {
3401 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
3402 
3403 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3404 }
3405 
3406 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3407 					    struct sk_buff *skb)
3408 {
3409 	struct inquiry_data data;
3410 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3411 	int num_rsp = *((__u8 *) skb->data);
3412 	size_t eir_len;
3413 
3414 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3415 
3416 	if (!num_rsp)
3417 		return;
3418 
3419 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3420 		return;
3421 
3422 	hci_dev_lock(hdev);
3423 
3424 	for (; num_rsp; num_rsp--, info++) {
3425 		bool name_known, ssp;
3426 
3427 		bacpy(&data.bdaddr, &info->bdaddr);
3428 		data.pscan_rep_mode	= info->pscan_rep_mode;
3429 		data.pscan_period_mode	= info->pscan_period_mode;
3430 		data.pscan_mode		= 0x00;
3431 		memcpy(data.dev_class, info->dev_class, 3);
3432 		data.clock_offset	= info->clock_offset;
3433 		data.rssi		= info->rssi;
3434 		data.ssp_mode		= 0x01;
3435 
3436 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3437 			name_known = eir_has_data_type(info->data,
3438 						       sizeof(info->data),
3439 						       EIR_NAME_COMPLETE);
3440 		else
3441 			name_known = true;
3442 
3443 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3444 						      &ssp);
3445 		eir_len = eir_get_length(info->data, sizeof(info->data));
3446 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3447 				  info->dev_class, info->rssi, !name_known,
3448 				  ssp, info->data, eir_len);
3449 	}
3450 
3451 	hci_dev_unlock(hdev);
3452 }
3453 
3454 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3455 					 struct sk_buff *skb)
3456 {
3457 	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3458 	struct hci_conn *conn;
3459 
3460 	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3461 	       __le16_to_cpu(ev->handle));
3462 
3463 	hci_dev_lock(hdev);
3464 
3465 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3466 	if (!conn)
3467 		goto unlock;
3468 
3469 	if (!ev->status)
3470 		conn->sec_level = conn->pending_sec_level;
3471 
3472 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3473 
3474 	if (ev->status && conn->state == BT_CONNECTED) {
3475 		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
3476 		hci_conn_put(conn);
3477 		goto unlock;
3478 	}
3479 
3480 	if (conn->state == BT_CONFIG) {
3481 		if (!ev->status)
3482 			conn->state = BT_CONNECTED;
3483 
3484 		hci_proto_connect_cfm(conn, ev->status);
3485 		hci_conn_put(conn);
3486 	} else {
3487 		hci_auth_cfm(conn, ev->status);
3488 
3489 		hci_conn_hold(conn);
3490 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3491 		hci_conn_put(conn);
3492 	}
3493 
3494 unlock:
3495 	hci_dev_unlock(hdev);
3496 }
3497 
3498 static u8 hci_get_auth_req(struct hci_conn *conn)
3499 {
3500 	/* If remote requests dedicated bonding, follow that lead */
3501 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3502 		/* If both remote and local IO capabilities allow MITM
3503 		 * protection then require it, otherwise don't */
3504 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3505 			return 0x02;
3506 		else
3507 			return 0x03;
3508 	}
3509 
3510 	/* If remote requests no-bonding, follow that lead */
3511 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3512 		return conn->remote_auth | (conn->auth_type & 0x01);
3513 
3514 	return conn->auth_type;
3515 }
3516 
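/* IO Capability Request: reply with our capabilities and authentication
 * requirements, or reject pairing when we are not pairable. */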
3517 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3518 {
3519 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3520 	struct hci_conn *conn;
3521 
3522 	BT_DBG("%s", hdev->name);
3523 
3524 	hci_dev_lock(hdev);
3525 
3526 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3527 	if (!conn)
3528 		goto unlock;
3529 
3530 	hci_conn_hold(conn);
3531 
3532 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3533 		goto unlock;
3534 
3535 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3536 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3537 		struct hci_cp_io_capability_reply cp;
3538 
3539 		bacpy(&cp.bdaddr, &ev->bdaddr);
3540 		/* Change the IO capability from KeyboardDisplay
3541 		 * to DisplayYesNo as it is not supported by the BT spec. */
3542 		cp.capability = (conn->io_capability == 0x04) ?
3543 						0x01 : conn->io_capability;
3544 		conn->auth_type = hci_get_auth_req(conn);
3545 		cp.authentication = conn->auth_type;
3546 
3547 		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3548 		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3549 			cp.oob_data = 0x01;
3550 		else
3551 			cp.oob_data = 0x00;
3552 
3553 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3554 			     sizeof(cp), &cp);
3555 	} else {
3556 		struct hci_cp_io_capability_neg_reply cp;
3557 
3558 		bacpy(&cp.bdaddr, &ev->bdaddr);
3559 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3560 
3561 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3562 			     sizeof(cp), &cp);
3563 	}
3564 
3565 unlock:
3566 	hci_dev_unlock(hdev);
3567 }
3568 
3569 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3570 {
3571 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3572 	struct hci_conn *conn;
3573 
3574 	BT_DBG("%s", hdev->name);
3575 
3576 	hci_dev_lock(hdev);
3577 
3578 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3579 	if (!conn)
3580 		goto unlock;
3581 
3582 	conn->remote_cap = ev->capability;
3583 	conn->remote_auth = ev->authentication;
3584 	if (ev->oob_data)
3585 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3586 
3587 unlock:
3588 	hci_dev_unlock(hdev);
3589 }
3590 
3591 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3592 					 struct sk_buff *skb)
3593 {
3594 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3595 	int loc_mitm, rem_mitm, confirm_hint = 0;
3596 	struct hci_conn *conn;
3597 
3598 	BT_DBG("%s", hdev->name);
3599 
3600 	hci_dev_lock(hdev);
3601 
3602 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3603 		goto unlock;
3604 
3605 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3606 	if (!conn)
3607 		goto unlock;
3608 
3609 	loc_mitm = (conn->auth_type & 0x01);
3610 	rem_mitm = (conn->remote_auth & 0x01);
3611 
3612 	/* If we require MITM but the remote device can't provide that
3613 	 * (it has NoInputNoOutput) then reject the confirmation
3614 	 * request. The only exception is when we're dedicated bonding
3615 	 * initiators (connect_cfm_cb set) since then we always have the MITM
3616 	 * bit set. */
3617 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3618 		BT_DBG("Rejecting request: remote device can't provide MITM");
3619 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3620 			     sizeof(ev->bdaddr), &ev->bdaddr);
3621 		goto unlock;
3622 	}
3623 
3624 	/* If no side requires MITM protection, auto-accept */
3625 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3626 	    (!rem_mitm || conn->io_capability == 0x03)) {
3627 
3628 		/* If we're not the initiator, request authorization to
3629 		 * proceed from user space (mgmt_user_confirm with
3630 		 * confirm_hint set to 1). */
3631 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3632 			BT_DBG("Confirming auto-accept as acceptor");
3633 			confirm_hint = 1;
3634 			goto confirm;
3635 		}
3636 
3637 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3638 		       hdev->auto_accept_delay);
3639 
3640 		if (hdev->auto_accept_delay > 0) {
3641 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3642 			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3643 			goto unlock;
3644 		}
3645 
3646 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3647 			     sizeof(ev->bdaddr), &ev->bdaddr);
3648 		goto unlock;
3649 	}
3650 
3651 confirm:
3652 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3653 				  confirm_hint);
3654 
3655 unlock:
3656 	hci_dev_unlock(hdev);
3657 }
3658 
3659 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3660 					 struct sk_buff *skb)
3661 {
3662 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3663 
3664 	BT_DBG("%s", hdev->name);
3665 
3666 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3667 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3668 }
3669 
3670 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3671 					struct sk_buff *skb)
3672 {
3673 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3674 	struct hci_conn *conn;
3675 
3676 	BT_DBG("%s", hdev->name);
3677 
3678 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3679 	if (!conn)
3680 		return;
3681 
3682 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3683 	conn->passkey_entered = 0;
3684 
3685 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3686 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3687 					 conn->dst_type, conn->passkey_notify,
3688 					 conn->passkey_entered);
3689 }
3690 
3691 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3692 {
3693 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3694 	struct hci_conn *conn;
3695 
3696 	BT_DBG("%s", hdev->name);
3697 
3698 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3699 	if (!conn)
3700 		return;
3701 
3702 	switch (ev->type) {
3703 	case HCI_KEYPRESS_STARTED:
3704 		conn->passkey_entered = 0;
3705 		return;
3706 
3707 	case HCI_KEYPRESS_ENTERED:
3708 		conn->passkey_entered++;
3709 		break;
3710 
3711 	case HCI_KEYPRESS_ERASED:
3712 		conn->passkey_entered--;
3713 		break;
3714 
3715 	case HCI_KEYPRESS_CLEARED:
3716 		conn->passkey_entered = 0;
3717 		break;
3718 
3719 	case HCI_KEYPRESS_COMPLETED:
3720 		return;
3721 	}
3722 
3723 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3724 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3725 					 conn->dst_type, conn->passkey_notify,
3726 					 conn->passkey_entered);
3727 }
3728 
3729 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3730 					 struct sk_buff *skb)
3731 {
3732 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3733 	struct hci_conn *conn;
3734 
3735 	BT_DBG("%s", hdev->name);
3736 
3737 	hci_dev_lock(hdev);
3738 
3739 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3740 	if (!conn)
3741 		goto unlock;
3742 
3743 	/* To avoid duplicate auth_failed events to user space we check
3744 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3745 	 * initiated the authentication. A traditional auth_complete
3746 	 * event is always produced as initiator and is also mapped to
3747 	 * the mgmt_auth_failed event. */
3748 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3749 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3750 				 ev->status);
3751 
3752 	hci_conn_put(conn);
3753 
3754 unlock:
3755 	hci_dev_unlock(hdev);
3756 }
3757 
3758 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3759 					 struct sk_buff *skb)
3760 {
3761 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3762 	struct inquiry_entry *ie;
3763 
3764 	BT_DBG("%s", hdev->name);
3765 
3766 	hci_dev_lock(hdev);
3767 
3768 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3769 	if (ie)
3770 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3771 
3772 	hci_dev_unlock(hdev);
3773 }
3774 
3775 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3776 					    struct sk_buff *skb)
3777 {
3778 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3779 	struct oob_data *data;
3780 
3781 	BT_DBG("%s", hdev->name);
3782 
3783 	hci_dev_lock(hdev);
3784 
3785 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3786 		goto unlock;
3787 
3788 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3789 	if (data) {
3790 		struct hci_cp_remote_oob_data_reply cp;
3791 
3792 		bacpy(&cp.bdaddr, &ev->bdaddr);
3793 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3794 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3795 
3796 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3797 			     &cp);
3798 	} else {
3799 		struct hci_cp_remote_oob_data_neg_reply cp;
3800 
3801 		bacpy(&cp.bdaddr, &ev->bdaddr);
3802 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3803 			     &cp);
3804 	}
3805 
3806 unlock:
3807 	hci_dev_unlock(hdev);
3808 }
3809 
3810 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3811 				      struct sk_buff *skb)
3812 {
3813 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3814 	struct hci_conn *hcon, *bredr_hcon;
3815 
3816 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3817 	       ev->status);
3818 
3819 	hci_dev_lock(hdev);
3820 
3821 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3822 	if (!hcon) {
3823 		hci_dev_unlock(hdev);
3824 		return;
3825 	}
3826 
3827 	if (ev->status) {
3828 		hci_conn_del(hcon);
3829 		hci_dev_unlock(hdev);
3830 		return;
3831 	}
3832 
3833 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3834 
3835 	hcon->state = BT_CONNECTED;
3836 	bacpy(&hcon->dst, &bredr_hcon->dst);
3837 
3838 	hci_conn_hold(hcon);
3839 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3840 	hci_conn_put(hcon);
3841 
3842 	hci_conn_hold_device(hcon);
3843 	hci_conn_add_sysfs(hcon);
3844 
3845 	amp_physical_cfm(bredr_hcon, hcon);
3846 
3847 	hci_dev_unlock(hdev);
3848 }
3849 
3850 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3851 {
3852 	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3853 	struct hci_conn *hcon;
3854 	struct hci_chan *hchan;
3855 	struct amp_mgr *mgr;
3856 
3857 	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3858 	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3859 	       ev->status);
3860 
3861 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3862 	if (!hcon)
3863 		return;
3864 
3865 	/* Create AMP hchan */
3866 	hchan = hci_chan_create(hcon);
3867 	if (!hchan)
3868 		return;
3869 
3870 	hchan->handle = le16_to_cpu(ev->handle);
3871 
3872 	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3873 
3874 	mgr = hcon->amp_mgr;
3875 	if (mgr && mgr->bredr_chan) {
3876 		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3877 
3878 		l2cap_chan_lock(bredr_chan);
3879 
3880 		bredr_chan->conn->mtu = hdev->block_mtu;
3881 		l2cap_logical_cfm(bredr_chan, hchan, 0);
3882 		hci_conn_hold(hcon);
3883 
3884 		l2cap_chan_unlock(bredr_chan);
3885 	}
3886 }
3887 
3888 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3889 					     struct sk_buff *skb)
3890 {
3891 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3892 	struct hci_chan *hchan;
3893 
3894 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3895 	       le16_to_cpu(ev->handle), ev->status);
3896 
3897 	if (ev->status)
3898 		return;
3899 
3900 	hci_dev_lock(hdev);
3901 
3902 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3903 	if (!hchan)
3904 		goto unlock;
3905 
3906 	amp_destroy_logical_link(hchan, ev->reason);
3907 
3908 unlock:
3909 	hci_dev_unlock(hdev);
3910 }
3911 
3912 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3913 					     struct sk_buff *skb)
3914 {
3915 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3916 	struct hci_conn *hcon;
3917 
3918 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3919 
3920 	if (ev->status)
3921 		return;
3922 
3923 	hci_dev_lock(hdev);
3924 
3925 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3926 	if (hcon) {
3927 		hcon->state = BT_CLOSED;
3928 		hci_conn_del(hcon);
3929 	}
3930 
3931 	hci_dev_unlock(hdev);
3932 }
3933 
3934 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3935 {
3936 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3937 	struct hci_conn *conn;
3938 
3939 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3940 
3941 	hci_dev_lock(hdev);
3942 
3943 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3944 	if (!conn) {
3945 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3946 		if (!conn) {
3947 			BT_ERR("No memory for new connection");
3948 			goto unlock;
3949 		}
3950 
3951 		conn->dst_type = ev->bdaddr_type;
3952 
3953 		if (ev->role == LE_CONN_ROLE_MASTER) {
3954 			conn->out = true;
3955 			conn->link_mode |= HCI_LM_MASTER;
3956 		}
3957 	}
3958 
3959 	if (ev->status) {
3960 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
3961 				    conn->dst_type, ev->status);
3962 		hci_proto_connect_cfm(conn, ev->status);
3963 		conn->state = BT_CLOSED;
3964 		hci_conn_del(conn);
3965 		goto unlock;
3966 	}
3967 
3968 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3969 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3970 				      conn->dst_type, 0, NULL, 0, NULL);
3971 
3972 	conn->sec_level = BT_SECURITY_LOW;
3973 	conn->handle = __le16_to_cpu(ev->handle);
3974 	conn->state = BT_CONNECTED;
3975 
3976 	hci_conn_hold_device(conn);
3977 	hci_conn_add_sysfs(conn);
3978 
3979 	hci_proto_connect_cfm(conn, ev->status);
3980 
3981 unlock:
3982 	hci_dev_unlock(hdev);
3983 }
3984 
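/* LE Advertising Report: forward each report to the management interface
 * as a found device.
 */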
3985 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3986 {
3987 	u8 num_reports = skb->data[0];
3988 	void *ptr = &skb->data[1];
3989 	s8 rssi;
3990 
3991 	while (num_reports--) {
3992 		struct hci_ev_le_advertising_info *ev = ptr;
3993 
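		/* The RSSI byte immediately follows the advertising data */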
3994 		rssi = ev->data[ev->length];
3995 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3996 				  NULL, rssi, 0, 1, ev->data, ev->length);
3997 
3998 		ptr += sizeof(*ev) + ev->length + 1;
3999 	}
4000 }
4001 
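/* LE Long Term Key Request: look up the key matching the EDIV and Rand
 * values and reply with it, or send a negative reply if none is known.
 */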
4002 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4003 {
4004 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4005 	struct hci_cp_le_ltk_reply cp;
4006 	struct hci_cp_le_ltk_neg_reply neg;
4007 	struct hci_conn *conn;
4008 	struct smp_ltk *ltk;
4009 
4010 	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4011 
4012 	hci_dev_lock(hdev);
4013 
4014 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4015 	if (!conn)
4016 		goto not_found;
4017 
4018 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
4019 	if (!ltk)
4020 		goto not_found;
4021 
4022 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4023 	cp.handle = cpu_to_le16(conn->handle);
4024 
4025 	if (ltk->authenticated)
4026 		conn->sec_level = BT_SECURITY_HIGH;
4027 
4028 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4029 
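	/* A Short Term Key is only valid for one encryption; remove it once
	 * the reply has been sent.
	 */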
4030 	if (ltk->type & HCI_SMP_STK) {
4031 		list_del(&ltk->list);
4032 		kfree(ltk);
4033 	}
4034 
4035 	hci_dev_unlock(hdev);
4036 
4037 	return;
4038 
4039 not_found:
4040 	neg.handle = ev->handle;
4041 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4042 	hci_dev_unlock(hdev);
4043 }
4044 
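/* LE Meta event: the subevent code selects which LE handler runs. */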
4045 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4046 {
4047 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4048 
4049 	skb_pull(skb, sizeof(*le_ev));
4050 
4051 	switch (le_ev->subevent) {
4052 	case HCI_EV_LE_CONN_COMPLETE:
4053 		hci_le_conn_complete_evt(hdev, skb);
4054 		break;
4055 
4056 	case HCI_EV_LE_ADVERTISING_REPORT:
4057 		hci_le_adv_report_evt(hdev, skb);
4058 		break;
4059 
4060 	case HCI_EV_LE_LTK_REQ:
4061 		hci_le_ltk_request_evt(hdev, skb);
4062 		break;
4063 
4064 	default:
4065 		break;
4066 	}
4067 }
4068 
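/* Channel Selected: read the final local AMP assoc data for this physical link. */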
4069 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4070 {
4071 	struct hci_ev_channel_selected *ev = (void *) skb->data;
4072 	struct hci_conn *hcon;
4073 
4074 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4075 
4076 	skb_pull(skb, sizeof(*ev));
4077 
4078 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4079 	if (!hcon)
4080 		return;
4081 
4082 	amp_read_loc_assoc_final_data(hdev, hcon);
4083 }
4084 
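/* Main HCI event dispatcher: strip the event header and pass the
 * remaining payload to the handler for this event code.
 */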
4085 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4086 {
4087 	struct hci_event_hdr *hdr = (void *) skb->data;
4088 	__u8 event = hdr->evt;
4089 
4090 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
4091 
4092 	switch (event) {
4093 	case HCI_EV_INQUIRY_COMPLETE:
4094 		hci_inquiry_complete_evt(hdev, skb);
4095 		break;
4096 
4097 	case HCI_EV_INQUIRY_RESULT:
4098 		hci_inquiry_result_evt(hdev, skb);
4099 		break;
4100 
4101 	case HCI_EV_CONN_COMPLETE:
4102 		hci_conn_complete_evt(hdev, skb);
4103 		break;
4104 
4105 	case HCI_EV_CONN_REQUEST:
4106 		hci_conn_request_evt(hdev, skb);
4107 		break;
4108 
4109 	case HCI_EV_DISCONN_COMPLETE:
4110 		hci_disconn_complete_evt(hdev, skb);
4111 		break;
4112 
4113 	case HCI_EV_AUTH_COMPLETE:
4114 		hci_auth_complete_evt(hdev, skb);
4115 		break;
4116 
4117 	case HCI_EV_REMOTE_NAME:
4118 		hci_remote_name_evt(hdev, skb);
4119 		break;
4120 
4121 	case HCI_EV_ENCRYPT_CHANGE:
4122 		hci_encrypt_change_evt(hdev, skb);
4123 		break;
4124 
4125 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4126 		hci_change_link_key_complete_evt(hdev, skb);
4127 		break;
4128 
4129 	case HCI_EV_REMOTE_FEATURES:
4130 		hci_remote_features_evt(hdev, skb);
4131 		break;
4132 
4133 	case HCI_EV_REMOTE_VERSION:
4134 		hci_remote_version_evt(hdev, skb);
4135 		break;
4136 
4137 	case HCI_EV_QOS_SETUP_COMPLETE:
4138 		hci_qos_setup_complete_evt(hdev, skb);
4139 		break;
4140 
4141 	case HCI_EV_CMD_COMPLETE:
4142 		hci_cmd_complete_evt(hdev, skb);
4143 		break;
4144 
4145 	case HCI_EV_CMD_STATUS:
4146 		hci_cmd_status_evt(hdev, skb);
4147 		break;
4148 
4149 	case HCI_EV_ROLE_CHANGE:
4150 		hci_role_change_evt(hdev, skb);
4151 		break;
4152 
4153 	case HCI_EV_NUM_COMP_PKTS:
4154 		hci_num_comp_pkts_evt(hdev, skb);
4155 		break;
4156 
4157 	case HCI_EV_MODE_CHANGE:
4158 		hci_mode_change_evt(hdev, skb);
4159 		break;
4160 
4161 	case HCI_EV_PIN_CODE_REQ:
4162 		hci_pin_code_request_evt(hdev, skb);
4163 		break;
4164 
4165 	case HCI_EV_LINK_KEY_REQ:
4166 		hci_link_key_request_evt(hdev, skb);
4167 		break;
4168 
4169 	case HCI_EV_LINK_KEY_NOTIFY:
4170 		hci_link_key_notify_evt(hdev, skb);
4171 		break;
4172 
4173 	case HCI_EV_CLOCK_OFFSET:
4174 		hci_clock_offset_evt(hdev, skb);
4175 		break;
4176 
4177 	case HCI_EV_PKT_TYPE_CHANGE:
4178 		hci_pkt_type_change_evt(hdev, skb);
4179 		break;
4180 
4181 	case HCI_EV_PSCAN_REP_MODE:
4182 		hci_pscan_rep_mode_evt(hdev, skb);
4183 		break;
4184 
4185 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4186 		hci_inquiry_result_with_rssi_evt(hdev, skb);
4187 		break;
4188 
4189 	case HCI_EV_REMOTE_EXT_FEATURES:
4190 		hci_remote_ext_features_evt(hdev, skb);
4191 		break;
4192 
4193 	case HCI_EV_SYNC_CONN_COMPLETE:
4194 		hci_sync_conn_complete_evt(hdev, skb);
4195 		break;
4196 
4197 	case HCI_EV_SYNC_CONN_CHANGED:
4198 		hci_sync_conn_changed_evt(hdev, skb);
4199 		break;
4200 
4201 	case HCI_EV_SNIFF_SUBRATE:
4202 		hci_sniff_subrate_evt(hdev, skb);
4203 		break;
4204 
4205 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
4206 		hci_extended_inquiry_result_evt(hdev, skb);
4207 		break;
4208 
4209 	case HCI_EV_KEY_REFRESH_COMPLETE:
4210 		hci_key_refresh_complete_evt(hdev, skb);
4211 		break;
4212 
4213 	case HCI_EV_IO_CAPA_REQUEST:
4214 		hci_io_capa_request_evt(hdev, skb);
4215 		break;
4216 
4217 	case HCI_EV_IO_CAPA_REPLY:
4218 		hci_io_capa_reply_evt(hdev, skb);
4219 		break;
4220 
4221 	case HCI_EV_USER_CONFIRM_REQUEST:
4222 		hci_user_confirm_request_evt(hdev, skb);
4223 		break;
4224 
4225 	case HCI_EV_USER_PASSKEY_REQUEST:
4226 		hci_user_passkey_request_evt(hdev, skb);
4227 		break;
4228 
4229 	case HCI_EV_USER_PASSKEY_NOTIFY:
4230 		hci_user_passkey_notify_evt(hdev, skb);
4231 		break;
4232 
4233 	case HCI_EV_KEYPRESS_NOTIFY:
4234 		hci_keypress_notify_evt(hdev, skb);
4235 		break;
4236 
4237 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
4238 		hci_simple_pair_complete_evt(hdev, skb);
4239 		break;
4240 
4241 	case HCI_EV_REMOTE_HOST_FEATURES:
4242 		hci_remote_host_features_evt(hdev, skb);
4243 		break;
4244 
4245 	case HCI_EV_LE_META:
4246 		hci_le_meta_evt(hdev, skb);
4247 		break;
4248 
4249 	case HCI_EV_CHANNEL_SELECTED:
4250 		hci_chan_selected_evt(hdev, skb);
4251 		break;
4252 
4253 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4254 		hci_remote_oob_data_request_evt(hdev, skb);
4255 		break;
4256 
4257 	case HCI_EV_PHY_LINK_COMPLETE:
4258 		hci_phy_link_complete_evt(hdev, skb);
4259 		break;
4260 
4261 	case HCI_EV_LOGICAL_LINK_COMPLETE:
4262 		hci_loglink_complete_evt(hdev, skb);
4263 		break;
4264 
4265 	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4266 		hci_disconn_loglink_complete_evt(hdev, skb);
4267 		break;
4268 
4269 	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4270 		hci_disconn_phylink_complete_evt(hdev, skb);
4271 		break;
4272 
4273 	case HCI_EV_NUM_COMP_BLOCKS:
4274 		hci_num_comp_blocks_evt(hdev, skb);
4275 		break;
4276 
4277 	default:
4278 		BT_DBG("%s event 0x%2.2x", hdev->name, event);
4279 		break;
4280 	}
4281 
4282 	kfree_skb(skb);
4283 	hdev->stat.evt_rx++;
4284 }
4285