xref: /linux/net/bluetooth/hci_event.c (revision 9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <linux/module.h>
28 
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40 
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44 
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47 
/* Module-level switch: when true, hci_set_le_support() advertises LE
 * host support to the controller.  Defaults to false. */
static bool enable_le;
49 
50 /* Handle HCI Event packets */
51 
52 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53 {
54 	__u8 status = *((__u8 *) skb->data);
55 
56 	BT_DBG("%s status 0x%x", hdev->name, status);
57 
58 	if (status) {
59 		hci_dev_lock(hdev);
60 		mgmt_stop_discovery_failed(hdev, status);
61 		hci_dev_unlock(hdev);
62 		return;
63 	}
64 
65 	clear_bit(HCI_INQUIRY, &hdev->flags);
66 
67 	hci_dev_lock(hdev);
68 	mgmt_discovering(hdev, 0);
69 	hci_dev_unlock(hdev);
70 
71 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
72 
73 	hci_conn_check_pending(hdev);
74 }
75 
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 	__u8 status = *((__u8 *) skb->data);
79 
80 	BT_DBG("%s status 0x%x", hdev->name, status);
81 
82 	if (status)
83 		return;
84 
85 	hci_conn_check_pending(hdev);
86 }
87 
88 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89 {
90 	BT_DBG("%s", hdev->name);
91 }
92 
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 	struct hci_rp_role_discovery *rp = (void *) skb->data;
96 	struct hci_conn *conn;
97 
98 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
99 
100 	if (rp->status)
101 		return;
102 
103 	hci_dev_lock(hdev);
104 
105 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 	if (conn) {
107 		if (rp->role)
108 			conn->link_mode &= ~HCI_LM_MASTER;
109 		else
110 			conn->link_mode |= HCI_LM_MASTER;
111 	}
112 
113 	hci_dev_unlock(hdev);
114 }
115 
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 	struct hci_conn *conn;
120 
121 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
122 
123 	if (rp->status)
124 		return;
125 
126 	hci_dev_lock(hdev);
127 
128 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 	if (conn)
130 		conn->link_policy = __le16_to_cpu(rp->policy);
131 
132 	hci_dev_unlock(hdev);
133 }
134 
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 	struct hci_conn *conn;
139 	void *sent;
140 
141 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
142 
143 	if (rp->status)
144 		return;
145 
146 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 	if (!sent)
148 		return;
149 
150 	hci_dev_lock(hdev);
151 
152 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 	if (conn)
154 		conn->link_policy = get_unaligned_le16(sent + 2);
155 
156 	hci_dev_unlock(hdev);
157 }
158 
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162 
163 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
164 
165 	if (rp->status)
166 		return;
167 
168 	hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170 
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
172 {
173 	__u8 status = *((__u8 *) skb->data);
174 	void *sent;
175 
176 	BT_DBG("%s status 0x%x", hdev->name, status);
177 
178 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 	if (!sent)
180 		return;
181 
182 	if (!status)
183 		hdev->link_policy = get_unaligned_le16(sent);
184 
185 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187 
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 	__u8 status = *((__u8 *) skb->data);
191 
192 	BT_DBG("%s status 0x%x", hdev->name, status);
193 
194 	clear_bit(HCI_RESET, &hdev->flags);
195 
196 	hci_req_complete(hdev, HCI_OP_RESET, status);
197 
198 	hdev->dev_flags = 0;
199 }
200 
201 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 {
203 	__u8 status = *((__u8 *) skb->data);
204 	void *sent;
205 
206 	BT_DBG("%s status 0x%x", hdev->name, status);
207 
208 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
209 	if (!sent)
210 		return;
211 
212 	hci_dev_lock(hdev);
213 
214 	if (test_bit(HCI_MGMT, &hdev->flags))
215 		mgmt_set_local_name_complete(hdev, sent, status);
216 
217 	if (status == 0)
218 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
219 
220 	hci_dev_unlock(hdev);
221 }
222 
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 	struct hci_rp_read_local_name *rp = (void *) skb->data;
226 
227 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
228 
229 	if (rp->status)
230 		return;
231 
232 	memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
233 }
234 
235 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
236 {
237 	__u8 status = *((__u8 *) skb->data);
238 	void *sent;
239 
240 	BT_DBG("%s status 0x%x", hdev->name, status);
241 
242 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
243 	if (!sent)
244 		return;
245 
246 	if (!status) {
247 		__u8 param = *((__u8 *) sent);
248 
249 		if (param == AUTH_ENABLED)
250 			set_bit(HCI_AUTH, &hdev->flags);
251 		else
252 			clear_bit(HCI_AUTH, &hdev->flags);
253 	}
254 
255 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
256 }
257 
258 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
259 {
260 	__u8 status = *((__u8 *) skb->data);
261 	void *sent;
262 
263 	BT_DBG("%s status 0x%x", hdev->name, status);
264 
265 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
266 	if (!sent)
267 		return;
268 
269 	if (!status) {
270 		__u8 param = *((__u8 *) sent);
271 
272 		if (param)
273 			set_bit(HCI_ENCRYPT, &hdev->flags);
274 		else
275 			clear_bit(HCI_ENCRYPT, &hdev->flags);
276 	}
277 
278 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
279 }
280 
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Mirrors the scan-enable parameter that was sent into the local
 * HCI_ISCAN/HCI_PSCAN flags and notifies the management interface of
 * discoverable/connectable transitions.  On failure the error goes to
 * mgmt and any pending discoverable timeout is dropped.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Recover the parameter of the command that triggered this event */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status != 0) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Clear both flags up front, remembering the previous state so
	 * mgmt is only notified on actual transitions. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Schedule the work that turns discoverable mode back off
		 * once the (seconds-based) timeout expires. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
									to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
329 
330 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
331 {
332 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
333 
334 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
335 
336 	if (rp->status)
337 		return;
338 
339 	memcpy(hdev->dev_class, rp->dev_class, 3);
340 
341 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
342 		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
343 }
344 
345 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
346 {
347 	__u8 status = *((__u8 *) skb->data);
348 	void *sent;
349 
350 	BT_DBG("%s status 0x%x", hdev->name, status);
351 
352 	if (status)
353 		return;
354 
355 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
356 	if (!sent)
357 		return;
358 
359 	memcpy(hdev->dev_class, sent, 3);
360 }
361 
362 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
363 {
364 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
365 	__u16 setting;
366 
367 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
368 
369 	if (rp->status)
370 		return;
371 
372 	setting = __le16_to_cpu(rp->voice_setting);
373 
374 	if (hdev->voice_setting == setting)
375 		return;
376 
377 	hdev->voice_setting = setting;
378 
379 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
380 
381 	if (hdev->notify) {
382 		tasklet_disable(&hdev->tx_task);
383 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
384 		tasklet_enable(&hdev->tx_task);
385 	}
386 }
387 
388 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
389 {
390 	__u8 status = *((__u8 *) skb->data);
391 	__u16 setting;
392 	void *sent;
393 
394 	BT_DBG("%s status 0x%x", hdev->name, status);
395 
396 	if (status)
397 		return;
398 
399 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
400 	if (!sent)
401 		return;
402 
403 	setting = get_unaligned_le16(sent);
404 
405 	if (hdev->voice_setting == setting)
406 		return;
407 
408 	hdev->voice_setting = setting;
409 
410 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
411 
412 	if (hdev->notify) {
413 		tasklet_disable(&hdev->tx_task);
414 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
415 		tasklet_enable(&hdev->tx_task);
416 	}
417 }
418 
419 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
420 {
421 	__u8 status = *((__u8 *) skb->data);
422 
423 	BT_DBG("%s status 0x%x", hdev->name, status);
424 
425 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
426 }
427 
428 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
429 {
430 	struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
431 
432 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
433 
434 	if (rp->status)
435 		return;
436 
437 	hdev->ssp_mode = rp->mode;
438 }
439 
440 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
441 {
442 	__u8 status = *((__u8 *) skb->data);
443 	void *sent;
444 
445 	BT_DBG("%s status 0x%x", hdev->name, status);
446 
447 	if (status)
448 		return;
449 
450 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
451 	if (!sent)
452 		return;
453 
454 	hdev->ssp_mode = *((__u8 *) sent);
455 }
456 
457 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
458 {
459 	if (hdev->features[6] & LMP_EXT_INQ)
460 		return 2;
461 
462 	if (hdev->features[3] & LMP_RSSI_INQ)
463 		return 1;
464 
465 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
466 						hdev->lmp_subver == 0x0757)
467 		return 1;
468 
469 	if (hdev->manufacturer == 15) {
470 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
471 			return 1;
472 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
473 			return 1;
474 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
475 			return 1;
476 	}
477 
478 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
479 						hdev->lmp_subver == 0x1805)
480 		return 1;
481 
482 	return 0;
483 }
484 
485 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
486 {
487 	u8 mode;
488 
489 	mode = hci_get_inquiry_mode(hdev);
490 
491 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
492 }
493 
494 static void hci_setup_event_mask(struct hci_dev *hdev)
495 {
496 	/* The second byte is 0xff instead of 0x9f (two reserved bits
497 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
498 	 * command otherwise */
499 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
500 
501 	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
502 	 * any event mask for pre 1.2 devices */
503 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
504 		return;
505 
506 	events[4] |= 0x01; /* Flow Specification Complete */
507 	events[4] |= 0x02; /* Inquiry Result with RSSI */
508 	events[4] |= 0x04; /* Read Remote Extended Features Complete */
509 	events[5] |= 0x08; /* Synchronous Connection Complete */
510 	events[5] |= 0x10; /* Synchronous Connection Changed */
511 
512 	if (hdev->features[3] & LMP_RSSI_INQ)
513 		events[4] |= 0x04; /* Inquiry Result with RSSI */
514 
515 	if (hdev->features[5] & LMP_SNIFF_SUBR)
516 		events[5] |= 0x20; /* Sniff Subrating */
517 
518 	if (hdev->features[5] & LMP_PAUSE_ENC)
519 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
520 
521 	if (hdev->features[6] & LMP_EXT_INQ)
522 		events[5] |= 0x40; /* Extended Inquiry Result */
523 
524 	if (hdev->features[6] & LMP_NO_FLUSH)
525 		events[7] |= 0x01; /* Enhanced Flush Complete */
526 
527 	if (hdev->features[7] & LMP_LSTO)
528 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
529 
530 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
531 		events[6] |= 0x01;	/* IO Capability Request */
532 		events[6] |= 0x02;	/* IO Capability Response */
533 		events[6] |= 0x04;	/* User Confirmation Request */
534 		events[6] |= 0x08;	/* User Passkey Request */
535 		events[6] |= 0x10;	/* Remote OOB Data Request */
536 		events[6] |= 0x20;	/* Simple Pairing Complete */
537 		events[7] |= 0x04;	/* User Passkey Notification */
538 		events[7] |= 0x08;	/* Keypress Notification */
539 		events[7] |= 0x10;	/* Remote Host Supported
540 					 * Features Notification */
541 	}
542 
543 	if (hdev->features[4] & LMP_LE)
544 		events[7] |= 0x20;	/* LE Meta-Event */
545 
546 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
547 }
548 
549 static void hci_set_le_support(struct hci_dev *hdev)
550 {
551 	struct hci_cp_write_le_host_supported cp;
552 
553 	memset(&cp, 0, sizeof(cp));
554 
555 	if (enable_le) {
556 		cp.le = 1;
557 		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
558 	}
559 
560 	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
561 }
562 
563 static void hci_setup(struct hci_dev *hdev)
564 {
565 	hci_setup_event_mask(hdev);
566 
567 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
568 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
569 
570 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
571 		u8 mode = 0x01;
572 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
573 	}
574 
575 	if (hdev->features[3] & LMP_RSSI_INQ)
576 		hci_setup_inquiry_mode(hdev);
577 
578 	if (hdev->features[7] & LMP_INQ_TX_PWR)
579 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
580 
581 	if (hdev->features[7] & LMP_EXTFEATURES) {
582 		struct hci_cp_read_local_ext_features cp;
583 
584 		cp.page = 0x01;
585 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
586 							sizeof(cp), &cp);
587 	}
588 
589 	if (hdev->features[4] & LMP_LE)
590 		hci_set_le_support(hdev);
591 }
592 
593 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
594 {
595 	struct hci_rp_read_local_version *rp = (void *) skb->data;
596 
597 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
598 
599 	if (rp->status)
600 		return;
601 
602 	hdev->hci_ver = rp->hci_ver;
603 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
604 	hdev->lmp_ver = rp->lmp_ver;
605 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
606 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
607 
608 	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
609 					hdev->manufacturer,
610 					hdev->hci_ver, hdev->hci_rev);
611 
612 	if (test_bit(HCI_INIT, &hdev->flags))
613 		hci_setup(hdev);
614 }
615 
616 static void hci_setup_link_policy(struct hci_dev *hdev)
617 {
618 	u16 link_policy = 0;
619 
620 	if (hdev->features[0] & LMP_RSWITCH)
621 		link_policy |= HCI_LP_RSWITCH;
622 	if (hdev->features[0] & LMP_HOLD)
623 		link_policy |= HCI_LP_HOLD;
624 	if (hdev->features[0] & LMP_SNIFF)
625 		link_policy |= HCI_LP_SNIFF;
626 	if (hdev->features[1] & LMP_PARK)
627 		link_policy |= HCI_LP_PARK;
628 
629 	link_policy = cpu_to_le16(link_policy);
630 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
631 					sizeof(link_policy), &link_policy);
632 }
633 
634 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
635 {
636 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
637 
638 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
639 
640 	if (rp->status)
641 		goto done;
642 
643 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
644 
645 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
646 		hci_setup_link_policy(hdev);
647 
648 done:
649 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
650 }
651 
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES: cache the
 * LMP feature bits and derive the ACL/SCO/eSCO packet types the
 * controller can use from them. */
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	/* 3-slot EDR eSCO implies both 2 Mb/s and 3 Mb/s 5-slot types */
	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
					hdev->features[0], hdev->features[1],
					hdev->features[2], hdev->features[3],
					hdev->features[4], hdev->features[5],
					hdev->features[6], hdev->features[7]);
}
706 
707 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
708 							struct sk_buff *skb)
709 {
710 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
711 
712 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
713 
714 	if (rp->status)
715 		return;
716 
717 	memcpy(hdev->extfeatures, rp->features, 8);
718 
719 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
720 }
721 
722 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
723 						struct sk_buff *skb)
724 {
725 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
726 
727 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
728 
729 	if (rp->status)
730 		return;
731 
732 	hdev->flow_ctl_mode = rp->mode;
733 
734 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
735 }
736 
737 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
738 {
739 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
740 
741 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
742 
743 	if (rp->status)
744 		return;
745 
746 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
747 	hdev->sco_mtu  = rp->sco_mtu;
748 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
749 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
750 
751 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
752 		hdev->sco_mtu  = 64;
753 		hdev->sco_pkts = 8;
754 	}
755 
756 	hdev->acl_cnt = hdev->acl_pkts;
757 	hdev->sco_cnt = hdev->sco_pkts;
758 
759 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
760 					hdev->acl_mtu, hdev->acl_pkts,
761 					hdev->sco_mtu, hdev->sco_pkts);
762 }
763 
764 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
765 {
766 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
767 
768 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
769 
770 	if (!rp->status)
771 		bacpy(&hdev->bdaddr, &rp->bdaddr);
772 
773 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
774 }
775 
776 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
777 {
778 	__u8 status = *((__u8 *) skb->data);
779 
780 	BT_DBG("%s status 0x%x", hdev->name, status);
781 
782 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
783 }
784 
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO: cache all
 * AMP controller capabilities reported by the hardware, then complete
 * the request. */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
		struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
}
808 
809 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
810 							struct sk_buff *skb)
811 {
812 	__u8 status = *((__u8 *) skb->data);
813 
814 	BT_DBG("%s status 0x%x", hdev->name, status);
815 
816 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
817 }
818 
819 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
820 {
821 	__u8 status = *((__u8 *) skb->data);
822 
823 	BT_DBG("%s status 0x%x", hdev->name, status);
824 
825 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
826 }
827 
828 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
829 							struct sk_buff *skb)
830 {
831 	__u8 status = *((__u8 *) skb->data);
832 
833 	BT_DBG("%s status 0x%x", hdev->name, status);
834 
835 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
836 }
837 
838 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
839 							struct sk_buff *skb)
840 {
841 	__u8 status = *((__u8 *) skb->data);
842 
843 	BT_DBG("%s status 0x%x", hdev->name, status);
844 
845 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
846 }
847 
848 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
849 {
850 	__u8 status = *((__u8 *) skb->data);
851 
852 	BT_DBG("%s status 0x%x", hdev->name, status);
853 
854 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
855 }
856 
857 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
858 {
859 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
860 	struct hci_cp_pin_code_reply *cp;
861 	struct hci_conn *conn;
862 
863 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
864 
865 	hci_dev_lock(hdev);
866 
867 	if (test_bit(HCI_MGMT, &hdev->flags))
868 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
869 
870 	if (rp->status != 0)
871 		goto unlock;
872 
873 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
874 	if (!cp)
875 		goto unlock;
876 
877 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
878 	if (conn)
879 		conn->pin_length = cp->pin_len;
880 
881 unlock:
882 	hci_dev_unlock(hdev);
883 }
884 
885 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
886 {
887 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
888 
889 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
890 
891 	hci_dev_lock(hdev);
892 
893 	if (test_bit(HCI_MGMT, &hdev->flags))
894 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
895 								rp->status);
896 
897 	hci_dev_unlock(hdev);
898 }
899 
900 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
901 				       struct sk_buff *skb)
902 {
903 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
904 
905 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
906 
907 	if (rp->status)
908 		return;
909 
910 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
911 	hdev->le_pkts = rp->le_max_pkt;
912 
913 	hdev->le_cnt = hdev->le_pkts;
914 
915 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
916 
917 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
918 }
919 
920 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
921 {
922 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
923 
924 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
925 
926 	hci_dev_lock(hdev);
927 
928 	if (test_bit(HCI_MGMT, &hdev->flags))
929 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
930 								rp->status);
931 
932 	hci_dev_unlock(hdev);
933 }
934 
935 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
936 							struct sk_buff *skb)
937 {
938 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
939 
940 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
941 
942 	hci_dev_lock(hdev);
943 
944 	if (test_bit(HCI_MGMT, &hdev->flags))
945 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
946 								rp->status);
947 
948 	hci_dev_unlock(hdev);
949 }
950 
951 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
952 {
953 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
954 
955 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
956 
957 	hci_dev_lock(hdev);
958 
959 	if (test_bit(HCI_MGMT, &hdev->flags))
960 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
961 								rp->status);
962 
963 	hci_dev_unlock(hdev);
964 }
965 
966 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
967 							struct sk_buff *skb)
968 {
969 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
970 
971 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
972 
973 	hci_dev_lock(hdev);
974 
975 	if (test_bit(HCI_MGMT, &hdev->flags))
976 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
977 								rp->status);
978 
979 	hci_dev_unlock(hdev);
980 }
981 
982 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
983 							struct sk_buff *skb)
984 {
985 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
986 
987 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
988 
989 	hci_dev_lock(hdev);
990 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
991 						rp->randomizer, rp->status);
992 	hci_dev_unlock(hdev);
993 }
994 
995 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
996 {
997 	__u8 status = *((__u8 *) skb->data);
998 
999 	BT_DBG("%s status 0x%x", hdev->name, status);
1000 }
1001 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Tracks the LE scanning state in HCI_LE_SCAN and manages the cached
 * advertising entries: starting a scan clears the cache and stops the
 * expiry timer; stopping a scan re-arms the timer so stale entries
 * are flushed after ADV_CLEAR_TIMEOUT.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	if (cp->enable == 0x01) {
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		del_timer(&hdev->adv_timer);

		hci_dev_lock(hdev);
		hci_adv_entries_clear(hdev);
		hci_dev_unlock(hdev);
	} else if (cp->enable == 0x00) {
		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
	}
}
1031 
1032 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1033 {
1034 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1035 
1036 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1037 
1038 	if (rp->status)
1039 		return;
1040 
1041 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1042 }
1043 
1044 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1045 {
1046 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1047 
1048 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1049 
1050 	if (rp->status)
1051 		return;
1052 
1053 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1054 }
1055 
1056 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1057 							struct sk_buff *skb)
1058 {
1059 	struct hci_cp_read_local_ext_features cp;
1060 	__u8 status = *((__u8 *) skb->data);
1061 
1062 	BT_DBG("%s status 0x%x", hdev->name, status);
1063 
1064 	if (status)
1065 		return;
1066 
1067 	cp.page = 0x01;
1068 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1069 }
1070 
1071 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1072 {
1073 	BT_DBG("%s status 0x%x", hdev->name, status);
1074 
1075 	if (status) {
1076 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1077 		hci_conn_check_pending(hdev);
1078 		hci_dev_lock(hdev);
1079 		if (test_bit(HCI_MGMT, &hdev->flags))
1080 			mgmt_start_discovery_failed(hdev, status);
1081 		hci_dev_unlock(hdev);
1082 		return;
1083 	}
1084 
1085 	set_bit(HCI_INQUIRY, &hdev->flags);
1086 
1087 	hci_dev_lock(hdev);
1088 	mgmt_discovering(hdev, 1);
1089 	hci_dev_unlock(hdev);
1090 }
1091 
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure: if a connection object is mid-setup, either tear it down
 * (notifying the protocol layer) or, for status 0x0c on the first two
 * attempts, park it in BT_CONNECT2 for a retry.  On success: make sure
 * a connection object exists for the target address and mark it as an
 * outgoing link with the master role.
 */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c (Command Disallowed) is retried up to two
			 * attempts; anything else kills the connection */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Command accepted for an address we have no
			 * object for yet: create one now */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = 1;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1131 
1132 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1133 {
1134 	struct hci_cp_add_sco *cp;
1135 	struct hci_conn *acl, *sco;
1136 	__u16 handle;
1137 
1138 	BT_DBG("%s status 0x%x", hdev->name, status);
1139 
1140 	if (!status)
1141 		return;
1142 
1143 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1144 	if (!cp)
1145 		return;
1146 
1147 	handle = __le16_to_cpu(cp->handle);
1148 
1149 	BT_DBG("%s handle %d", hdev->name, handle);
1150 
1151 	hci_dev_lock(hdev);
1152 
1153 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1154 	if (acl) {
1155 		sco = acl->link;
1156 		if (sco) {
1157 			sco->state = BT_CLOSED;
1158 
1159 			hci_proto_connect_cfm(sco, status);
1160 			hci_conn_del(sco);
1161 		}
1162 	}
1163 
1164 	hci_dev_unlock(hdev);
1165 }
1166 
1167 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1168 {
1169 	struct hci_cp_auth_requested *cp;
1170 	struct hci_conn *conn;
1171 
1172 	BT_DBG("%s status 0x%x", hdev->name, status);
1173 
1174 	if (!status)
1175 		return;
1176 
1177 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1178 	if (!cp)
1179 		return;
1180 
1181 	hci_dev_lock(hdev);
1182 
1183 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1184 	if (conn) {
1185 		if (conn->state == BT_CONFIG) {
1186 			hci_proto_connect_cfm(conn, status);
1187 			hci_conn_put(conn);
1188 		}
1189 	}
1190 
1191 	hci_dev_unlock(hdev);
1192 }
1193 
1194 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1195 {
1196 	struct hci_cp_set_conn_encrypt *cp;
1197 	struct hci_conn *conn;
1198 
1199 	BT_DBG("%s status 0x%x", hdev->name, status);
1200 
1201 	if (!status)
1202 		return;
1203 
1204 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1205 	if (!cp)
1206 		return;
1207 
1208 	hci_dev_lock(hdev);
1209 
1210 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1211 	if (conn) {
1212 		if (conn->state == BT_CONFIG) {
1213 			hci_proto_connect_cfm(conn, status);
1214 			hci_conn_put(conn);
1215 		}
1216 	}
1217 
1218 	hci_dev_unlock(hdev);
1219 }
1220 
1221 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1222 							struct hci_conn *conn)
1223 {
1224 	if (conn->state != BT_CONFIG || !conn->out)
1225 		return 0;
1226 
1227 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1228 		return 0;
1229 
1230 	/* Only request authentication for SSP connections or non-SSP
1231 	 * devices with sec_level HIGH or if MITM protection is requested */
1232 	if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1233 				conn->pending_sec_level != BT_SECURITY_HIGH &&
1234 				!(conn->auth_type & 0x01))
1235 		return 0;
1236 
1237 	return 1;
1238 }
1239 
/* Command-status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * A zero status only means the request was accepted; the result comes
 * later in the Remote Name Request Complete event, so nothing is done
 * here.  On failure, check whether the connection setup was waiting on
 * the name lookup and issue the deferred authentication now.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending.  Note the
	 * inner cp intentionally shadows the outer command pointer. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1274 
1275 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1276 {
1277 	struct hci_cp_read_remote_features *cp;
1278 	struct hci_conn *conn;
1279 
1280 	BT_DBG("%s status 0x%x", hdev->name, status);
1281 
1282 	if (!status)
1283 		return;
1284 
1285 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1286 	if (!cp)
1287 		return;
1288 
1289 	hci_dev_lock(hdev);
1290 
1291 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1292 	if (conn) {
1293 		if (conn->state == BT_CONFIG) {
1294 			hci_proto_connect_cfm(conn, status);
1295 			hci_conn_put(conn);
1296 		}
1297 	}
1298 
1299 	hci_dev_unlock(hdev);
1300 }
1301 
1302 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1303 {
1304 	struct hci_cp_read_remote_ext_features *cp;
1305 	struct hci_conn *conn;
1306 
1307 	BT_DBG("%s status 0x%x", hdev->name, status);
1308 
1309 	if (!status)
1310 		return;
1311 
1312 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1313 	if (!cp)
1314 		return;
1315 
1316 	hci_dev_lock(hdev);
1317 
1318 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1319 	if (conn) {
1320 		if (conn->state == BT_CONFIG) {
1321 			hci_proto_connect_cfm(conn, status);
1322 			hci_conn_put(conn);
1323 		}
1324 	}
1325 
1326 	hci_dev_unlock(hdev);
1327 }
1328 
1329 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1330 {
1331 	struct hci_cp_setup_sync_conn *cp;
1332 	struct hci_conn *acl, *sco;
1333 	__u16 handle;
1334 
1335 	BT_DBG("%s status 0x%x", hdev->name, status);
1336 
1337 	if (!status)
1338 		return;
1339 
1340 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1341 	if (!cp)
1342 		return;
1343 
1344 	handle = __le16_to_cpu(cp->handle);
1345 
1346 	BT_DBG("%s handle %d", hdev->name, handle);
1347 
1348 	hci_dev_lock(hdev);
1349 
1350 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1351 	if (acl) {
1352 		sco = acl->link;
1353 		if (sco) {
1354 			sco->state = BT_CLOSED;
1355 
1356 			hci_proto_connect_cfm(sco, status);
1357 			hci_conn_del(sco);
1358 		}
1359 	}
1360 
1361 	hci_dev_unlock(hdev);
1362 }
1363 
1364 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1365 {
1366 	struct hci_cp_sniff_mode *cp;
1367 	struct hci_conn *conn;
1368 
1369 	BT_DBG("%s status 0x%x", hdev->name, status);
1370 
1371 	if (!status)
1372 		return;
1373 
1374 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1375 	if (!cp)
1376 		return;
1377 
1378 	hci_dev_lock(hdev);
1379 
1380 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1381 	if (conn) {
1382 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1383 
1384 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1385 			hci_sco_setup(conn, status);
1386 	}
1387 
1388 	hci_dev_unlock(hdev);
1389 }
1390 
1391 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1392 {
1393 	struct hci_cp_exit_sniff_mode *cp;
1394 	struct hci_conn *conn;
1395 
1396 	BT_DBG("%s status 0x%x", hdev->name, status);
1397 
1398 	if (!status)
1399 		return;
1400 
1401 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1402 	if (!cp)
1403 		return;
1404 
1405 	hci_dev_lock(hdev);
1406 
1407 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1408 	if (conn) {
1409 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1410 
1411 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1412 			hci_sco_setup(conn, status);
1413 	}
1414 
1415 	hci_dev_unlock(hdev);
1416 }
1417 
/* Command-status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On failure, close the pending outgoing LE connection.  On success,
 * make sure a connection object exists for the peer (recording its
 * address type) so the later connection complete event has something to
 * attach to.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameters of the command we sent; NULL if it was not ours. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			/* Success for a connection we were not tracking:
			 * create an outgoing LE connection object. */
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn) {
				conn->dst_type = cp->peer_addr_type;
				conn->out = 1;
			} else {
				BT_ERR("No memory for new connection");
			}
		}
	}

	hci_dev_unlock(hdev);
}
1456 
/* Command-status handler for HCI_OP_LE_START_ENC; tracing only, no
 * state is updated here. */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}
1461 
1462 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1463 {
1464 	__u8 status = *((__u8 *) skb->data);
1465 
1466 	BT_DBG("%s status %d", hdev->name, status);
1467 
1468 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1469 
1470 	hci_conn_check_pending(hdev);
1471 
1472 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1473 		return;
1474 
1475 	hci_dev_lock(hdev);
1476 	mgmt_discovering(hdev, 0);
1477 	hci_dev_unlock(hdev);
1478 }
1479 
1480 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1481 {
1482 	struct inquiry_data data;
1483 	struct inquiry_info *info = (void *) (skb->data + 1);
1484 	int num_rsp = *((__u8 *) skb->data);
1485 
1486 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1487 
1488 	if (!num_rsp)
1489 		return;
1490 
1491 	hci_dev_lock(hdev);
1492 
1493 	for (; num_rsp; num_rsp--, info++) {
1494 		bacpy(&data.bdaddr, &info->bdaddr);
1495 		data.pscan_rep_mode	= info->pscan_rep_mode;
1496 		data.pscan_period_mode	= info->pscan_period_mode;
1497 		data.pscan_mode		= info->pscan_mode;
1498 		memcpy(data.dev_class, info->dev_class, 3);
1499 		data.clock_offset	= info->clock_offset;
1500 		data.rssi		= 0x00;
1501 		data.ssp_mode		= 0x00;
1502 		hci_inquiry_cache_update(hdev, &data);
1503 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1504 						info->dev_class, 0, NULL);
1505 	}
1506 
1507 	hci_dev_unlock(hdev);
1508 }
1509 
/* Handle the HCI Connection Complete event.
 *
 * Looks up the connection by link type and bdaddr (falling back from
 * SCO to a tracked eSCO entry), records the handle on success and
 * continues the link setup; on failure the connection is torn down and
 * the failure is reported.
 */
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer a request we tracked as
		 * eSCO: retype the entry instead of dropping the event. */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		/* ACL links stay in BT_CONFIG until remote features (and
		 * possibly authentication) are handled. */
		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			mgmt_connected(hdev, &ev->bdaddr, conn->type,
							conn->dst_type);
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		/* Mirror adapter-wide auth/encrypt settings onto the new
		 * link. */
		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
							sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
	}

	/* Run any SCO setup that was waiting on this ACL. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1589 
/* Handle the HCI Connection Request event.
 *
 * Accepts the incoming connection when the adapter's link policy
 * (possibly extended by hci_proto_connect_ind) allows it and the peer
 * is not blacklisted; otherwise it is rejected.  For accepted requests
 * a connection object is created/updated and the matching accept
 * command (plain ACL/SCO or sync/eSCO) is sent.
 */
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	/* Let the protocols veto or extend the accept decision. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class for this peer. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			/* Sync connection accept with fixed bandwidth and
			 * latency values plus the adapter voice setting. */
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1663 
/* Handle the HCI Disconnection Complete event.
 *
 * On success the connection is marked closed, mgmt is notified (for ACL
 * and LE links) and the connection object is deleted; on failure only
 * the mgmt failure notification is sent and the connection is kept.
 */
static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only ACL and LE links are visible to mgmt. */
	if (conn->type == ACL_LINK || conn->type == LE_LINK) {
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
		else
			mgmt_disconnected(hdev, &conn->dst, conn->type,
							conn->dst_type);
	}

	if (ev->status == 0) {
		/* Confirm to the protocols with the disconnect reason,
		 * then drop the connection object. */
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1696 
/* Handle the HCI Authentication Complete event.
 *
 * Updates the link security state, clears the pending-auth bits and
 * either continues connection setup (BT_CONFIG) or confirms the result
 * to the requester.  A pending encryption request is issued (or failed)
 * afterwards.
 */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Re-authentication of a legacy (non-SSP) peer does not
		 * upgrade anything, so do not record it as success. */
		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
				test_bit(HCI_CONN_REAUTH_PEND,	&conn->pend)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);

	if (conn->state == BT_CONFIG) {
		/* During SSP link setup, follow successful auth with an
		 * encryption request before declaring the link connected. */
		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection alive for the disconnect timeout. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1761 
/* Handle the HCI Remote Name Request Complete event.
 *
 * Forwards a successfully resolved name to mgmt and, when an outgoing
 * connection to that peer still needs authentication, issues the
 * deferred authentication request.
 */
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
		mgmt_remote_name(hdev, &ev->bdaddr, ev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already in flight. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1792 
1793 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1794 {
1795 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
1796 	struct hci_conn *conn;
1797 
1798 	BT_DBG("%s status %d", hdev->name, ev->status);
1799 
1800 	hci_dev_lock(hdev);
1801 
1802 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1803 	if (conn) {
1804 		if (!ev->status) {
1805 			if (ev->encrypt) {
1806 				/* Encryption implies authentication */
1807 				conn->link_mode |= HCI_LM_AUTH;
1808 				conn->link_mode |= HCI_LM_ENCRYPT;
1809 				conn->sec_level = conn->pending_sec_level;
1810 			} else
1811 				conn->link_mode &= ~HCI_LM_ENCRYPT;
1812 		}
1813 
1814 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1815 
1816 		if (conn->state == BT_CONFIG) {
1817 			if (!ev->status)
1818 				conn->state = BT_CONNECTED;
1819 
1820 			hci_proto_connect_cfm(conn, ev->status);
1821 			hci_conn_put(conn);
1822 		} else
1823 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1824 	}
1825 
1826 	hci_dev_unlock(hdev);
1827 }
1828 
1829 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1830 {
1831 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1832 	struct hci_conn *conn;
1833 
1834 	BT_DBG("%s status %d", hdev->name, ev->status);
1835 
1836 	hci_dev_lock(hdev);
1837 
1838 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1839 	if (conn) {
1840 		if (!ev->status)
1841 			conn->link_mode |= HCI_LM_SECURE;
1842 
1843 		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1844 
1845 		hci_key_change_cfm(conn, ev->status);
1846 	}
1847 
1848 	hci_dev_unlock(hdev);
1849 }
1850 
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature mask and, for links still in BT_CONFIG,
 * continues setup: read the extended features page when both sides are
 * SSP capable, otherwise resolve the remote name and, if no
 * authentication is needed, finish the connection.
 */
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP capable: fetch extended features page 1 and
	 * continue setup from its completion handler. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1896 
/* Stub handler for the HCI Read Remote Version Information Complete
 * event; the payload is currently ignored. */
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
1901 
/* Stub handler for the HCI QoS Setup Complete event; the payload is
 * currently ignored. */
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
1906 
1907 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1908 {
1909 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
1910 	__u16 opcode;
1911 
1912 	skb_pull(skb, sizeof(*ev));
1913 
1914 	opcode = __le16_to_cpu(ev->opcode);
1915 
1916 	switch (opcode) {
1917 	case HCI_OP_INQUIRY_CANCEL:
1918 		hci_cc_inquiry_cancel(hdev, skb);
1919 		break;
1920 
1921 	case HCI_OP_EXIT_PERIODIC_INQ:
1922 		hci_cc_exit_periodic_inq(hdev, skb);
1923 		break;
1924 
1925 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1926 		hci_cc_remote_name_req_cancel(hdev, skb);
1927 		break;
1928 
1929 	case HCI_OP_ROLE_DISCOVERY:
1930 		hci_cc_role_discovery(hdev, skb);
1931 		break;
1932 
1933 	case HCI_OP_READ_LINK_POLICY:
1934 		hci_cc_read_link_policy(hdev, skb);
1935 		break;
1936 
1937 	case HCI_OP_WRITE_LINK_POLICY:
1938 		hci_cc_write_link_policy(hdev, skb);
1939 		break;
1940 
1941 	case HCI_OP_READ_DEF_LINK_POLICY:
1942 		hci_cc_read_def_link_policy(hdev, skb);
1943 		break;
1944 
1945 	case HCI_OP_WRITE_DEF_LINK_POLICY:
1946 		hci_cc_write_def_link_policy(hdev, skb);
1947 		break;
1948 
1949 	case HCI_OP_RESET:
1950 		hci_cc_reset(hdev, skb);
1951 		break;
1952 
1953 	case HCI_OP_WRITE_LOCAL_NAME:
1954 		hci_cc_write_local_name(hdev, skb);
1955 		break;
1956 
1957 	case HCI_OP_READ_LOCAL_NAME:
1958 		hci_cc_read_local_name(hdev, skb);
1959 		break;
1960 
1961 	case HCI_OP_WRITE_AUTH_ENABLE:
1962 		hci_cc_write_auth_enable(hdev, skb);
1963 		break;
1964 
1965 	case HCI_OP_WRITE_ENCRYPT_MODE:
1966 		hci_cc_write_encrypt_mode(hdev, skb);
1967 		break;
1968 
1969 	case HCI_OP_WRITE_SCAN_ENABLE:
1970 		hci_cc_write_scan_enable(hdev, skb);
1971 		break;
1972 
1973 	case HCI_OP_READ_CLASS_OF_DEV:
1974 		hci_cc_read_class_of_dev(hdev, skb);
1975 		break;
1976 
1977 	case HCI_OP_WRITE_CLASS_OF_DEV:
1978 		hci_cc_write_class_of_dev(hdev, skb);
1979 		break;
1980 
1981 	case HCI_OP_READ_VOICE_SETTING:
1982 		hci_cc_read_voice_setting(hdev, skb);
1983 		break;
1984 
1985 	case HCI_OP_WRITE_VOICE_SETTING:
1986 		hci_cc_write_voice_setting(hdev, skb);
1987 		break;
1988 
1989 	case HCI_OP_HOST_BUFFER_SIZE:
1990 		hci_cc_host_buffer_size(hdev, skb);
1991 		break;
1992 
1993 	case HCI_OP_READ_SSP_MODE:
1994 		hci_cc_read_ssp_mode(hdev, skb);
1995 		break;
1996 
1997 	case HCI_OP_WRITE_SSP_MODE:
1998 		hci_cc_write_ssp_mode(hdev, skb);
1999 		break;
2000 
2001 	case HCI_OP_READ_LOCAL_VERSION:
2002 		hci_cc_read_local_version(hdev, skb);
2003 		break;
2004 
2005 	case HCI_OP_READ_LOCAL_COMMANDS:
2006 		hci_cc_read_local_commands(hdev, skb);
2007 		break;
2008 
2009 	case HCI_OP_READ_LOCAL_FEATURES:
2010 		hci_cc_read_local_features(hdev, skb);
2011 		break;
2012 
2013 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2014 		hci_cc_read_local_ext_features(hdev, skb);
2015 		break;
2016 
2017 	case HCI_OP_READ_BUFFER_SIZE:
2018 		hci_cc_read_buffer_size(hdev, skb);
2019 		break;
2020 
2021 	case HCI_OP_READ_BD_ADDR:
2022 		hci_cc_read_bd_addr(hdev, skb);
2023 		break;
2024 
2025 	case HCI_OP_WRITE_CA_TIMEOUT:
2026 		hci_cc_write_ca_timeout(hdev, skb);
2027 		break;
2028 
2029 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2030 		hci_cc_read_flow_control_mode(hdev, skb);
2031 		break;
2032 
2033 	case HCI_OP_READ_LOCAL_AMP_INFO:
2034 		hci_cc_read_local_amp_info(hdev, skb);
2035 		break;
2036 
2037 	case HCI_OP_DELETE_STORED_LINK_KEY:
2038 		hci_cc_delete_stored_link_key(hdev, skb);
2039 		break;
2040 
2041 	case HCI_OP_SET_EVENT_MASK:
2042 		hci_cc_set_event_mask(hdev, skb);
2043 		break;
2044 
2045 	case HCI_OP_WRITE_INQUIRY_MODE:
2046 		hci_cc_write_inquiry_mode(hdev, skb);
2047 		break;
2048 
2049 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2050 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2051 		break;
2052 
2053 	case HCI_OP_SET_EVENT_FLT:
2054 		hci_cc_set_event_flt(hdev, skb);
2055 		break;
2056 
2057 	case HCI_OP_PIN_CODE_REPLY:
2058 		hci_cc_pin_code_reply(hdev, skb);
2059 		break;
2060 
2061 	case HCI_OP_PIN_CODE_NEG_REPLY:
2062 		hci_cc_pin_code_neg_reply(hdev, skb);
2063 		break;
2064 
2065 	case HCI_OP_READ_LOCAL_OOB_DATA:
2066 		hci_cc_read_local_oob_data_reply(hdev, skb);
2067 		break;
2068 
2069 	case HCI_OP_LE_READ_BUFFER_SIZE:
2070 		hci_cc_le_read_buffer_size(hdev, skb);
2071 		break;
2072 
2073 	case HCI_OP_USER_CONFIRM_REPLY:
2074 		hci_cc_user_confirm_reply(hdev, skb);
2075 		break;
2076 
2077 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2078 		hci_cc_user_confirm_neg_reply(hdev, skb);
2079 		break;
2080 
2081 	case HCI_OP_USER_PASSKEY_REPLY:
2082 		hci_cc_user_passkey_reply(hdev, skb);
2083 		break;
2084 
2085 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2086 		hci_cc_user_passkey_neg_reply(hdev, skb);
2087 
2088 	case HCI_OP_LE_SET_SCAN_PARAM:
2089 		hci_cc_le_set_scan_param(hdev, skb);
2090 		break;
2091 
2092 	case HCI_OP_LE_SET_SCAN_ENABLE:
2093 		hci_cc_le_set_scan_enable(hdev, skb);
2094 		break;
2095 
2096 	case HCI_OP_LE_LTK_REPLY:
2097 		hci_cc_le_ltk_reply(hdev, skb);
2098 		break;
2099 
2100 	case HCI_OP_LE_LTK_NEG_REPLY:
2101 		hci_cc_le_ltk_neg_reply(hdev, skb);
2102 		break;
2103 
2104 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2105 		hci_cc_write_le_host_supported(hdev, skb);
2106 		break;
2107 
2108 	default:
2109 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2110 		break;
2111 	}
2112 
2113 	if (ev->opcode != HCI_OP_NOP)
2114 		del_timer(&hdev->cmd_timer);
2115 
2116 	if (ev->ncmd) {
2117 		atomic_set(&hdev->cmd_cnt, 1);
2118 		if (!skb_queue_empty(&hdev->cmd_q))
2119 			tasklet_schedule(&hdev->cmd_task);
2120 	}
2121 }
2122 
/* Handle the HCI Command Status event.
 *
 * Dispatches the status to the matching hci_cs_* handler, cancels the
 * command timeout timer (except for HCI_OP_NOP) and, when the
 * controller can accept more commands and no reset is in progress,
 * restarts the command queue.
 */
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header before dispatch. */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		/* No dedicated handler; only a failure is reported. */
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, NULL, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	/* Any real command status cancels the command timeout. */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}
2204 
2205 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2206 {
2207 	struct hci_ev_role_change *ev = (void *) skb->data;
2208 	struct hci_conn *conn;
2209 
2210 	BT_DBG("%s status %d", hdev->name, ev->status);
2211 
2212 	hci_dev_lock(hdev);
2213 
2214 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2215 	if (conn) {
2216 		if (!ev->status) {
2217 			if (ev->role)
2218 				conn->link_mode &= ~HCI_LM_MASTER;
2219 			else
2220 				conn->link_mode |= HCI_LM_MASTER;
2221 		}
2222 
2223 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
2224 
2225 		hci_role_switch_cfm(conn, ev->status, ev->role);
2226 	}
2227 
2228 	hci_dev_unlock(hdev);
2229 }
2230 
/* Handle the HCI Number Of Completed Packets event.
 *
 * For each (handle, count) pair, credit the completed packets back to
 * the matching transmit quota (ACL; LE, falling back to the ACL pool
 * when no dedicated LE buffers exist; otherwise SCO) and decrease the
 * connection's in-flight counter.  The tx tasklet is then scheduled so
 * queued data can use the freed credits.
 */
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	__le16 *ptr;
	int i;

	skb_pull(skb, sizeof(*ev));

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	/* Each entry is two little-endian 16-bit values: 4 bytes. */
	if (skb->len < ev->num_hndl * 4) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	tasklet_disable(&hdev->tx_task);

	for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
		struct hci_conn *conn;
		__u16  handle, count;

		handle = get_unaligned_le16(ptr++);
		count  = get_unaligned_le16(ptr++);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->sent -= count;

			/* Clamp each quota to its configured maximum. */
			if (conn->type == ACL_LINK) {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			} else if (conn->type == LE_LINK) {
				if (hdev->le_pkts) {
					hdev->le_cnt += count;
					if (hdev->le_cnt > hdev->le_pkts)
						hdev->le_cnt = hdev->le_pkts;
				} else {
					/* No LE buffers: LE shares ACL. */
					hdev->acl_cnt += count;
					if (hdev->acl_cnt > hdev->acl_pkts)
						hdev->acl_cnt = hdev->acl_pkts;
				}
			} else {
				hdev->sco_cnt += count;
				if (hdev->sco_cnt > hdev->sco_pkts)
					hdev->sco_cnt = hdev->sco_pkts;
			}
		}
	}

	tasklet_schedule(&hdev->tx_task);

	tasklet_enable(&hdev->tx_task);
}
2285 
2286 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2287 {
2288 	struct hci_ev_mode_change *ev = (void *) skb->data;
2289 	struct hci_conn *conn;
2290 
2291 	BT_DBG("%s status %d", hdev->name, ev->status);
2292 
2293 	hci_dev_lock(hdev);
2294 
2295 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2296 	if (conn) {
2297 		conn->mode = ev->mode;
2298 		conn->interval = __le16_to_cpu(ev->interval);
2299 
2300 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
2301 			if (conn->mode == HCI_CM_ACTIVE)
2302 				conn->power_save = 1;
2303 			else
2304 				conn->power_save = 0;
2305 		}
2306 
2307 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
2308 			hci_sco_setup(conn, ev->status);
2309 	}
2310 
2311 	hci_dev_unlock(hdev);
2312 }
2313 
2314 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2315 {
2316 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2317 	struct hci_conn *conn;
2318 
2319 	BT_DBG("%s", hdev->name);
2320 
2321 	hci_dev_lock(hdev);
2322 
2323 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2324 	if (!conn)
2325 		goto unlock;
2326 
2327 	if (conn->state == BT_CONNECTED) {
2328 		hci_conn_hold(conn);
2329 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2330 		hci_conn_put(conn);
2331 	}
2332 
2333 	if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2334 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2335 					sizeof(ev->bdaddr), &ev->bdaddr);
2336 	else if (test_bit(HCI_MGMT, &hdev->flags)) {
2337 		u8 secure;
2338 
2339 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2340 			secure = 1;
2341 		else
2342 			secure = 0;
2343 
2344 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2345 	}
2346 
2347 unlock:
2348 	hci_dev_unlock(hdev);
2349 }
2350 
2351 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2352 {
2353 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2354 	struct hci_cp_link_key_reply cp;
2355 	struct hci_conn *conn;
2356 	struct link_key *key;
2357 
2358 	BT_DBG("%s", hdev->name);
2359 
2360 	if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2361 		return;
2362 
2363 	hci_dev_lock(hdev);
2364 
2365 	key = hci_find_link_key(hdev, &ev->bdaddr);
2366 	if (!key) {
2367 		BT_DBG("%s link key not found for %s", hdev->name,
2368 							batostr(&ev->bdaddr));
2369 		goto not_found;
2370 	}
2371 
2372 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2373 							batostr(&ev->bdaddr));
2374 
2375 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2376 				key->type == HCI_LK_DEBUG_COMBINATION) {
2377 		BT_DBG("%s ignoring debug key", hdev->name);
2378 		goto not_found;
2379 	}
2380 
2381 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2382 	if (conn) {
2383 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2384 				conn->auth_type != 0xff &&
2385 				(conn->auth_type & 0x01)) {
2386 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2387 			goto not_found;
2388 		}
2389 
2390 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2391 				conn->pending_sec_level == BT_SECURITY_HIGH) {
2392 			BT_DBG("%s ignoring key unauthenticated for high \
2393 							security", hdev->name);
2394 			goto not_found;
2395 		}
2396 
2397 		conn->key_type = key->type;
2398 		conn->pin_length = key->pin_len;
2399 	}
2400 
2401 	bacpy(&cp.bdaddr, &ev->bdaddr);
2402 	memcpy(cp.link_key, key->val, 16);
2403 
2404 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2405 
2406 	hci_dev_unlock(hdev);
2407 
2408 	return;
2409 
2410 not_found:
2411 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2412 	hci_dev_unlock(hdev);
2413 }
2414 
/* Link Key Notification event: the controller reports a new or changed
 * link key for a remote device.  Update the connection state and, when
 * the kernel manages link keys, store the key.
 */
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Hold a reference while connection state is updated */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A "changed combination key" notification does not reveal
		 * how the key was originally generated, so keep the old
		 * key type in that case */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	/* NOTE(review): conn may be NULL here; presumably
	 * hci_add_link_key() tolerates that — confirm in hci_core. */
	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
							ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2443 
2444 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2445 {
2446 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2447 	struct hci_conn *conn;
2448 
2449 	BT_DBG("%s status %d", hdev->name, ev->status);
2450 
2451 	hci_dev_lock(hdev);
2452 
2453 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2454 	if (conn && !ev->status) {
2455 		struct inquiry_entry *ie;
2456 
2457 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2458 		if (ie) {
2459 			ie->data.clock_offset = ev->clock_offset;
2460 			ie->timestamp = jiffies;
2461 		}
2462 	}
2463 
2464 	hci_dev_unlock(hdev);
2465 }
2466 
2467 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2468 {
2469 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2470 	struct hci_conn *conn;
2471 
2472 	BT_DBG("%s status %d", hdev->name, ev->status);
2473 
2474 	hci_dev_lock(hdev);
2475 
2476 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2477 	if (conn && !ev->status)
2478 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2479 
2480 	hci_dev_unlock(hdev);
2481 }
2482 
2483 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2484 {
2485 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2486 	struct inquiry_entry *ie;
2487 
2488 	BT_DBG("%s", hdev->name);
2489 
2490 	hci_dev_lock(hdev);
2491 
2492 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2493 	if (ie) {
2494 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2495 		ie->timestamp = jiffies;
2496 	}
2497 
2498 	hci_dev_unlock(hdev);
2499 }
2500 
/* Inquiry Result with RSSI event.  This event exists in two wire
 * formats — with and without a pscan_mode byte per response — which are
 * told apart by dividing the remaining payload size by the number of
 * responses.  Each response is added to the inquiry cache and forwarded
 * to the management interface.
 *
 * NOTE(review): num_rsp is taken from the packet and only validated
 * implicitly by the size division; a malformed event with an inflated
 * num_rsp would read past the buffer — confirm callers guarantee a
 * sane length.
 */
static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	/* Per-response size other than the standard struct implies the
	 * variant that carries an extra pscan_mode field */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						NULL);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						NULL);
		}
	}

	hci_dev_unlock(hdev);
}
2552 
/* Read Remote Extended Features Complete event.  Feature page 0x01
 * carries the remote host features, including the SSP support bit.
 * While the connection is still in BT_CONFIG this event also drives the
 * next setup steps: a remote name request and, unless outgoing
 * authentication is needed first, completion of the connection.
 */
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Record SSP support from page 1, bit 0, both on the connection
	 * and in the inquiry cache */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		conn->ssp_mode = (ev->features[0] & 0x01);
	}

	/* The remaining steps only apply while connection setup is still
	 * in progress */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	/* If no authentication is required for this outgoing connection,
	 * setup is complete: report it and drop the setup reference */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2596 
/* Synchronous Connection Complete event: a SCO/eSCO setup finished.  On
 * success activate the connection; for a set of known negotiation
 * failures retry once with a downgraded packet type; anything else
 * closes the connection.
 */
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* The controller answered with a SCO link for what we
		 * tracked as an eSCO attempt: reuse the pending eSCO
		 * connection object as plain SCO */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* First failure on an outgoing attempt: retry once with
		 * the EDR/eSCO packet-type mask relaxed */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2651 
/* Synchronous Connection Changed event: parameters of an existing
 * (e)SCO link were renegotiated.  Nothing here tracks those parameters,
 * so the event is only logged. */
static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2656 
/* Sniff Subrating event: the controller reports new sniff subrating
 * parameters.  They are not tracked here; the event is only logged. */
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
2663 
2664 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2665 {
2666 	struct inquiry_data data;
2667 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2668 	int num_rsp = *((__u8 *) skb->data);
2669 
2670 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2671 
2672 	if (!num_rsp)
2673 		return;
2674 
2675 	hci_dev_lock(hdev);
2676 
2677 	for (; num_rsp; num_rsp--, info++) {
2678 		bacpy(&data.bdaddr, &info->bdaddr);
2679 		data.pscan_rep_mode	= info->pscan_rep_mode;
2680 		data.pscan_period_mode	= info->pscan_period_mode;
2681 		data.pscan_mode		= 0x00;
2682 		memcpy(data.dev_class, info->dev_class, 3);
2683 		data.clock_offset	= info->clock_offset;
2684 		data.rssi		= info->rssi;
2685 		data.ssp_mode		= 0x01;
2686 		hci_inquiry_cache_update(hdev, &data);
2687 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2688 				info->dev_class, info->rssi, info->data);
2689 	}
2690 
2691 	hci_dev_unlock(hdev);
2692 }
2693 
2694 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2695 {
2696 	/* If remote requests dedicated bonding follow that lead */
2697 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2698 		/* If both remote and local IO capabilities allow MITM
2699 		 * protection then require it, otherwise don't */
2700 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2701 			return 0x02;
2702 		else
2703 			return 0x03;
2704 	}
2705 
2706 	/* If remote requests no-bonding follow that lead */
2707 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2708 		return conn->remote_auth | (conn->auth_type & 0x01);
2709 
2710 	return conn->auth_type;
2711 }
2712 
/* IO Capability Request event: the controller needs our IO capabilities
 * to start Secure Simple Pairing.  Reply with them when pairing is
 * acceptable, otherwise send a negative reply.
 */
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive during pairing.  NOTE(review): no
	 * matching hci_conn_put() in this function — presumably the
	 * reference is dropped when pairing completes; confirm. */
	hci_conn_hold(conn);

	/* Pairing decisions require the management interface */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	/* Accept when we are pairable, or when the remote only asks for
	 * a (possibly MITM-protected) no-bonding association */
	if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.capability = conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only for outgoing connections or when
		 * the remote claims to have OOB data, and only if we have
		 * matching OOB data stored */
		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2761 
2762 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2763 {
2764 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2765 	struct hci_conn *conn;
2766 
2767 	BT_DBG("%s", hdev->name);
2768 
2769 	hci_dev_lock(hdev);
2770 
2771 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2772 	if (!conn)
2773 		goto unlock;
2774 
2775 	conn->remote_cap = ev->capability;
2776 	conn->remote_oob = ev->oob_data;
2777 	conn->remote_auth = ev->authentication;
2778 
2779 unlock:
2780 	hci_dev_unlock(hdev);
2781 }
2782 
/* User Confirmation Request event (SSP numeric comparison).  Depending
 * on both sides' MITM requirements and IO capabilities this either
 * rejects the request, auto-accepts it (optionally after a configurable
 * delay), or forwards it to user space for confirmation.
 */
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Only the management interface can answer confirmation requests */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
				(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		/* Optional delay before auto-accepting, driven by a
		 * per-device timer */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
						sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Hand the numeric comparison value to user space */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
								confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
2850 
2851 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
2852 							struct sk_buff *skb)
2853 {
2854 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
2855 
2856 	BT_DBG("%s", hdev->name);
2857 
2858 	hci_dev_lock(hdev);
2859 
2860 	if (test_bit(HCI_MGMT, &hdev->flags))
2861 		mgmt_user_passkey_request(hdev, &ev->bdaddr);
2862 
2863 	hci_dev_unlock(hdev);
2864 }
2865 
2866 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2867 {
2868 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2869 	struct hci_conn *conn;
2870 
2871 	BT_DBG("%s", hdev->name);
2872 
2873 	hci_dev_lock(hdev);
2874 
2875 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2876 	if (!conn)
2877 		goto unlock;
2878 
2879 	/* To avoid duplicate auth_failed events to user space we check
2880 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
2881 	 * initiated the authentication. A traditional auth_complete
2882 	 * event gets always produced as initiator and is also mapped to
2883 	 * the mgmt_auth_failed event */
2884 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2885 		mgmt_auth_failed(hdev, &conn->dst, ev->status);
2886 
2887 	hci_conn_put(conn);
2888 
2889 unlock:
2890 	hci_dev_unlock(hdev);
2891 }
2892 
2893 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2894 {
2895 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
2896 	struct inquiry_entry *ie;
2897 
2898 	BT_DBG("%s", hdev->name);
2899 
2900 	hci_dev_lock(hdev);
2901 
2902 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2903 	if (ie)
2904 		ie->data.ssp_mode = (ev->features[0] & 0x01);
2905 
2906 	hci_dev_unlock(hdev);
2907 }
2908 
2909 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2910 							struct sk_buff *skb)
2911 {
2912 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2913 	struct oob_data *data;
2914 
2915 	BT_DBG("%s", hdev->name);
2916 
2917 	hci_dev_lock(hdev);
2918 
2919 	if (!test_bit(HCI_MGMT, &hdev->flags))
2920 		goto unlock;
2921 
2922 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2923 	if (data) {
2924 		struct hci_cp_remote_oob_data_reply cp;
2925 
2926 		bacpy(&cp.bdaddr, &ev->bdaddr);
2927 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
2928 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2929 
2930 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2931 									&cp);
2932 	} else {
2933 		struct hci_cp_remote_oob_data_neg_reply cp;
2934 
2935 		bacpy(&cp.bdaddr, &ev->bdaddr);
2936 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2937 									&cp);
2938 	}
2939 
2940 unlock:
2941 	hci_dev_unlock(hdev);
2942 }
2943 
/* LE Connection Complete event: finish setting up an LE connection, or
 * clean up and notify user space if the attempt failed.
 */
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		/* No pending hci_conn means the connection was not
		 * initiated by us (e.g. incoming); create one now */
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		/* Failed attempt: report it, confirm to the protocol
		 * layer, and tear the connection object down */
		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);

	/* A fresh LE link starts at the lowest security level */
	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
2988 
/* LE Advertising Report event: add each advertising report in the event
 * to the advertising cache.
 */
static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		hci_add_adv_entry(hdev, ev);

		/* Each report is followed by ev->length data bytes plus a
		 * trailing RSSI byte.
		 * NOTE(review): neither num_reports nor ev->length is
		 * validated against skb->len here, so a malformed event
		 * would walk past the buffer — consider bounds checks. */
		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}
3007 
3008 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3009 						struct sk_buff *skb)
3010 {
3011 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3012 	struct hci_cp_le_ltk_reply cp;
3013 	struct hci_cp_le_ltk_neg_reply neg;
3014 	struct hci_conn *conn;
3015 	struct link_key *ltk;
3016 
3017 	BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3018 
3019 	hci_dev_lock(hdev);
3020 
3021 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3022 	if (conn == NULL)
3023 		goto not_found;
3024 
3025 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3026 	if (ltk == NULL)
3027 		goto not_found;
3028 
3029 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3030 	cp.handle = cpu_to_le16(conn->handle);
3031 	conn->pin_length = ltk->pin_len;
3032 
3033 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3034 
3035 	hci_dev_unlock(hdev);
3036 
3037 	return;
3038 
3039 not_found:
3040 	neg.handle = ev->handle;
3041 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3042 	hci_dev_unlock(hdev);
3043 }
3044 
3045 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3046 {
3047 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3048 
3049 	skb_pull(skb, sizeof(*le_ev));
3050 
3051 	switch (le_ev->subevent) {
3052 	case HCI_EV_LE_CONN_COMPLETE:
3053 		hci_le_conn_complete_evt(hdev, skb);
3054 		break;
3055 
3056 	case HCI_EV_LE_ADVERTISING_REPORT:
3057 		hci_le_adv_report_evt(hdev, skb);
3058 		break;
3059 
3060 	case HCI_EV_LE_LTK_REQ:
3061 		hci_le_ltk_request_evt(hdev, skb);
3062 		break;
3063 
3064 	default:
3065 		break;
3066 	}
3067 }
3068 
/* Main HCI event dispatcher: called for every event packet received
 * from the controller.  The event header is stripped here; the
 * per-event handlers parse skb->data directly.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	/* Handlers never consume the skb; free it here and account the
	 * received event */
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
3233 
3234 /* Generate internal stack event */
3235 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3236 {
3237 	struct hci_event_hdr *hdr;
3238 	struct hci_ev_stack_internal *ev;
3239 	struct sk_buff *skb;
3240 
3241 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3242 	if (!skb)
3243 		return;
3244 
3245 	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3246 	hdr->evt  = HCI_EV_STACK_INTERNAL;
3247 	hdr->plen = sizeof(*ev) + dlen;
3248 
3249 	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
3250 	ev->type = type;
3251 	memcpy(ev->data, data, dlen);
3252 
3253 	bt_cb(skb)->incoming = 1;
3254 	__net_timestamp(skb);
3255 
3256 	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3257 	skb->dev = (void *) hdev;
3258 	hci_send_to_sock(hdev, skb, NULL);
3259 	kfree_skb(skb);
3260 }
3261 
/* Module parameter: allow toggling LE support at load time or via
 * sysfs (mode 0644) */
module_param(enable_le, bool, 0644);
MODULE_PARM_DESC(enable_le, "Enable LE support");
3264