xref: /linux/net/bluetooth/hci_event.c (revision 2dbf708448c836754d25fe6108c5bfe1f5697c95)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <linux/module.h>
28 
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <net/sock.h>
39 
40 #include <linux/uaccess.h>
41 #include <asm/unaligned.h>
42 
43 #include <net/bluetooth/bluetooth.h>
44 #include <net/bluetooth/hci_core.h>
45 
46 /* Handle HCI Event packets */
47 
48 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
49 {
50 	__u8 status = *((__u8 *) skb->data);
51 
52 	BT_DBG("%s status 0x%x", hdev->name, status);
53 
54 	if (status) {
55 		hci_dev_lock(hdev);
56 		mgmt_stop_discovery_failed(hdev, status);
57 		hci_dev_unlock(hdev);
58 		return;
59 	}
60 
61 	clear_bit(HCI_INQUIRY, &hdev->flags);
62 
63 	hci_dev_lock(hdev);
64 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
65 	hci_dev_unlock(hdev);
66 
67 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
68 
69 	hci_conn_check_pending(hdev);
70 }
71 
72 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73 {
74 	__u8 status = *((__u8 *) skb->data);
75 
76 	BT_DBG("%s status 0x%x", hdev->name, status);
77 
78 	if (status)
79 		return;
80 
81 	hci_conn_check_pending(hdev);
82 }
83 
84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
85 {
86 	BT_DBG("%s", hdev->name);
87 }
88 
89 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
90 {
91 	struct hci_rp_role_discovery *rp = (void *) skb->data;
92 	struct hci_conn *conn;
93 
94 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
95 
96 	if (rp->status)
97 		return;
98 
99 	hci_dev_lock(hdev);
100 
101 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
102 	if (conn) {
103 		if (rp->role)
104 			conn->link_mode &= ~HCI_LM_MASTER;
105 		else
106 			conn->link_mode |= HCI_LM_MASTER;
107 	}
108 
109 	hci_dev_unlock(hdev);
110 }
111 
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
113 {
114 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 	struct hci_conn *conn;
116 
117 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
118 
119 	if (rp->status)
120 		return;
121 
122 	hci_dev_lock(hdev);
123 
124 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
125 	if (conn)
126 		conn->link_policy = __le16_to_cpu(rp->policy);
127 
128 	hci_dev_unlock(hdev);
129 }
130 
131 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
132 {
133 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
134 	struct hci_conn *conn;
135 	void *sent;
136 
137 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
138 
139 	if (rp->status)
140 		return;
141 
142 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
143 	if (!sent)
144 		return;
145 
146 	hci_dev_lock(hdev);
147 
148 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 	if (conn)
150 		conn->link_policy = get_unaligned_le16(sent + 2);
151 
152 	hci_dev_unlock(hdev);
153 }
154 
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 {
157 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158 
159 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
160 
161 	if (rp->status)
162 		return;
163 
164 	hdev->link_policy = __le16_to_cpu(rp->policy);
165 }
166 
167 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
168 {
169 	__u8 status = *((__u8 *) skb->data);
170 	void *sent;
171 
172 	BT_DBG("%s status 0x%x", hdev->name, status);
173 
174 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
175 	if (!sent)
176 		return;
177 
178 	if (!status)
179 		hdev->link_policy = get_unaligned_le16(sent);
180 
181 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
182 }
183 
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
185 {
186 	__u8 status = *((__u8 *) skb->data);
187 
188 	BT_DBG("%s status 0x%x", hdev->name, status);
189 
190 	clear_bit(HCI_RESET, &hdev->flags);
191 
192 	hci_req_complete(hdev, HCI_OP_RESET, status);
193 
194 	/* Reset all non-persistent flags */
195 	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
196 
197 	hdev->discovery.state = DISCOVERY_STOPPED;
198 }
199 
200 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
201 {
202 	__u8 status = *((__u8 *) skb->data);
203 	void *sent;
204 
205 	BT_DBG("%s status 0x%x", hdev->name, status);
206 
207 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
208 	if (!sent)
209 		return;
210 
211 	hci_dev_lock(hdev);
212 
213 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
214 		mgmt_set_local_name_complete(hdev, sent, status);
215 	else if (!status)
216 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
217 
218 	hci_dev_unlock(hdev);
219 
220 	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
221 }
222 
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 	struct hci_rp_read_local_name *rp = (void *) skb->data;
226 
227 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
228 
229 	if (rp->status)
230 		return;
231 
232 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
233 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
234 }
235 
236 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 	__u8 status = *((__u8 *) skb->data);
239 	void *sent;
240 
241 	BT_DBG("%s status 0x%x", hdev->name, status);
242 
243 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
244 	if (!sent)
245 		return;
246 
247 	if (!status) {
248 		__u8 param = *((__u8 *) sent);
249 
250 		if (param == AUTH_ENABLED)
251 			set_bit(HCI_AUTH, &hdev->flags);
252 		else
253 			clear_bit(HCI_AUTH, &hdev->flags);
254 	}
255 
256 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
257 		mgmt_auth_enable_complete(hdev, status);
258 
259 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
260 }
261 
262 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
263 {
264 	__u8 status = *((__u8 *) skb->data);
265 	void *sent;
266 
267 	BT_DBG("%s status 0x%x", hdev->name, status);
268 
269 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
270 	if (!sent)
271 		return;
272 
273 	if (!status) {
274 		__u8 param = *((__u8 *) sent);
275 
276 		if (param)
277 			set_bit(HCI_ENCRYPT, &hdev->flags);
278 		else
279 			clear_bit(HCI_ENCRYPT, &hdev->flags);
280 	}
281 
282 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
283 }
284 
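/* Command Complete for Write_Scan_Enable: mirror the requested scan mode in
 * the ISCAN/PSCAN flags, tell mgmt about discoverable/connectable changes
 * and arm the discoverable timeout if one is configured. */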
285 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
286 {
287 	__u8 param, status = *((__u8 *) skb->data);
288 	int old_pscan, old_iscan;
289 	void *sent;
290 
291 	BT_DBG("%s status 0x%x", hdev->name, status);
292 
293 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
294 	if (!sent)
295 		return;
296 
297 	param = *((__u8 *) sent);
298 
299 	hci_dev_lock(hdev);
300 
301 	if (status != 0) {
302 		mgmt_write_scan_failed(hdev, param, status);
303 		hdev->discov_timeout = 0;
304 		goto done;
305 	}
306 
307 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
308 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
309 
310 	if (param & SCAN_INQUIRY) {
311 		set_bit(HCI_ISCAN, &hdev->flags);
312 		if (!old_iscan)
313 			mgmt_discoverable(hdev, 1);
314 		if (hdev->discov_timeout > 0) {
315 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
316 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
317 									to);
318 		}
319 	} else if (old_iscan)
320 		mgmt_discoverable(hdev, 0);
321 
322 	if (param & SCAN_PAGE) {
323 		set_bit(HCI_PSCAN, &hdev->flags);
324 		if (!old_pscan)
325 			mgmt_connectable(hdev, 1);
326 	} else if (old_pscan)
327 		mgmt_connectable(hdev, 0);
328 
329 done:
330 	hci_dev_unlock(hdev);
331 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
332 }
333 
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337 
338 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
339 
340 	if (rp->status)
341 		return;
342 
343 	memcpy(hdev->dev_class, rp->dev_class, 3);
344 
345 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348 
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 	__u8 status = *((__u8 *) skb->data);
352 	void *sent;
353 
354 	BT_DBG("%s status 0x%x", hdev->name, status);
355 
356 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 	if (!sent)
358 		return;
359 
360 	hci_dev_lock(hdev);
361 
362 	if (status == 0)
363 		memcpy(hdev->dev_class, sent, 3);
364 
365 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 		mgmt_set_class_of_dev_complete(hdev, sent, status);
367 
368 	hci_dev_unlock(hdev);
369 }
370 
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 	__u16 setting;
375 
376 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
377 
378 	if (rp->status)
379 		return;
380 
381 	setting = __le16_to_cpu(rp->voice_setting);
382 
383 	if (hdev->voice_setting == setting)
384 		return;
385 
386 	hdev->voice_setting = setting;
387 
388 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
389 
390 	if (hdev->notify)
391 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393 
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
395 {
396 	__u8 status = *((__u8 *) skb->data);
397 	__u16 setting;
398 	void *sent;
399 
400 	BT_DBG("%s status 0x%x", hdev->name, status);
401 
402 	if (status)
403 		return;
404 
405 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
406 	if (!sent)
407 		return;
408 
409 	setting = get_unaligned_le16(sent);
410 
411 	if (hdev->voice_setting == setting)
412 		return;
413 
414 	hdev->voice_setting = setting;
415 
416 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
417 
418 	if (hdev->notify)
419 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
420 }
421 
422 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
423 {
424 	__u8 status = *((__u8 *) skb->data);
425 
426 	BT_DBG("%s status 0x%x", hdev->name, status);
427 
428 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
429 }
430 
431 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
432 {
433 	__u8 status = *((__u8 *) skb->data);
434 	void *sent;
435 
436 	BT_DBG("%s status 0x%x", hdev->name, status);
437 
438 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
439 	if (!sent)
440 		return;
441 
442 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
443 		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
444 	else if (!status) {
445 		if (*((u8 *) sent))
446 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
447 		else
448 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
449 	}
450 }
451 
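/* Pick the Inquiry_Mode to configure: 2 (with extended inquiry result) when
 * EIR is supported, 1 (with RSSI) when the RSSI feature bit is set or the
 * controller is one of a few known models (matched on manufacturer, HCI
 * revision and LMP subversion) that handle it anyway, otherwise 0. */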
452 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
453 {
454 	if (hdev->features[6] & LMP_EXT_INQ)
455 		return 2;
456 
457 	if (hdev->features[3] & LMP_RSSI_INQ)
458 		return 1;
459 
460 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
461 						hdev->lmp_subver == 0x0757)
462 		return 1;
463 
464 	if (hdev->manufacturer == 15) {
465 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
466 			return 1;
467 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
468 			return 1;
469 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
470 			return 1;
471 	}
472 
473 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
474 						hdev->lmp_subver == 0x1805)
475 		return 1;
476 
477 	return 0;
478 }
479 
480 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
481 {
482 	u8 mode;
483 
484 	mode = hci_get_inquiry_mode(hdev);
485 
486 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487 }
488 
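/* Build the event mask sent to the controller, enabling optional events
 * only when the matching LMP feature bits are present. */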
489 static void hci_setup_event_mask(struct hci_dev *hdev)
490 {
491 	/* The second byte is 0xff instead of 0x9f (two reserved bits
492 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
493 	 * command otherwise */
494 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
495 
496 	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
497 	 * any event mask for pre 1.2 devices */
498 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
499 		return;
500 
501 	events[4] |= 0x01; /* Flow Specification Complete */
502 	events[4] |= 0x02; /* Inquiry Result with RSSI */
503 	events[4] |= 0x04; /* Read Remote Extended Features Complete */
504 	events[5] |= 0x08; /* Synchronous Connection Complete */
505 	events[5] |= 0x10; /* Synchronous Connection Changed */
506 
507 	if (hdev->features[3] & LMP_RSSI_INQ)
508 		events[4] |= 0x02; /* Inquiry Result with RSSI */
509 
510 	if (hdev->features[5] & LMP_SNIFF_SUBR)
511 		events[5] |= 0x20; /* Sniff Subrating */
512 
513 	if (hdev->features[5] & LMP_PAUSE_ENC)
514 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
515 
516 	if (hdev->features[6] & LMP_EXT_INQ)
517 		events[5] |= 0x40; /* Extended Inquiry Result */
518 
519 	if (hdev->features[6] & LMP_NO_FLUSH)
520 		events[7] |= 0x01; /* Enhanced Flush Complete */
521 
522 	if (hdev->features[7] & LMP_LSTO)
523 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
524 
525 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
526 		events[6] |= 0x01;	/* IO Capability Request */
527 		events[6] |= 0x02;	/* IO Capability Response */
528 		events[6] |= 0x04;	/* User Confirmation Request */
529 		events[6] |= 0x08;	/* User Passkey Request */
530 		events[6] |= 0x10;	/* Remote OOB Data Request */
531 		events[6] |= 0x20;	/* Simple Pairing Complete */
532 		events[7] |= 0x04;	/* User Passkey Notification */
533 		events[7] |= 0x08;	/* Keypress Notification */
534 		events[7] |= 0x10;	/* Remote Host Supported
535 					 * Features Notification */
536 	}
537 
538 	if (hdev->features[4] & LMP_LE)
539 		events[7] |= 0x20;	/* LE Meta-Event */
540 
541 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
542 }
543 
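/* BR/EDR controller init, run once the local version is known: program the
 * event mask, read the supported commands, enable SSP or clear EIR data,
 * pick the inquiry mode and read extended features where available. */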
544 static void hci_setup(struct hci_dev *hdev)
545 {
546 	if (hdev->dev_type != HCI_BREDR)
547 		return;
548 
549 	hci_setup_event_mask(hdev);
550 
551 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
552 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
553 
554 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
555 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
556 			u8 mode = 0x01;
557 			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
558 				     sizeof(mode), &mode);
559 		} else {
560 			struct hci_cp_write_eir cp;
561 
562 			memset(hdev->eir, 0, sizeof(hdev->eir));
563 			memset(&cp, 0, sizeof(cp));
564 
565 			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
566 		}
567 	}
568 
569 	if (hdev->features[3] & LMP_RSSI_INQ)
570 		hci_setup_inquiry_mode(hdev);
571 
572 	if (hdev->features[7] & LMP_INQ_TX_PWR)
573 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
574 
575 	if (hdev->features[7] & LMP_EXTFEATURES) {
576 		struct hci_cp_read_local_ext_features cp;
577 
578 		cp.page = 0x01;
579 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
580 			     &cp);
581 	}
582 
583 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
584 		u8 enable = 1;
585 		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
586 			     &enable);
587 	}
588 }
589 
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 	struct hci_rp_read_local_version *rp = (void *) skb->data;
593 
594 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
595 
596 	if (rp->status)
597 		goto done;
598 
599 	hdev->hci_ver = rp->hci_ver;
600 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 	hdev->lmp_ver = rp->lmp_ver;
602 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
604 
605 	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
606 					hdev->manufacturer,
607 					hdev->hci_ver, hdev->hci_rev);
608 
609 	if (test_bit(HCI_INIT, &hdev->flags))
610 		hci_setup(hdev);
611 
612 done:
613 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
614 }
615 
616 static void hci_setup_link_policy(struct hci_dev *hdev)
617 {
618 	u16 link_policy = 0;
619 
620 	if (hdev->features[0] & LMP_RSWITCH)
621 		link_policy |= HCI_LP_RSWITCH;
622 	if (hdev->features[0] & LMP_HOLD)
623 		link_policy |= HCI_LP_HOLD;
624 	if (hdev->features[0] & LMP_SNIFF)
625 		link_policy |= HCI_LP_SNIFF;
626 	if (hdev->features[1] & LMP_PARK)
627 		link_policy |= HCI_LP_PARK;
628 
629 	link_policy = cpu_to_le16(link_policy);
630 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
631 		     &link_policy);
632 }
633 
634 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
635 {
636 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
637 
638 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
639 
640 	if (rp->status)
641 		goto done;
642 
643 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
644 
645 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
646 		hci_setup_link_policy(hdev);
647 
648 done:
649 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
650 }
651 
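/* Command Complete for Read_Local_Supported_Features: cache the feature bits
 * and derive the ACL and (e)SCO packet types the controller can use. */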
652 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
653 {
654 	struct hci_rp_read_local_features *rp = (void *) skb->data;
655 
656 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
657 
658 	if (rp->status)
659 		return;
660 
661 	memcpy(hdev->features, rp->features, 8);
662 
663 	/* Adjust default settings according to features
664 	 * supported by the device. */
665 
666 	if (hdev->features[0] & LMP_3SLOT)
667 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
668 
669 	if (hdev->features[0] & LMP_5SLOT)
670 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
671 
672 	if (hdev->features[1] & LMP_HV2) {
673 		hdev->pkt_type  |= (HCI_HV2);
674 		hdev->esco_type |= (ESCO_HV2);
675 	}
676 
677 	if (hdev->features[1] & LMP_HV3) {
678 		hdev->pkt_type  |= (HCI_HV3);
679 		hdev->esco_type |= (ESCO_HV3);
680 	}
681 
682 	if (hdev->features[3] & LMP_ESCO)
683 		hdev->esco_type |= (ESCO_EV3);
684 
685 	if (hdev->features[4] & LMP_EV4)
686 		hdev->esco_type |= (ESCO_EV4);
687 
688 	if (hdev->features[4] & LMP_EV5)
689 		hdev->esco_type |= (ESCO_EV5);
690 
691 	if (hdev->features[5] & LMP_EDR_ESCO_2M)
692 		hdev->esco_type |= (ESCO_2EV3);
693 
694 	if (hdev->features[5] & LMP_EDR_ESCO_3M)
695 		hdev->esco_type |= (ESCO_3EV3);
696 
697 	if (hdev->features[5] & LMP_EDR_3S_ESCO)
698 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
699 
700 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
701 					hdev->features[0], hdev->features[1],
702 					hdev->features[2], hdev->features[3],
703 					hdev->features[4], hdev->features[5],
704 					hdev->features[6], hdev->features[7]);
705 }
706 
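/* Keep the controller's LE Host Supported setting in sync with the enable_le
 * module parameter and the HCI_LE_ENABLED flag, writing it only on change. */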
707 static void hci_set_le_support(struct hci_dev *hdev)
708 {
709 	struct hci_cp_write_le_host_supported cp;
710 
711 	memset(&cp, 0, sizeof(cp));
712 
713 	if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
714 		cp.le = 1;
715 		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
716 	}
717 
718 	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
719 		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
720 			     &cp);
721 }
722 
723 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
724 							struct sk_buff *skb)
725 {
726 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
727 
728 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
729 
730 	if (rp->status)
731 		goto done;
732 
733 	switch (rp->page) {
734 	case 0:
735 		memcpy(hdev->features, rp->features, 8);
736 		break;
737 	case 1:
738 		memcpy(hdev->host_features, rp->features, 8);
739 		break;
740 	}
741 
742 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
743 		hci_set_le_support(hdev);
744 
745 done:
746 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
747 }
748 
749 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
750 						struct sk_buff *skb)
751 {
752 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
753 
754 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
755 
756 	if (rp->status)
757 		return;
758 
759 	hdev->flow_ctl_mode = rp->mode;
760 
761 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
762 }
763 
764 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
765 {
766 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
767 
768 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
769 
770 	if (rp->status)
771 		return;
772 
773 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
774 	hdev->sco_mtu  = rp->sco_mtu;
775 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
776 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
777 
778 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
779 		hdev->sco_mtu  = 64;
780 		hdev->sco_pkts = 8;
781 	}
782 
783 	hdev->acl_cnt = hdev->acl_pkts;
784 	hdev->sco_cnt = hdev->sco_pkts;
785 
786 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
787 					hdev->acl_mtu, hdev->acl_pkts,
788 					hdev->sco_mtu, hdev->sco_pkts);
789 }
790 
791 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
792 {
793 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
794 
795 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
796 
797 	if (!rp->status)
798 		bacpy(&hdev->bdaddr, &rp->bdaddr);
799 
800 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
801 }
802 
803 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
804 							struct sk_buff *skb)
805 {
806 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
807 
808 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
809 
810 	if (rp->status)
811 		return;
812 
813 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
814 	hdev->block_len = __le16_to_cpu(rp->block_len);
815 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
816 
817 	hdev->block_cnt = hdev->num_blocks;
818 
819 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
820 					hdev->block_cnt, hdev->block_len);
821 
822 	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
823 }
824 
825 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
826 {
827 	__u8 status = *((__u8 *) skb->data);
828 
829 	BT_DBG("%s status 0x%x", hdev->name, status);
830 
831 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
832 }
833 
834 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
835 		struct sk_buff *skb)
836 {
837 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
838 
839 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
840 
841 	if (rp->status)
842 		return;
843 
844 	hdev->amp_status = rp->amp_status;
845 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
846 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
847 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
848 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
849 	hdev->amp_type = rp->amp_type;
850 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
851 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
852 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
853 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
854 
855 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
856 }
857 
858 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
859 							struct sk_buff *skb)
860 {
861 	__u8 status = *((__u8 *) skb->data);
862 
863 	BT_DBG("%s status 0x%x", hdev->name, status);
864 
865 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
866 }
867 
868 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
869 {
870 	__u8 status = *((__u8 *) skb->data);
871 
872 	BT_DBG("%s status 0x%x", hdev->name, status);
873 
874 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
875 }
876 
877 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
878 							struct sk_buff *skb)
879 {
880 	__u8 status = *((__u8 *) skb->data);
881 
882 	BT_DBG("%s status 0x%x", hdev->name, status);
883 
884 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
885 }
886 
887 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
888 							struct sk_buff *skb)
889 {
890 	__u8 status = *((__u8 *) skb->data);
891 
892 	BT_DBG("%s status 0x%x", hdev->name, status);
893 
894 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
895 }
896 
897 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
898 {
899 	__u8 status = *((__u8 *) skb->data);
900 
901 	BT_DBG("%s status 0x%x", hdev->name, status);
902 
903 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
904 }
905 
906 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
909 	struct hci_cp_pin_code_reply *cp;
910 	struct hci_conn *conn;
911 
912 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
913 
914 	hci_dev_lock(hdev);
915 
916 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
917 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
918 
919 	if (rp->status != 0)
920 		goto unlock;
921 
922 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
923 	if (!cp)
924 		goto unlock;
925 
926 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
927 	if (conn)
928 		conn->pin_length = cp->pin_len;
929 
930 unlock:
931 	hci_dev_unlock(hdev);
932 }
933 
934 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
935 {
936 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
937 
938 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
939 
940 	hci_dev_lock(hdev);
941 
942 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
943 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
944 								rp->status);
945 
946 	hci_dev_unlock(hdev);
947 }
948 
949 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
950 				       struct sk_buff *skb)
951 {
952 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
953 
954 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
955 
956 	if (rp->status)
957 		return;
958 
959 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
960 	hdev->le_pkts = rp->le_max_pkt;
961 
962 	hdev->le_cnt = hdev->le_pkts;
963 
964 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
965 
966 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
967 }
968 
969 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
970 {
971 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
972 
973 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
974 
975 	hci_dev_lock(hdev);
976 
977 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
979 						 rp->status);
980 
981 	hci_dev_unlock(hdev);
982 }
983 
984 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
985 							struct sk_buff *skb)
986 {
987 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
988 
989 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
990 
991 	hci_dev_lock(hdev);
992 
993 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
994 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
995 						     ACL_LINK, 0, rp->status);
996 
997 	hci_dev_unlock(hdev);
998 }
999 
1000 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1001 {
1002 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1003 
1004 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1005 
1006 	hci_dev_lock(hdev);
1007 
1008 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1009 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1010 						 0, rp->status);
1011 
1012 	hci_dev_unlock(hdev);
1013 }
1014 
1015 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1016 							struct sk_buff *skb)
1017 {
1018 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1019 
1020 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1021 
1022 	hci_dev_lock(hdev);
1023 
1024 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1025 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1026 						     ACL_LINK, 0, rp->status);
1027 
1028 	hci_dev_unlock(hdev);
1029 }
1030 
1031 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1032 							struct sk_buff *skb)
1033 {
1034 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1035 
1036 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1037 
1038 	hci_dev_lock(hdev);
1039 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1040 						rp->randomizer, rp->status);
1041 	hci_dev_unlock(hdev);
1042 }
1043 
1044 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1045 {
1046 	__u8 status = *((__u8 *) skb->data);
1047 
1048 	BT_DBG("%s status 0x%x", hdev->name, status);
1049 
1050 	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1051 
1052 	if (status) {
1053 		hci_dev_lock(hdev);
1054 		mgmt_start_discovery_failed(hdev, status);
1055 		hci_dev_unlock(hdev);
1056 		return;
1057 	}
1058 }
1059 
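/* Command Complete for LE_Set_Scan_Enable: track the HCI_LE_SCAN flag and
 * drive the mgmt discovery state for both the enable and disable cases. */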
1060 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1061 					struct sk_buff *skb)
1062 {
1063 	struct hci_cp_le_set_scan_enable *cp;
1064 	__u8 status = *((__u8 *) skb->data);
1065 
1066 	BT_DBG("%s status 0x%x", hdev->name, status);
1067 
1068 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1069 	if (!cp)
1070 		return;
1071 
1072 	switch (cp->enable) {
1073 	case LE_SCANNING_ENABLED:
1074 		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1075 
1076 		if (status) {
1077 			hci_dev_lock(hdev);
1078 			mgmt_start_discovery_failed(hdev, status);
1079 			hci_dev_unlock(hdev);
1080 			return;
1081 		}
1082 
1083 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1084 
1085 		cancel_delayed_work_sync(&hdev->adv_work);
1086 
1087 		hci_dev_lock(hdev);
1088 		hci_adv_entries_clear(hdev);
1089 		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1090 		hci_dev_unlock(hdev);
1091 		break;
1092 
1093 	case LE_SCANNING_DISABLED:
1094 		if (status)
1095 			return;
1096 
1097 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1098 
1099 		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1100 
1101 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1102 			mgmt_interleaved_discovery(hdev);
1103 		} else {
1104 			hci_dev_lock(hdev);
1105 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1106 			hci_dev_unlock(hdev);
1107 		}
1108 
1109 		break;
1110 
1111 	default:
1112 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1113 		break;
1114 	}
1115 }
1116 
1117 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1118 {
1119 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1120 
1121 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1122 
1123 	if (rp->status)
1124 		return;
1125 
1126 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1127 }
1128 
1129 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1130 {
1131 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1132 
1133 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1134 
1135 	if (rp->status)
1136 		return;
1137 
1138 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1139 }
1140 
1141 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1142 							struct sk_buff *skb)
1143 {
1144 	struct hci_cp_write_le_host_supported *sent;
1145 	__u8 status = *((__u8 *) skb->data);
1146 
1147 	BT_DBG("%s status 0x%x", hdev->name, status);
1148 
1149 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1150 	if (!sent)
1151 		return;
1152 
1153 	if (!status) {
1154 		if (sent->le)
1155 			hdev->host_features[0] |= LMP_HOST_LE;
1156 		else
1157 			hdev->host_features[0] &= ~LMP_HOST_LE;
1158 	}
1159 
1160 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1161 					!test_bit(HCI_INIT, &hdev->flags))
1162 		mgmt_le_enable_complete(hdev, sent->le, status);
1163 
1164 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1165 }
1166 
1167 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1168 {
1169 	BT_DBG("%s status 0x%x", hdev->name, status);
1170 
1171 	if (status) {
1172 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1173 		hci_conn_check_pending(hdev);
1174 		hci_dev_lock(hdev);
1175 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1176 			mgmt_start_discovery_failed(hdev, status);
1177 		hci_dev_unlock(hdev);
1178 		return;
1179 	}
1180 
1181 	set_bit(HCI_INQUIRY, &hdev->flags);
1182 
1183 	hci_dev_lock(hdev);
1184 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1185 	hci_dev_unlock(hdev);
1186 }
1187 
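/* Command Status for Create_Connection: on failure tear down the pending
 * connection unless the error is 0x0c (Command Disallowed) and a retry is
 * still allowed; on success make sure a connection object exists. */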
1188 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1189 {
1190 	struct hci_cp_create_conn *cp;
1191 	struct hci_conn *conn;
1192 
1193 	BT_DBG("%s status 0x%x", hdev->name, status);
1194 
1195 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1196 	if (!cp)
1197 		return;
1198 
1199 	hci_dev_lock(hdev);
1200 
1201 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1202 
1203 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1204 
1205 	if (status) {
1206 		if (conn && conn->state == BT_CONNECT) {
1207 			if (status != 0x0c || conn->attempt > 2) {
1208 				conn->state = BT_CLOSED;
1209 				hci_proto_connect_cfm(conn, status);
1210 				hci_conn_del(conn);
1211 			} else
1212 				conn->state = BT_CONNECT2;
1213 		}
1214 	} else {
1215 		if (!conn) {
1216 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1217 			if (conn) {
1218 				conn->out = true;
1219 				conn->link_mode |= HCI_LM_MASTER;
1220 			} else
1221 				BT_ERR("No memory for new connection");
1222 		}
1223 	}
1224 
1225 	hci_dev_unlock(hdev);
1226 }
1227 
1228 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1229 {
1230 	struct hci_cp_add_sco *cp;
1231 	struct hci_conn *acl, *sco;
1232 	__u16 handle;
1233 
1234 	BT_DBG("%s status 0x%x", hdev->name, status);
1235 
1236 	if (!status)
1237 		return;
1238 
1239 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1240 	if (!cp)
1241 		return;
1242 
1243 	handle = __le16_to_cpu(cp->handle);
1244 
1245 	BT_DBG("%s handle %d", hdev->name, handle);
1246 
1247 	hci_dev_lock(hdev);
1248 
1249 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1250 	if (acl) {
1251 		sco = acl->link;
1252 		if (sco) {
1253 			sco->state = BT_CLOSED;
1254 
1255 			hci_proto_connect_cfm(sco, status);
1256 			hci_conn_del(sco);
1257 		}
1258 	}
1259 
1260 	hci_dev_unlock(hdev);
1261 }
1262 
1263 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1264 {
1265 	struct hci_cp_auth_requested *cp;
1266 	struct hci_conn *conn;
1267 
1268 	BT_DBG("%s status 0x%x", hdev->name, status);
1269 
1270 	if (!status)
1271 		return;
1272 
1273 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1274 	if (!cp)
1275 		return;
1276 
1277 	hci_dev_lock(hdev);
1278 
1279 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1280 	if (conn) {
1281 		if (conn->state == BT_CONFIG) {
1282 			hci_proto_connect_cfm(conn, status);
1283 			hci_conn_put(conn);
1284 		}
1285 	}
1286 
1287 	hci_dev_unlock(hdev);
1288 }
1289 
1290 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1291 {
1292 	struct hci_cp_set_conn_encrypt *cp;
1293 	struct hci_conn *conn;
1294 
1295 	BT_DBG("%s status 0x%x", hdev->name, status);
1296 
1297 	if (!status)
1298 		return;
1299 
1300 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1301 	if (!cp)
1302 		return;
1303 
1304 	hci_dev_lock(hdev);
1305 
1306 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1307 	if (conn) {
1308 		if (conn->state == BT_CONFIG) {
1309 			hci_proto_connect_cfm(conn, status);
1310 			hci_conn_put(conn);
1311 		}
1312 	}
1313 
1314 	hci_dev_unlock(hdev);
1315 }
1316 
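/* Decide whether an outgoing connection still in BT_CONFIG needs an explicit
 * Authentication_Requested before it can be reported as connected. */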
1317 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1318 							struct hci_conn *conn)
1319 {
1320 	if (conn->state != BT_CONFIG || !conn->out)
1321 		return 0;
1322 
1323 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1324 		return 0;
1325 
1326 	/* Only request authentication for SSP connections or non-SSP
1327 	 * devices with sec_level HIGH or if MITM protection is requested */
1328 	if (!hci_conn_ssp_enabled(conn) &&
1329 				conn->pending_sec_level != BT_SECURITY_HIGH &&
1330 				!(conn->auth_type & 0x01))
1331 		return 0;
1332 
1333 	return 1;
1334 }
1335 
1336 static inline int hci_resolve_name(struct hci_dev *hdev,
1337 				   struct inquiry_entry *e)
1338 {
1339 	struct hci_cp_remote_name_req cp;
1340 
1341 	memset(&cp, 0, sizeof(cp));
1342 
1343 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1344 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1345 	cp.pscan_mode = e->data.pscan_mode;
1346 	cp.clock_offset = e->data.clock_offset;
1347 
1348 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1349 }
1350 
1351 static bool hci_resolve_next_name(struct hci_dev *hdev)
1352 {
1353 	struct discovery_state *discov = &hdev->discovery;
1354 	struct inquiry_entry *e;
1355 
1356 	if (list_empty(&discov->resolve))
1357 		return false;
1358 
1359 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1360 	if (hci_resolve_name(hdev, e) == 0) {
1361 		e->name_state = NAME_PENDING;
1362 		return true;
1363 	}
1364 
1365 	return false;
1366 }
1367 
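/* Handle the outcome of a remote name request: report the connection and the
 * resolved name to mgmt where appropriate, then move on to the next entry in
 * the resolve list or finish the discovery. */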
1368 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1369 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1370 {
1371 	struct discovery_state *discov = &hdev->discovery;
1372 	struct inquiry_entry *e;
1373 
1374 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1375 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1376 				      name_len, conn->dev_class);
1377 
1378 	if (discov->state == DISCOVERY_STOPPED)
1379 		return;
1380 
1381 	if (discov->state == DISCOVERY_STOPPING)
1382 		goto discov_complete;
1383 
1384 	if (discov->state != DISCOVERY_RESOLVING)
1385 		return;
1386 
1387 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1388 	if (e) {
1389 		e->name_state = NAME_KNOWN;
1390 		list_del(&e->list);
1391 		if (name)
1392 			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1393 					 e->data.rssi, name, name_len);
1394 	}
1395 
1396 	if (hci_resolve_next_name(hdev))
1397 		return;
1398 
1399 discov_complete:
1400 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1401 }
1402 
1403 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1404 {
1405 	struct hci_cp_remote_name_req *cp;
1406 	struct hci_conn *conn;
1407 
1408 	BT_DBG("%s status 0x%x", hdev->name, status);
1409 
1410 	/* If successful wait for the name req complete event before
1411 	 * checking for the need to do authentication */
1412 	if (!status)
1413 		return;
1414 
1415 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1416 	if (!cp)
1417 		return;
1418 
1419 	hci_dev_lock(hdev);
1420 
1421 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1422 
1423 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1424 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1425 
1426 	if (!conn)
1427 		goto unlock;
1428 
1429 	if (!hci_outgoing_auth_needed(hdev, conn))
1430 		goto unlock;
1431 
1432 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1433 		struct hci_cp_auth_requested cp;
1434 		cp.handle = __cpu_to_le16(conn->handle);
1435 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1436 	}
1437 
1438 unlock:
1439 	hci_dev_unlock(hdev);
1440 }
1441 
1442 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1443 {
1444 	struct hci_cp_read_remote_features *cp;
1445 	struct hci_conn *conn;
1446 
1447 	BT_DBG("%s status 0x%x", hdev->name, status);
1448 
1449 	if (!status)
1450 		return;
1451 
1452 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1453 	if (!cp)
1454 		return;
1455 
1456 	hci_dev_lock(hdev);
1457 
1458 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1459 	if (conn) {
1460 		if (conn->state == BT_CONFIG) {
1461 			hci_proto_connect_cfm(conn, status);
1462 			hci_conn_put(conn);
1463 		}
1464 	}
1465 
1466 	hci_dev_unlock(hdev);
1467 }
1468 
1469 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1470 {
1471 	struct hci_cp_read_remote_ext_features *cp;
1472 	struct hci_conn *conn;
1473 
1474 	BT_DBG("%s status 0x%x", hdev->name, status);
1475 
1476 	if (!status)
1477 		return;
1478 
1479 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1480 	if (!cp)
1481 		return;
1482 
1483 	hci_dev_lock(hdev);
1484 
1485 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1486 	if (conn) {
1487 		if (conn->state == BT_CONFIG) {
1488 			hci_proto_connect_cfm(conn, status);
1489 			hci_conn_put(conn);
1490 		}
1491 	}
1492 
1493 	hci_dev_unlock(hdev);
1494 }
1495 
1496 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1497 {
1498 	struct hci_cp_setup_sync_conn *cp;
1499 	struct hci_conn *acl, *sco;
1500 	__u16 handle;
1501 
1502 	BT_DBG("%s status 0x%x", hdev->name, status);
1503 
1504 	if (!status)
1505 		return;
1506 
1507 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1508 	if (!cp)
1509 		return;
1510 
1511 	handle = __le16_to_cpu(cp->handle);
1512 
1513 	BT_DBG("%s handle %d", hdev->name, handle);
1514 
1515 	hci_dev_lock(hdev);
1516 
1517 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1518 	if (acl) {
1519 		sco = acl->link;
1520 		if (sco) {
1521 			sco->state = BT_CLOSED;
1522 
1523 			hci_proto_connect_cfm(sco, status);
1524 			hci_conn_del(sco);
1525 		}
1526 	}
1527 
1528 	hci_dev_unlock(hdev);
1529 }
1530 
1531 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1532 {
1533 	struct hci_cp_sniff_mode *cp;
1534 	struct hci_conn *conn;
1535 
1536 	BT_DBG("%s status 0x%x", hdev->name, status);
1537 
1538 	if (!status)
1539 		return;
1540 
1541 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1542 	if (!cp)
1543 		return;
1544 
1545 	hci_dev_lock(hdev);
1546 
1547 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1548 	if (conn) {
1549 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1550 
1551 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1552 			hci_sco_setup(conn, status);
1553 	}
1554 
1555 	hci_dev_unlock(hdev);
1556 }
1557 
1558 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1559 {
1560 	struct hci_cp_exit_sniff_mode *cp;
1561 	struct hci_conn *conn;
1562 
1563 	BT_DBG("%s status 0x%x", hdev->name, status);
1564 
1565 	if (!status)
1566 		return;
1567 
1568 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1569 	if (!cp)
1570 		return;
1571 
1572 	hci_dev_lock(hdev);
1573 
1574 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1575 	if (conn) {
1576 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1577 
1578 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1579 			hci_sco_setup(conn, status);
1580 	}
1581 
1582 	hci_dev_unlock(hdev);
1583 }
1584 
1585 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1586 {
1587 	struct hci_cp_disconnect *cp;
1588 	struct hci_conn *conn;
1589 
1590 	if (!status)
1591 		return;
1592 
1593 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1594 	if (!cp)
1595 		return;
1596 
1597 	hci_dev_lock(hdev);
1598 
1599 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1600 	if (conn)
1601 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1602 				       conn->dst_type, status);
1603 
1604 	hci_dev_unlock(hdev);
1605 }
1606 
1607 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1608 {
1609 	struct hci_cp_le_create_conn *cp;
1610 	struct hci_conn *conn;
1611 
1612 	BT_DBG("%s status 0x%x", hdev->name, status);
1613 
1614 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1615 	if (!cp)
1616 		return;
1617 
1618 	hci_dev_lock(hdev);
1619 
1620 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1621 
1622 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1623 		conn);
1624 
1625 	if (status) {
1626 		if (conn && conn->state == BT_CONNECT) {
1627 			conn->state = BT_CLOSED;
1628 			hci_proto_connect_cfm(conn, status);
1629 			hci_conn_del(conn);
1630 		}
1631 	} else {
1632 		if (!conn) {
1633 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1634 			if (conn) {
1635 				conn->dst_type = cp->peer_addr_type;
1636 				conn->out = true;
1637 			} else {
1638 				BT_ERR("No memory for new connection");
1639 			}
1640 		}
1641 	}
1642 
1643 	hci_dev_unlock(hdev);
1644 }
1645 
1646 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1647 {
1648 	BT_DBG("%s status 0x%x", hdev->name, status);
1649 }
1650 
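/* Inquiry Complete: clear the inquiry flag and, for mgmt-driven discovery,
 * start resolving names from the inquiry cache or stop the discovery. */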
1651 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1652 {
1653 	__u8 status = *((__u8 *) skb->data);
1654 	struct discovery_state *discov = &hdev->discovery;
1655 	struct inquiry_entry *e;
1656 
1657 	BT_DBG("%s status %d", hdev->name, status);
1658 
1659 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1660 
1661 	hci_conn_check_pending(hdev);
1662 
1663 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1664 		return;
1665 
1666 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1667 		return;
1668 
1669 	hci_dev_lock(hdev);
1670 
1671 	if (discov->state != DISCOVERY_FINDING)
1672 		goto unlock;
1673 
1674 	if (list_empty(&discov->resolve)) {
1675 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1676 		goto unlock;
1677 	}
1678 
1679 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1680 	if (e && hci_resolve_name(hdev, e) == 0) {
1681 		e->name_state = NAME_PENDING;
1682 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1683 	} else {
1684 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1685 	}
1686 
1687 unlock:
1688 	hci_dev_unlock(hdev);
1689 }
1690 
1691 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1692 {
1693 	struct inquiry_data data;
1694 	struct inquiry_info *info = (void *) (skb->data + 1);
1695 	int num_rsp = *((__u8 *) skb->data);
1696 
1697 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1698 
1699 	if (!num_rsp)
1700 		return;
1701 
1702 	hci_dev_lock(hdev);
1703 
1704 	for (; num_rsp; num_rsp--, info++) {
1705 		bool name_known, ssp;
1706 
1707 		bacpy(&data.bdaddr, &info->bdaddr);
1708 		data.pscan_rep_mode	= info->pscan_rep_mode;
1709 		data.pscan_period_mode	= info->pscan_period_mode;
1710 		data.pscan_mode		= info->pscan_mode;
1711 		memcpy(data.dev_class, info->dev_class, 3);
1712 		data.clock_offset	= info->clock_offset;
1713 		data.rssi		= 0x00;
1714 		data.ssp_mode		= 0x00;
1715 
1716 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1717 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1718 				  info->dev_class, 0, !name_known, ssp, NULL,
1719 				  0);
1720 	}
1721 
1722 	hci_dev_unlock(hdev);
1723 }
1724 
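/* Connection Complete: set up the new ACL/SCO connection state, start remote
 * feature discovery for ACL links and report failures to mgmt. */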
1725 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1726 {
1727 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1728 	struct hci_conn *conn;
1729 
1730 	BT_DBG("%s", hdev->name);
1731 
1732 	hci_dev_lock(hdev);
1733 
1734 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1735 	if (!conn) {
1736 		if (ev->link_type != SCO_LINK)
1737 			goto unlock;
1738 
1739 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1740 		if (!conn)
1741 			goto unlock;
1742 
1743 		conn->type = SCO_LINK;
1744 	}
1745 
1746 	if (!ev->status) {
1747 		conn->handle = __le16_to_cpu(ev->handle);
1748 
1749 		if (conn->type == ACL_LINK) {
1750 			conn->state = BT_CONFIG;
1751 			hci_conn_hold(conn);
1752 			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1753 		} else
1754 			conn->state = BT_CONNECTED;
1755 
1756 		hci_conn_hold_device(conn);
1757 		hci_conn_add_sysfs(conn);
1758 
1759 		if (test_bit(HCI_AUTH, &hdev->flags))
1760 			conn->link_mode |= HCI_LM_AUTH;
1761 
1762 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1763 			conn->link_mode |= HCI_LM_ENCRYPT;
1764 
1765 		/* Get remote features */
1766 		if (conn->type == ACL_LINK) {
1767 			struct hci_cp_read_remote_features cp;
1768 			cp.handle = ev->handle;
1769 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1770 				     sizeof(cp), &cp);
1771 		}
1772 
1773 		/* Set packet type for incoming connection */
1774 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1775 			struct hci_cp_change_conn_ptype cp;
1776 			cp.handle = ev->handle;
1777 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1778 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1779 				     &cp);
1780 		}
1781 	} else {
1782 		conn->state = BT_CLOSED;
1783 		if (conn->type == ACL_LINK)
1784 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1785 					    conn->dst_type, ev->status);
1786 	}
1787 
1788 	if (conn->type == ACL_LINK)
1789 		hci_sco_setup(conn, ev->status);
1790 
1791 	if (ev->status) {
1792 		hci_proto_connect_cfm(conn, ev->status);
1793 		hci_conn_del(conn);
1794 	} else if (ev->link_type != ACL_LINK)
1795 		hci_proto_connect_cfm(conn, ev->status);
1796 
1797 unlock:
1798 	hci_dev_unlock(hdev);
1799 
1800 	hci_conn_check_pending(hdev);
1801 }
1802 
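/* Incoming connection request: accept it as ACL or (e)SCO with default
 * parameters unless the peer is blacklisted or nobody is willing to accept,
 * in which case the request is rejected. */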
1803 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1804 {
1805 	struct hci_ev_conn_request *ev = (void *) skb->data;
1806 	int mask = hdev->link_mode;
1807 
1808 	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1809 					batostr(&ev->bdaddr), ev->link_type);
1810 
1811 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1812 
1813 	if ((mask & HCI_LM_ACCEPT) &&
1814 			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1815 		/* Connection accepted */
1816 		struct inquiry_entry *ie;
1817 		struct hci_conn *conn;
1818 
1819 		hci_dev_lock(hdev);
1820 
1821 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1822 		if (ie)
1823 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1824 
1825 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1826 		if (!conn) {
1827 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1828 			if (!conn) {
1829 				BT_ERR("No memory for new connection");
1830 				hci_dev_unlock(hdev);
1831 				return;
1832 			}
1833 		}
1834 
1835 		memcpy(conn->dev_class, ev->dev_class, 3);
1836 		conn->state = BT_CONNECT;
1837 
1838 		hci_dev_unlock(hdev);
1839 
1840 		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1841 			struct hci_cp_accept_conn_req cp;
1842 
1843 			bacpy(&cp.bdaddr, &ev->bdaddr);
1844 
1845 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1846 				cp.role = 0x00; /* Become master */
1847 			else
1848 				cp.role = 0x01; /* Remain slave */
1849 
1850 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1851 				     &cp);
1852 		} else {
1853 			struct hci_cp_accept_sync_conn_req cp;
1854 
1855 			bacpy(&cp.bdaddr, &ev->bdaddr);
1856 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1857 
1858 			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1859 			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1860 			cp.max_latency    = cpu_to_le16(0xffff);
1861 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1862 			cp.retrans_effort = 0xff;
1863 
1864 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1865 				     sizeof(cp), &cp);
1866 		}
1867 	} else {
1868 		/* Connection rejected */
1869 		struct hci_cp_reject_conn_req cp;
1870 
1871 		bacpy(&cp.bdaddr, &ev->bdaddr);
1872 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1873 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1874 	}
1875 }
1876 
1877 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1878 {
1879 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1880 	struct hci_conn *conn;
1881 
1882 	BT_DBG("%s status %d", hdev->name, ev->status);
1883 
1884 	hci_dev_lock(hdev);
1885 
1886 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1887 	if (!conn)
1888 		goto unlock;
1889 
1890 	if (ev->status == 0)
1891 		conn->state = BT_CLOSED;
1892 
1893 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1894 			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
1895 		if (ev->status != 0)
1896 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1897 						conn->dst_type, ev->status);
1898 		else
1899 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1900 						 conn->dst_type);
1901 	}
1902 
1903 	if (ev->status == 0) {
1904 		hci_proto_disconn_cfm(conn, ev->reason);
1905 		hci_conn_del(conn);
1906 	}
1907 
1908 unlock:
1909 	hci_dev_unlock(hdev);
1910 }
1911 
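/* Authentication Complete: update the link mode and security level, report
 * failures to mgmt and continue with encryption setup when it is pending. */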
1912 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1913 {
1914 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1915 	struct hci_conn *conn;
1916 
1917 	BT_DBG("%s status %d", hdev->name, ev->status);
1918 
1919 	hci_dev_lock(hdev);
1920 
1921 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1922 	if (!conn)
1923 		goto unlock;
1924 
1925 	if (!ev->status) {
1926 		if (!hci_conn_ssp_enabled(conn) &&
1927 				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1928 			BT_INFO("re-auth of legacy device is not possible.");
1929 		} else {
1930 			conn->link_mode |= HCI_LM_AUTH;
1931 			conn->sec_level = conn->pending_sec_level;
1932 		}
1933 	} else {
1934 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1935 				 ev->status);
1936 	}
1937 
1938 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1939 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1940 
1941 	if (conn->state == BT_CONFIG) {
1942 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1943 			struct hci_cp_set_conn_encrypt cp;
1944 			cp.handle  = ev->handle;
1945 			cp.encrypt = 0x01;
1946 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1947 									&cp);
1948 		} else {
1949 			conn->state = BT_CONNECTED;
1950 			hci_proto_connect_cfm(conn, ev->status);
1951 			hci_conn_put(conn);
1952 		}
1953 	} else {
1954 		hci_auth_cfm(conn, ev->status);
1955 
1956 		hci_conn_hold(conn);
1957 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1958 		hci_conn_put(conn);
1959 	}
1960 
1961 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1962 		if (!ev->status) {
1963 			struct hci_cp_set_conn_encrypt cp;
1964 			cp.handle  = ev->handle;
1965 			cp.encrypt = 0x01;
1966 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1967 									&cp);
1968 		} else {
1969 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1970 			hci_encrypt_cfm(conn, ev->status, 0x00);
1971 		}
1972 	}
1973 
1974 unlock:
1975 	hci_dev_unlock(hdev);
1976 }
1977 
1978 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1979 {
1980 	struct hci_ev_remote_name *ev = (void *) skb->data;
1981 	struct hci_conn *conn;
1982 
1983 	BT_DBG("%s", hdev->name);
1984 
1985 	hci_conn_check_pending(hdev);
1986 
1987 	hci_dev_lock(hdev);
1988 
1989 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1990 
1991 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1992 		goto check_auth;
1993 
1994 	if (ev->status == 0)
1995 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1996 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1997 	else
1998 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1999 
2000 check_auth:
2001 	if (!conn)
2002 		goto unlock;
2003 
2004 	if (!hci_outgoing_auth_needed(hdev, conn))
2005 		goto unlock;
2006 
2007 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2008 		struct hci_cp_auth_requested cp;
2009 		cp.handle = __cpu_to_le16(conn->handle);
2010 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2011 	}
2012 
2013 unlock:
2014 	hci_dev_unlock(hdev);
2015 }
2016 
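/* Encryption Change event: update the link mode flags (encryption on the
 * link implies authentication here) and confirm the result to the upper
 * layers, completing connection setup for links still in BT_CONFIG.
 */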
2017 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2018 {
2019 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2020 	struct hci_conn *conn;
2021 
2022 	BT_DBG("%s status %d", hdev->name, ev->status);
2023 
2024 	hci_dev_lock(hdev);
2025 
2026 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2027 	if (conn) {
2028 		if (!ev->status) {
2029 			if (ev->encrypt) {
2030 				/* Encryption implies authentication */
2031 				conn->link_mode |= HCI_LM_AUTH;
2032 				conn->link_mode |= HCI_LM_ENCRYPT;
2033 				conn->sec_level = conn->pending_sec_level;
2034 			} else
2035 				conn->link_mode &= ~HCI_LM_ENCRYPT;
2036 		}
2037 
2038 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2039 
2040 		if (conn->state == BT_CONFIG) {
2041 			if (!ev->status)
2042 				conn->state = BT_CONNECTED;
2043 
2044 			hci_proto_connect_cfm(conn, ev->status);
2045 			hci_conn_put(conn);
2046 		} else
2047 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2048 	}
2049 
2050 	hci_dev_unlock(hdev);
2051 }
2052 
2053 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2054 {
2055 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2056 	struct hci_conn *conn;
2057 
2058 	BT_DBG("%s status %d", hdev->name, ev->status);
2059 
2060 	hci_dev_lock(hdev);
2061 
2062 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2063 	if (conn) {
2064 		if (!ev->status)
2065 			conn->link_mode |= HCI_LM_SECURE;
2066 
2067 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2068 
2069 		hci_key_change_cfm(conn, ev->status);
2070 	}
2071 
2072 	hci_dev_unlock(hdev);
2073 }
2074 
2075 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2076 {
2077 	struct hci_ev_remote_features *ev = (void *) skb->data;
2078 	struct hci_conn *conn;
2079 
2080 	BT_DBG("%s status %d", hdev->name, ev->status);
2081 
2082 	hci_dev_lock(hdev);
2083 
2084 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2085 	if (!conn)
2086 		goto unlock;
2087 
2088 	if (!ev->status)
2089 		memcpy(conn->features, ev->features, 8);
2090 
2091 	if (conn->state != BT_CONFIG)
2092 		goto unlock;
2093 
2094 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2095 		struct hci_cp_read_remote_ext_features cp;
2096 		cp.handle = ev->handle;
2097 		cp.page = 0x01;
2098 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2099 							sizeof(cp), &cp);
2100 		goto unlock;
2101 	}
2102 
2103 	if (!ev->status) {
2104 		struct hci_cp_remote_name_req cp;
2105 		memset(&cp, 0, sizeof(cp));
2106 		bacpy(&cp.bdaddr, &conn->dst);
2107 		cp.pscan_rep_mode = 0x02;
2108 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2109 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2110 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2111 				      conn->dst_type, 0, NULL, 0,
2112 				      conn->dev_class);
2113 
2114 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2115 		conn->state = BT_CONNECTED;
2116 		hci_proto_connect_cfm(conn, ev->status);
2117 		hci_conn_put(conn);
2118 	}
2119 
2120 unlock:
2121 	hci_dev_unlock(hdev);
2122 }
2123 
2124 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2125 {
2126 	BT_DBG("%s", hdev->name);
2127 }
2128 
2129 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2130 {
2131 	BT_DBG("%s", hdev->name);
2132 }
2133 
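/* Command Complete event: dispatch to the per-opcode hci_cc_* handlers,
 * cancel the command timeout timer for non-NOP completions and, when the
 * controller reports free command slots (ncmd), kick the command queue.
 */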
2134 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2135 {
2136 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2137 	__u16 opcode;
2138 
2139 	skb_pull(skb, sizeof(*ev));
2140 
2141 	opcode = __le16_to_cpu(ev->opcode);
2142 
2143 	switch (opcode) {
2144 	case HCI_OP_INQUIRY_CANCEL:
2145 		hci_cc_inquiry_cancel(hdev, skb);
2146 		break;
2147 
2148 	case HCI_OP_EXIT_PERIODIC_INQ:
2149 		hci_cc_exit_periodic_inq(hdev, skb);
2150 		break;
2151 
2152 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2153 		hci_cc_remote_name_req_cancel(hdev, skb);
2154 		break;
2155 
2156 	case HCI_OP_ROLE_DISCOVERY:
2157 		hci_cc_role_discovery(hdev, skb);
2158 		break;
2159 
2160 	case HCI_OP_READ_LINK_POLICY:
2161 		hci_cc_read_link_policy(hdev, skb);
2162 		break;
2163 
2164 	case HCI_OP_WRITE_LINK_POLICY:
2165 		hci_cc_write_link_policy(hdev, skb);
2166 		break;
2167 
2168 	case HCI_OP_READ_DEF_LINK_POLICY:
2169 		hci_cc_read_def_link_policy(hdev, skb);
2170 		break;
2171 
2172 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2173 		hci_cc_write_def_link_policy(hdev, skb);
2174 		break;
2175 
2176 	case HCI_OP_RESET:
2177 		hci_cc_reset(hdev, skb);
2178 		break;
2179 
2180 	case HCI_OP_WRITE_LOCAL_NAME:
2181 		hci_cc_write_local_name(hdev, skb);
2182 		break;
2183 
2184 	case HCI_OP_READ_LOCAL_NAME:
2185 		hci_cc_read_local_name(hdev, skb);
2186 		break;
2187 
2188 	case HCI_OP_WRITE_AUTH_ENABLE:
2189 		hci_cc_write_auth_enable(hdev, skb);
2190 		break;
2191 
2192 	case HCI_OP_WRITE_ENCRYPT_MODE:
2193 		hci_cc_write_encrypt_mode(hdev, skb);
2194 		break;
2195 
2196 	case HCI_OP_WRITE_SCAN_ENABLE:
2197 		hci_cc_write_scan_enable(hdev, skb);
2198 		break;
2199 
2200 	case HCI_OP_READ_CLASS_OF_DEV:
2201 		hci_cc_read_class_of_dev(hdev, skb);
2202 		break;
2203 
2204 	case HCI_OP_WRITE_CLASS_OF_DEV:
2205 		hci_cc_write_class_of_dev(hdev, skb);
2206 		break;
2207 
2208 	case HCI_OP_READ_VOICE_SETTING:
2209 		hci_cc_read_voice_setting(hdev, skb);
2210 		break;
2211 
2212 	case HCI_OP_WRITE_VOICE_SETTING:
2213 		hci_cc_write_voice_setting(hdev, skb);
2214 		break;
2215 
2216 	case HCI_OP_HOST_BUFFER_SIZE:
2217 		hci_cc_host_buffer_size(hdev, skb);
2218 		break;
2219 
2220 	case HCI_OP_WRITE_SSP_MODE:
2221 		hci_cc_write_ssp_mode(hdev, skb);
2222 		break;
2223 
2224 	case HCI_OP_READ_LOCAL_VERSION:
2225 		hci_cc_read_local_version(hdev, skb);
2226 		break;
2227 
2228 	case HCI_OP_READ_LOCAL_COMMANDS:
2229 		hci_cc_read_local_commands(hdev, skb);
2230 		break;
2231 
2232 	case HCI_OP_READ_LOCAL_FEATURES:
2233 		hci_cc_read_local_features(hdev, skb);
2234 		break;
2235 
2236 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2237 		hci_cc_read_local_ext_features(hdev, skb);
2238 		break;
2239 
2240 	case HCI_OP_READ_BUFFER_SIZE:
2241 		hci_cc_read_buffer_size(hdev, skb);
2242 		break;
2243 
2244 	case HCI_OP_READ_BD_ADDR:
2245 		hci_cc_read_bd_addr(hdev, skb);
2246 		break;
2247 
2248 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2249 		hci_cc_read_data_block_size(hdev, skb);
2250 		break;
2251 
2252 	case HCI_OP_WRITE_CA_TIMEOUT:
2253 		hci_cc_write_ca_timeout(hdev, skb);
2254 		break;
2255 
2256 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2257 		hci_cc_read_flow_control_mode(hdev, skb);
2258 		break;
2259 
2260 	case HCI_OP_READ_LOCAL_AMP_INFO:
2261 		hci_cc_read_local_amp_info(hdev, skb);
2262 		break;
2263 
2264 	case HCI_OP_DELETE_STORED_LINK_KEY:
2265 		hci_cc_delete_stored_link_key(hdev, skb);
2266 		break;
2267 
2268 	case HCI_OP_SET_EVENT_MASK:
2269 		hci_cc_set_event_mask(hdev, skb);
2270 		break;
2271 
2272 	case HCI_OP_WRITE_INQUIRY_MODE:
2273 		hci_cc_write_inquiry_mode(hdev, skb);
2274 		break;
2275 
2276 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2277 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2278 		break;
2279 
2280 	case HCI_OP_SET_EVENT_FLT:
2281 		hci_cc_set_event_flt(hdev, skb);
2282 		break;
2283 
2284 	case HCI_OP_PIN_CODE_REPLY:
2285 		hci_cc_pin_code_reply(hdev, skb);
2286 		break;
2287 
2288 	case HCI_OP_PIN_CODE_NEG_REPLY:
2289 		hci_cc_pin_code_neg_reply(hdev, skb);
2290 		break;
2291 
2292 	case HCI_OP_READ_LOCAL_OOB_DATA:
2293 		hci_cc_read_local_oob_data_reply(hdev, skb);
2294 		break;
2295 
2296 	case HCI_OP_LE_READ_BUFFER_SIZE:
2297 		hci_cc_le_read_buffer_size(hdev, skb);
2298 		break;
2299 
2300 	case HCI_OP_USER_CONFIRM_REPLY:
2301 		hci_cc_user_confirm_reply(hdev, skb);
2302 		break;
2303 
2304 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2305 		hci_cc_user_confirm_neg_reply(hdev, skb);
2306 		break;
2307 
2308 	case HCI_OP_USER_PASSKEY_REPLY:
2309 		hci_cc_user_passkey_reply(hdev, skb);
2310 		break;
2311 
2312 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2313 		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;
2314 
2315 	case HCI_OP_LE_SET_SCAN_PARAM:
2316 		hci_cc_le_set_scan_param(hdev, skb);
2317 		break;
2318 
2319 	case HCI_OP_LE_SET_SCAN_ENABLE:
2320 		hci_cc_le_set_scan_enable(hdev, skb);
2321 		break;
2322 
2323 	case HCI_OP_LE_LTK_REPLY:
2324 		hci_cc_le_ltk_reply(hdev, skb);
2325 		break;
2326 
2327 	case HCI_OP_LE_LTK_NEG_REPLY:
2328 		hci_cc_le_ltk_neg_reply(hdev, skb);
2329 		break;
2330 
2331 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2332 		hci_cc_write_le_host_supported(hdev, skb);
2333 		break;
2334 
2335 	default:
2336 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2337 		break;
2338 	}
2339 
2340 	if (ev->opcode != HCI_OP_NOP)
2341 		del_timer(&hdev->cmd_timer);
2342 
2343 	if (ev->ncmd) {
2344 		atomic_set(&hdev->cmd_cnt, 1);
2345 		if (!skb_queue_empty(&hdev->cmd_q))
2346 			queue_work(hdev->workqueue, &hdev->cmd_work);
2347 	}
2348 }
2349 
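/* Command Status event: the command is still in progress and only a
 * status is reported; dispatch to the hci_cs_* handlers, cancel the
 * command timer for non-NOP opcodes and resubmit queued commands when
 * slots are free and no controller reset is pending.
 */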
2350 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2351 {
2352 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2353 	__u16 opcode;
2354 
2355 	skb_pull(skb, sizeof(*ev));
2356 
2357 	opcode = __le16_to_cpu(ev->opcode);
2358 
2359 	switch (opcode) {
2360 	case HCI_OP_INQUIRY:
2361 		hci_cs_inquiry(hdev, ev->status);
2362 		break;
2363 
2364 	case HCI_OP_CREATE_CONN:
2365 		hci_cs_create_conn(hdev, ev->status);
2366 		break;
2367 
2368 	case HCI_OP_ADD_SCO:
2369 		hci_cs_add_sco(hdev, ev->status);
2370 		break;
2371 
2372 	case HCI_OP_AUTH_REQUESTED:
2373 		hci_cs_auth_requested(hdev, ev->status);
2374 		break;
2375 
2376 	case HCI_OP_SET_CONN_ENCRYPT:
2377 		hci_cs_set_conn_encrypt(hdev, ev->status);
2378 		break;
2379 
2380 	case HCI_OP_REMOTE_NAME_REQ:
2381 		hci_cs_remote_name_req(hdev, ev->status);
2382 		break;
2383 
2384 	case HCI_OP_READ_REMOTE_FEATURES:
2385 		hci_cs_read_remote_features(hdev, ev->status);
2386 		break;
2387 
2388 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2389 		hci_cs_read_remote_ext_features(hdev, ev->status);
2390 		break;
2391 
2392 	case HCI_OP_SETUP_SYNC_CONN:
2393 		hci_cs_setup_sync_conn(hdev, ev->status);
2394 		break;
2395 
2396 	case HCI_OP_SNIFF_MODE:
2397 		hci_cs_sniff_mode(hdev, ev->status);
2398 		break;
2399 
2400 	case HCI_OP_EXIT_SNIFF_MODE:
2401 		hci_cs_exit_sniff_mode(hdev, ev->status);
2402 		break;
2403 
2404 	case HCI_OP_DISCONNECT:
2405 		hci_cs_disconnect(hdev, ev->status);
2406 		break;
2407 
2408 	case HCI_OP_LE_CREATE_CONN:
2409 		hci_cs_le_create_conn(hdev, ev->status);
2410 		break;
2411 
2412 	case HCI_OP_LE_START_ENC:
2413 		hci_cs_le_start_enc(hdev, ev->status);
2414 		break;
2415 
2416 	default:
2417 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2418 		break;
2419 	}
2420 
2421 	if (ev->opcode != HCI_OP_NOP)
2422 		del_timer(&hdev->cmd_timer);
2423 
2424 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2425 		atomic_set(&hdev->cmd_cnt, 1);
2426 		if (!skb_queue_empty(&hdev->cmd_q))
2427 			queue_work(hdev->workqueue, &hdev->cmd_work);
2428 	}
2429 }
2430 
2431 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2432 {
2433 	struct hci_ev_role_change *ev = (void *) skb->data;
2434 	struct hci_conn *conn;
2435 
2436 	BT_DBG("%s status %d", hdev->name, ev->status);
2437 
2438 	hci_dev_lock(hdev);
2439 
2440 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2441 	if (conn) {
2442 		if (!ev->status) {
2443 			if (ev->role)
2444 				conn->link_mode &= ~HCI_LM_MASTER;
2445 			else
2446 				conn->link_mode |= HCI_LM_MASTER;
2447 		}
2448 
2449 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2450 
2451 		hci_role_switch_cfm(conn, ev->status, ev->role);
2452 	}
2453 
2454 	hci_dev_unlock(hdev);
2455 }
2456 
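/* Number of Completed Packets event (packet-based flow control): credit
 * the per-link-type transmit counters for each reported handle, clamping
 * them to the limits read from the controller, and reschedule TX work.
 */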
2457 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2458 {
2459 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2460 	int i;
2461 
2462 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2463 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2464 		return;
2465 	}
2466 
2467 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2468 			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2469 		BT_DBG("%s bad parameters", hdev->name);
2470 		return;
2471 	}
2472 
2473 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2474 
2475 	for (i = 0; i < ev->num_hndl; i++) {
2476 		struct hci_comp_pkts_info *info = &ev->handles[i];
2477 		struct hci_conn *conn;
2478 		__u16  handle, count;
2479 
2480 		handle = __le16_to_cpu(info->handle);
2481 		count  = __le16_to_cpu(info->count);
2482 
2483 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2484 		if (!conn)
2485 			continue;
2486 
2487 		conn->sent -= count;
2488 
2489 		switch (conn->type) {
2490 		case ACL_LINK:
2491 			hdev->acl_cnt += count;
2492 			if (hdev->acl_cnt > hdev->acl_pkts)
2493 				hdev->acl_cnt = hdev->acl_pkts;
2494 			break;
2495 
2496 		case LE_LINK:
2497 			if (hdev->le_pkts) {
2498 				hdev->le_cnt += count;
2499 				if (hdev->le_cnt > hdev->le_pkts)
2500 					hdev->le_cnt = hdev->le_pkts;
2501 			} else {
2502 				hdev->acl_cnt += count;
2503 				if (hdev->acl_cnt > hdev->acl_pkts)
2504 					hdev->acl_cnt = hdev->acl_pkts;
2505 			}
2506 			break;
2507 
2508 		case SCO_LINK:
2509 			hdev->sco_cnt += count;
2510 			if (hdev->sco_cnt > hdev->sco_pkts)
2511 				hdev->sco_cnt = hdev->sco_pkts;
2512 			break;
2513 
2514 		default:
2515 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2516 			break;
2517 		}
2518 	}
2519 
2520 	queue_work(hdev->workqueue, &hdev->tx_work);
2521 }
2522 
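/* Number of Completed Data Blocks event (block-based flow control): the
 * block-based counterpart of the handler above, crediting
 * hdev->block_cnt instead of the per-packet counters.
 */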
2523 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2524 					   struct sk_buff *skb)
2525 {
2526 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2527 	int i;
2528 
2529 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2530 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2531 		return;
2532 	}
2533 
2534 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2535 			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2536 		BT_DBG("%s bad parameters", hdev->name);
2537 		return;
2538 	}
2539 
2540 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2541 								ev->num_hndl);
2542 
2543 	for (i = 0; i < ev->num_hndl; i++) {
2544 		struct hci_comp_blocks_info *info = &ev->handles[i];
2545 		struct hci_conn *conn;
2546 		__u16  handle, block_count;
2547 
2548 		handle = __le16_to_cpu(info->handle);
2549 		block_count = __le16_to_cpu(info->blocks);
2550 
2551 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2552 		if (!conn)
2553 			continue;
2554 
2555 		conn->sent -= block_count;
2556 
2557 		switch (conn->type) {
2558 		case ACL_LINK:
2559 			hdev->block_cnt += block_count;
2560 			if (hdev->block_cnt > hdev->num_blocks)
2561 				hdev->block_cnt = hdev->num_blocks;
2562 			break;
2563 
2564 		default:
2565 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2566 			break;
2567 		}
2568 	}
2569 
2570 	queue_work(hdev->workqueue, &hdev->tx_work);
2571 }
2572 
2573 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2574 {
2575 	struct hci_ev_mode_change *ev = (void *) skb->data;
2576 	struct hci_conn *conn;
2577 
2578 	BT_DBG("%s status %d", hdev->name, ev->status);
2579 
2580 	hci_dev_lock(hdev);
2581 
2582 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2583 	if (conn) {
2584 		conn->mode = ev->mode;
2585 		conn->interval = __le16_to_cpu(ev->interval);
2586 
2587 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2588 			if (conn->mode == HCI_CM_ACTIVE)
2589 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2590 			else
2591 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2592 		}
2593 
2594 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2595 			hci_sco_setup(conn, ev->status);
2596 	}
2597 
2598 	hci_dev_unlock(hdev);
2599 }
2600 
2601 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2602 {
2603 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2604 	struct hci_conn *conn;
2605 
2606 	BT_DBG("%s", hdev->name);
2607 
2608 	hci_dev_lock(hdev);
2609 
2610 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2611 	if (!conn)
2612 		goto unlock;
2613 
2614 	if (conn->state == BT_CONNECTED) {
2615 		hci_conn_hold(conn);
2616 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2617 		hci_conn_put(conn);
2618 	}
2619 
2620 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2621 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2622 					sizeof(ev->bdaddr), &ev->bdaddr);
2623 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2624 		u8 secure;
2625 
2626 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2627 			secure = 1;
2628 		else
2629 			secure = 0;
2630 
2631 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2632 	}
2633 
2634 unlock:
2635 	hci_dev_unlock(hdev);
2636 }
2637 
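/* Link Key Request event: look up a stored key for the peer address and
 * reply with it, unless policy forbids its use (debug keys disabled, an
 * unauthenticated key when MITM protection is required, or a combination
 * key from a short PIN when high security is pending); otherwise send a
 * negative reply.
 */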
2638 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2639 {
2640 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2641 	struct hci_cp_link_key_reply cp;
2642 	struct hci_conn *conn;
2643 	struct link_key *key;
2644 
2645 	BT_DBG("%s", hdev->name);
2646 
2647 	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2648 		return;
2649 
2650 	hci_dev_lock(hdev);
2651 
2652 	key = hci_find_link_key(hdev, &ev->bdaddr);
2653 	if (!key) {
2654 		BT_DBG("%s link key not found for %s", hdev->name,
2655 							batostr(&ev->bdaddr));
2656 		goto not_found;
2657 	}
2658 
2659 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2660 							batostr(&ev->bdaddr));
2661 
2662 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2663 				key->type == HCI_LK_DEBUG_COMBINATION) {
2664 		BT_DBG("%s ignoring debug key", hdev->name);
2665 		goto not_found;
2666 	}
2667 
2668 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2669 	if (conn) {
2670 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2671 				conn->auth_type != 0xff &&
2672 				(conn->auth_type & 0x01)) {
2673 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2674 			goto not_found;
2675 		}
2676 
2677 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2678 				conn->pending_sec_level == BT_SECURITY_HIGH) {
2679 			BT_DBG("%s ignoring key unauthenticated for high security",
2680 								hdev->name);
2681 			goto not_found;
2682 		}
2683 
2684 		conn->key_type = key->type;
2685 		conn->pin_length = key->pin_len;
2686 	}
2687 
2688 	bacpy(&cp.bdaddr, &ev->bdaddr);
2689 	memcpy(cp.link_key, key->val, 16);
2690 
2691 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2692 
2693 	hci_dev_unlock(hdev);
2694 
2695 	return;
2696 
2697 not_found:
2698 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2699 	hci_dev_unlock(hdev);
2700 }
2701 
2702 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2703 {
2704 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2705 	struct hci_conn *conn;
2706 	u8 pin_len = 0;
2707 
2708 	BT_DBG("%s", hdev->name);
2709 
2710 	hci_dev_lock(hdev);
2711 
2712 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2713 	if (conn) {
2714 		hci_conn_hold(conn);
2715 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2716 		pin_len = conn->pin_length;
2717 
2718 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2719 			conn->key_type = ev->key_type;
2720 
2721 		hci_conn_put(conn);
2722 	}
2723 
2724 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2725 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2726 							ev->key_type, pin_len);
2727 
2728 	hci_dev_unlock(hdev);
2729 }
2730 
2731 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2732 {
2733 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2734 	struct hci_conn *conn;
2735 
2736 	BT_DBG("%s status %d", hdev->name, ev->status);
2737 
2738 	hci_dev_lock(hdev);
2739 
2740 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2741 	if (conn && !ev->status) {
2742 		struct inquiry_entry *ie;
2743 
2744 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2745 		if (ie) {
2746 			ie->data.clock_offset = ev->clock_offset;
2747 			ie->timestamp = jiffies;
2748 		}
2749 	}
2750 
2751 	hci_dev_unlock(hdev);
2752 }
2753 
2754 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2755 {
2756 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2757 	struct hci_conn *conn;
2758 
2759 	BT_DBG("%s status %d", hdev->name, ev->status);
2760 
2761 	hci_dev_lock(hdev);
2762 
2763 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2764 	if (conn && !ev->status)
2765 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2766 
2767 	hci_dev_unlock(hdev);
2768 }
2769 
2770 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2771 {
2772 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2773 	struct inquiry_entry *ie;
2774 
2775 	BT_DBG("%s", hdev->name);
2776 
2777 	hci_dev_lock(hdev);
2778 
2779 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2780 	if (ie) {
2781 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2782 		ie->timestamp = jiffies;
2783 	}
2784 
2785 	hci_dev_unlock(hdev);
2786 }
2787 
2788 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2789 {
2790 	struct inquiry_data data;
2791 	int num_rsp = *((__u8 *) skb->data);
2792 	bool name_known, ssp;
2793 
2794 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2795 
2796 	if (!num_rsp)
2797 		return;
2798 
2799 	hci_dev_lock(hdev);
2800 
2801 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2802 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2803 		info = (void *) (skb->data + 1);
2804 
2805 		for (; num_rsp; num_rsp--, info++) {
2806 			bacpy(&data.bdaddr, &info->bdaddr);
2807 			data.pscan_rep_mode	= info->pscan_rep_mode;
2808 			data.pscan_period_mode	= info->pscan_period_mode;
2809 			data.pscan_mode		= info->pscan_mode;
2810 			memcpy(data.dev_class, info->dev_class, 3);
2811 			data.clock_offset	= info->clock_offset;
2812 			data.rssi		= info->rssi;
2813 			data.ssp_mode		= 0x00;
2814 
2815 			name_known = hci_inquiry_cache_update(hdev, &data,
2816 							      false, &ssp);
2817 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2818 					  info->dev_class, info->rssi,
2819 					  !name_known, ssp, NULL, 0);
2820 		}
2821 	} else {
2822 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2823 
2824 		for (; num_rsp; num_rsp--, info++) {
2825 			bacpy(&data.bdaddr, &info->bdaddr);
2826 			data.pscan_rep_mode	= info->pscan_rep_mode;
2827 			data.pscan_period_mode	= info->pscan_period_mode;
2828 			data.pscan_mode		= 0x00;
2829 			memcpy(data.dev_class, info->dev_class, 3);
2830 			data.clock_offset	= info->clock_offset;
2831 			data.rssi		= info->rssi;
2832 			data.ssp_mode		= 0x00;
2833 			name_known = hci_inquiry_cache_update(hdev, &data,
2834 							      false, &ssp);
2835 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2836 					  info->dev_class, info->rssi,
2837 					  !name_known, ssp, NULL, 0);
2838 		}
2839 	}
2840 
2841 	hci_dev_unlock(hdev);
2842 }
2843 
2844 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2845 {
2846 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2847 	struct hci_conn *conn;
2848 
2849 	BT_DBG("%s", hdev->name);
2850 
2851 	hci_dev_lock(hdev);
2852 
2853 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2854 	if (!conn)
2855 		goto unlock;
2856 
2857 	if (!ev->status && ev->page == 0x01) {
2858 		struct inquiry_entry *ie;
2859 
2860 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2861 		if (ie)
2862 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2863 
2864 		if (ev->features[0] & LMP_HOST_SSP)
2865 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2866 	}
2867 
2868 	if (conn->state != BT_CONFIG)
2869 		goto unlock;
2870 
2871 	if (!ev->status) {
2872 		struct hci_cp_remote_name_req cp;
2873 		memset(&cp, 0, sizeof(cp));
2874 		bacpy(&cp.bdaddr, &conn->dst);
2875 		cp.pscan_rep_mode = 0x02;
2876 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2877 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2878 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2879 				      conn->dst_type, 0, NULL, 0,
2880 				      conn->dev_class);
2881 
2882 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2883 		conn->state = BT_CONNECTED;
2884 		hci_proto_connect_cfm(conn, ev->status);
2885 		hci_conn_put(conn);
2886 	}
2887 
2888 unlock:
2889 	hci_dev_unlock(hdev);
2890 }
2891 
2892 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2893 {
2894 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2895 	struct hci_conn *conn;
2896 
2897 	BT_DBG("%s status %d", hdev->name, ev->status);
2898 
2899 	hci_dev_lock(hdev);
2900 
2901 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2902 	if (!conn) {
2903 		if (ev->link_type == ESCO_LINK)
2904 			goto unlock;
2905 
2906 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2907 		if (!conn)
2908 			goto unlock;
2909 
2910 		conn->type = SCO_LINK;
2911 	}
2912 
2913 	switch (ev->status) {
2914 	case 0x00:
2915 		conn->handle = __le16_to_cpu(ev->handle);
2916 		conn->state  = BT_CONNECTED;
2917 
2918 		hci_conn_hold_device(conn);
2919 		hci_conn_add_sysfs(conn);
2920 		break;
2921 
2922 	case 0x11:	/* Unsupported Feature or Parameter Value */
2923 	case 0x1c:	/* SCO interval rejected */
2924 	case 0x1a:	/* Unsupported Remote Feature */
2925 	case 0x1f:	/* Unspecified error */
2926 		if (conn->out && conn->attempt < 2) {
2927 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2928 					(hdev->esco_type & EDR_ESCO_MASK);
2929 			hci_setup_sync(conn, conn->link->handle);
2930 			goto unlock;
2931 		}
2932 		/* fall through */
2933 
2934 	default:
2935 		conn->state = BT_CLOSED;
2936 		break;
2937 	}
2938 
2939 	hci_proto_connect_cfm(conn, ev->status);
2940 	if (ev->status)
2941 		hci_conn_del(conn);
2942 
2943 unlock:
2944 	hci_dev_unlock(hdev);
2945 }
2946 
2947 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2948 {
2949 	BT_DBG("%s", hdev->name);
2950 }
2951 
2952 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2953 {
2954 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2955 
2956 	BT_DBG("%s status %d", hdev->name, ev->status);
2957 }
2958 
2959 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2960 {
2961 	struct inquiry_data data;
2962 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2963 	int num_rsp = *((__u8 *) skb->data);
2964 
2965 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2966 
2967 	if (!num_rsp)
2968 		return;
2969 
2970 	hci_dev_lock(hdev);
2971 
2972 	for (; num_rsp; num_rsp--, info++) {
2973 		bool name_known, ssp;
2974 
2975 		bacpy(&data.bdaddr, &info->bdaddr);
2976 		data.pscan_rep_mode	= info->pscan_rep_mode;
2977 		data.pscan_period_mode	= info->pscan_period_mode;
2978 		data.pscan_mode		= 0x00;
2979 		memcpy(data.dev_class, info->dev_class, 3);
2980 		data.clock_offset	= info->clock_offset;
2981 		data.rssi		= info->rssi;
2982 		data.ssp_mode		= 0x01;
2983 
2984 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
2985 			name_known = eir_has_data_type(info->data,
2986 						       sizeof(info->data),
2987 						       EIR_NAME_COMPLETE);
2988 		else
2989 			name_known = true;
2990 
2991 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2992 						      &ssp);
2993 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2994 				  info->dev_class, info->rssi, !name_known,
2995 				  ssp, info->data, sizeof(info->data));
2996 	}
2997 
2998 	hci_dev_unlock(hdev);
2999 }
3000 
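/* Derive the authentication requirements to use in the IO Capability
 * reply from what the remote side requested and what was configured
 * locally (conn->auth_type), following the remote side's bonding
 * preference where possible.
 */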
3001 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3002 {
3003 	/* If remote requests dedicated bonding follow that lead */
3004 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3005 		/* If both remote and local IO capabilities allow MITM
3006 		 * protection, then require it; otherwise don't */
3007 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3008 			return 0x02;
3009 		else
3010 			return 0x03;
3011 	}
3012 
3013 	/* If remote requests no-bonding follow that lead */
3014 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3015 		return conn->remote_auth | (conn->auth_type & 0x01);
3016 
3017 	return conn->auth_type;
3018 }
3019 
3020 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3021 {
3022 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3023 	struct hci_conn *conn;
3024 
3025 	BT_DBG("%s", hdev->name);
3026 
3027 	hci_dev_lock(hdev);
3028 
3029 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3030 	if (!conn)
3031 		goto unlock;
3032 
3033 	hci_conn_hold(conn);
3034 
3035 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3036 		goto unlock;
3037 
3038 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3039 			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3040 		struct hci_cp_io_capability_reply cp;
3041 
3042 		bacpy(&cp.bdaddr, &ev->bdaddr);
3043 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3044 		 * as KeyboardDisplay is not supported by the BT spec here. */
3045 		cp.capability = (conn->io_capability == 0x04) ?
3046 						0x01 : conn->io_capability;
3047 		conn->auth_type = hci_get_auth_req(conn);
3048 		cp.authentication = conn->auth_type;
3049 
3050 		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3051 				hci_find_remote_oob_data(hdev, &conn->dst))
3052 			cp.oob_data = 0x01;
3053 		else
3054 			cp.oob_data = 0x00;
3055 
3056 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3057 							sizeof(cp), &cp);
3058 	} else {
3059 		struct hci_cp_io_capability_neg_reply cp;
3060 
3061 		bacpy(&cp.bdaddr, &ev->bdaddr);
3062 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3063 
3064 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3065 							sizeof(cp), &cp);
3066 	}
3067 
3068 unlock:
3069 	hci_dev_unlock(hdev);
3070 }
3071 
3072 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3073 {
3074 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3075 	struct hci_conn *conn;
3076 
3077 	BT_DBG("%s", hdev->name);
3078 
3079 	hci_dev_lock(hdev);
3080 
3081 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3082 	if (!conn)
3083 		goto unlock;
3084 
3085 	conn->remote_cap = ev->capability;
3086 	conn->remote_auth = ev->authentication;
3087 	if (ev->oob_data)
3088 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3089 
3090 unlock:
3091 	hci_dev_unlock(hdev);
3092 }
3093 
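/* User Confirmation Request event: reject when MITM protection is
 * required but the remote side cannot provide it, auto-accept (possibly
 * after a configurable delay) when neither side needs MITM, and
 * otherwise hand the request to user space via mgmt.
 */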
3094 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3095 							struct sk_buff *skb)
3096 {
3097 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3098 	int loc_mitm, rem_mitm, confirm_hint = 0;
3099 	struct hci_conn *conn;
3100 
3101 	BT_DBG("%s", hdev->name);
3102 
3103 	hci_dev_lock(hdev);
3104 
3105 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3106 		goto unlock;
3107 
3108 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3109 	if (!conn)
3110 		goto unlock;
3111 
3112 	loc_mitm = (conn->auth_type & 0x01);
3113 	rem_mitm = (conn->remote_auth & 0x01);
3114 
3115 	/* If we require MITM but the remote device can't provide that
3116 	 * (it has NoInputNoOutput) then reject the confirmation
3117 	 * request. The only exception is when we're dedicated bonding
3118 	 * initiators (connect_cfm_cb set) since then we always have the MITM
3119 	 * bit set. */
3120 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3121 		BT_DBG("Rejecting request: remote device can't provide MITM");
3122 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3123 					sizeof(ev->bdaddr), &ev->bdaddr);
3124 		goto unlock;
3125 	}
3126 
3127 	/* If no side requires MITM protection, auto-accept */
3128 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3129 				(!rem_mitm || conn->io_capability == 0x03)) {
3130 
3131 		/* If we're not the initiators, request authorization to
3132 		 * proceed from user space (mgmt_user_confirm with
3133 		 * confirm_hint set to 1). */
3134 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3135 			BT_DBG("Confirming auto-accept as acceptor");
3136 			confirm_hint = 1;
3137 			goto confirm;
3138 		}
3139 
3140 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3141 						hdev->auto_accept_delay);
3142 
3143 		if (hdev->auto_accept_delay > 0) {
3144 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3145 			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3146 			goto unlock;
3147 		}
3148 
3149 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3150 						sizeof(ev->bdaddr), &ev->bdaddr);
3151 		goto unlock;
3152 	}
3153 
3154 confirm:
3155 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3156 				  confirm_hint);
3157 
3158 unlock:
3159 	hci_dev_unlock(hdev);
3160 }
3161 
3162 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3163 							struct sk_buff *skb)
3164 {
3165 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3166 
3167 	BT_DBG("%s", hdev->name);
3168 
3169 	hci_dev_lock(hdev);
3170 
3171 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3172 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3173 
3174 	hci_dev_unlock(hdev);
3175 }
3176 
3177 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3178 {
3179 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3180 	struct hci_conn *conn;
3181 
3182 	BT_DBG("%s", hdev->name);
3183 
3184 	hci_dev_lock(hdev);
3185 
3186 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3187 	if (!conn)
3188 		goto unlock;
3189 
3190 	/* To avoid duplicate auth_failed events to user space we check
3191 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3192 	 * initiated the authentication. A traditional auth_complete
3193 	 * event is always produced for the initiator and is also mapped
3194 	 * to the mgmt_auth_failed event */
3195 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3196 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3197 				 ev->status);
3198 
3199 	hci_conn_put(conn);
3200 
3201 unlock:
3202 	hci_dev_unlock(hdev);
3203 }
3204 
3205 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3206 {
3207 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3208 	struct inquiry_entry *ie;
3209 
3210 	BT_DBG("%s", hdev->name);
3211 
3212 	hci_dev_lock(hdev);
3213 
3214 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3215 	if (ie)
3216 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3217 
3218 	hci_dev_unlock(hdev);
3219 }
3220 
3221 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3222 						   struct sk_buff *skb)
3223 {
3224 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3225 	struct oob_data *data;
3226 
3227 	BT_DBG("%s", hdev->name);
3228 
3229 	hci_dev_lock(hdev);
3230 
3231 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3232 		goto unlock;
3233 
3234 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3235 	if (data) {
3236 		struct hci_cp_remote_oob_data_reply cp;
3237 
3238 		bacpy(&cp.bdaddr, &ev->bdaddr);
3239 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3240 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3241 
3242 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3243 									&cp);
3244 	} else {
3245 		struct hci_cp_remote_oob_data_neg_reply cp;
3246 
3247 		bacpy(&cp.bdaddr, &ev->bdaddr);
3248 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3249 									&cp);
3250 	}
3251 
3252 unlock:
3253 	hci_dev_unlock(hdev);
3254 }
3255 
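/* LE Connection Complete event: create a connection object for incoming
 * links if none exists yet, report success or failure through mgmt and
 * confirm the new connection to the upper layers.
 */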
3256 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3257 {
3258 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3259 	struct hci_conn *conn;
3260 
3261 	BT_DBG("%s status %d", hdev->name, ev->status);
3262 
3263 	hci_dev_lock(hdev);
3264 
3265 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3266 	if (!conn) {
3267 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3268 		if (!conn) {
3269 			BT_ERR("No memory for new connection");
3270 			hci_dev_unlock(hdev);
3271 			return;
3272 		}
3273 
3274 		conn->dst_type = ev->bdaddr_type;
3275 	}
3276 
3277 	if (ev->status) {
3278 		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3279 						conn->dst_type, ev->status);
3280 		hci_proto_connect_cfm(conn, ev->status);
3281 		conn->state = BT_CLOSED;
3282 		hci_conn_del(conn);
3283 		goto unlock;
3284 	}
3285 
3286 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3287 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3288 				      conn->dst_type, 0, NULL, 0, NULL);
3289 
3290 	conn->sec_level = BT_SECURITY_LOW;
3291 	conn->handle = __le16_to_cpu(ev->handle);
3292 	conn->state = BT_CONNECTED;
3293 
3294 	hci_conn_hold_device(conn);
3295 	hci_conn_add_sysfs(conn);
3296 
3297 	hci_proto_connect_cfm(conn, ev->status);
3298 
3299 unlock:
3300 	hci_dev_unlock(hdev);
3301 }
3302 
3303 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3304 						struct sk_buff *skb)
3305 {
3306 	u8 num_reports = skb->data[0];
3307 	void *ptr = &skb->data[1];
3308 	s8 rssi;
3309 
3310 	hci_dev_lock(hdev);
3311 
3312 	while (num_reports--) {
3313 		struct hci_ev_le_advertising_info *ev = ptr;
3314 
3315 		hci_add_adv_entry(hdev, ev);
3316 
3317 		rssi = ev->data[ev->length];
3318 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3319 				  NULL, rssi, 0, 1, ev->data, ev->length);
3320 
3321 		ptr += sizeof(*ev) + ev->length + 1;
3322 	}
3323 
3324 	hci_dev_unlock(hdev);
3325 }
3326 
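/* LE Long Term Key Request event: reply with a stored LTK matching the
 * connection if one is known (raising the security level for
 * authenticated keys and discarding STK entries after use), otherwise
 * send a negative reply.
 */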
3327 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3328 						struct sk_buff *skb)
3329 {
3330 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3331 	struct hci_cp_le_ltk_reply cp;
3332 	struct hci_cp_le_ltk_neg_reply neg;
3333 	struct hci_conn *conn;
3334 	struct smp_ltk *ltk;
3335 
3336 	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3337 
3338 	hci_dev_lock(hdev);
3339 
3340 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3341 	if (conn == NULL)
3342 		goto not_found;
3343 
3344 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3345 	if (ltk == NULL)
3346 		goto not_found;
3347 
3348 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3349 	cp.handle = cpu_to_le16(conn->handle);
3350 
3351 	if (ltk->authenticated)
3352 		conn->sec_level = BT_SECURITY_HIGH;
3353 
3354 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3355 
3356 	if (ltk->type & HCI_SMP_STK) {
3357 		list_del(&ltk->list);
3358 		kfree(ltk);
3359 	}
3360 
3361 	hci_dev_unlock(hdev);
3362 
3363 	return;
3364 
3365 not_found:
3366 	neg.handle = ev->handle;
3367 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3368 	hci_dev_unlock(hdev);
3369 }
3370 
3371 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3372 {
3373 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3374 
3375 	skb_pull(skb, sizeof(*le_ev));
3376 
3377 	switch (le_ev->subevent) {
3378 	case HCI_EV_LE_CONN_COMPLETE:
3379 		hci_le_conn_complete_evt(hdev, skb);
3380 		break;
3381 
3382 	case HCI_EV_LE_ADVERTISING_REPORT:
3383 		hci_le_adv_report_evt(hdev, skb);
3384 		break;
3385 
3386 	case HCI_EV_LE_LTK_REQ:
3387 		hci_le_ltk_request_evt(hdev, skb);
3388 		break;
3389 
3390 	default:
3391 		break;
3392 	}
3393 }
3394 
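/* Main HCI event demultiplexer: strip the event header, dispatch the
 * packet to the handler for its event code, then free the skb and update
 * the RX statistics.
 */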
3395 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3396 {
3397 	struct hci_event_hdr *hdr = (void *) skb->data;
3398 	__u8 event = hdr->evt;
3399 
3400 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3401 
3402 	switch (event) {
3403 	case HCI_EV_INQUIRY_COMPLETE:
3404 		hci_inquiry_complete_evt(hdev, skb);
3405 		break;
3406 
3407 	case HCI_EV_INQUIRY_RESULT:
3408 		hci_inquiry_result_evt(hdev, skb);
3409 		break;
3410 
3411 	case HCI_EV_CONN_COMPLETE:
3412 		hci_conn_complete_evt(hdev, skb);
3413 		break;
3414 
3415 	case HCI_EV_CONN_REQUEST:
3416 		hci_conn_request_evt(hdev, skb);
3417 		break;
3418 
3419 	case HCI_EV_DISCONN_COMPLETE:
3420 		hci_disconn_complete_evt(hdev, skb);
3421 		break;
3422 
3423 	case HCI_EV_AUTH_COMPLETE:
3424 		hci_auth_complete_evt(hdev, skb);
3425 		break;
3426 
3427 	case HCI_EV_REMOTE_NAME:
3428 		hci_remote_name_evt(hdev, skb);
3429 		break;
3430 
3431 	case HCI_EV_ENCRYPT_CHANGE:
3432 		hci_encrypt_change_evt(hdev, skb);
3433 		break;
3434 
3435 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3436 		hci_change_link_key_complete_evt(hdev, skb);
3437 		break;
3438 
3439 	case HCI_EV_REMOTE_FEATURES:
3440 		hci_remote_features_evt(hdev, skb);
3441 		break;
3442 
3443 	case HCI_EV_REMOTE_VERSION:
3444 		hci_remote_version_evt(hdev, skb);
3445 		break;
3446 
3447 	case HCI_EV_QOS_SETUP_COMPLETE:
3448 		hci_qos_setup_complete_evt(hdev, skb);
3449 		break;
3450 
3451 	case HCI_EV_CMD_COMPLETE:
3452 		hci_cmd_complete_evt(hdev, skb);
3453 		break;
3454 
3455 	case HCI_EV_CMD_STATUS:
3456 		hci_cmd_status_evt(hdev, skb);
3457 		break;
3458 
3459 	case HCI_EV_ROLE_CHANGE:
3460 		hci_role_change_evt(hdev, skb);
3461 		break;
3462 
3463 	case HCI_EV_NUM_COMP_PKTS:
3464 		hci_num_comp_pkts_evt(hdev, skb);
3465 		break;
3466 
3467 	case HCI_EV_MODE_CHANGE:
3468 		hci_mode_change_evt(hdev, skb);
3469 		break;
3470 
3471 	case HCI_EV_PIN_CODE_REQ:
3472 		hci_pin_code_request_evt(hdev, skb);
3473 		break;
3474 
3475 	case HCI_EV_LINK_KEY_REQ:
3476 		hci_link_key_request_evt(hdev, skb);
3477 		break;
3478 
3479 	case HCI_EV_LINK_KEY_NOTIFY:
3480 		hci_link_key_notify_evt(hdev, skb);
3481 		break;
3482 
3483 	case HCI_EV_CLOCK_OFFSET:
3484 		hci_clock_offset_evt(hdev, skb);
3485 		break;
3486 
3487 	case HCI_EV_PKT_TYPE_CHANGE:
3488 		hci_pkt_type_change_evt(hdev, skb);
3489 		break;
3490 
3491 	case HCI_EV_PSCAN_REP_MODE:
3492 		hci_pscan_rep_mode_evt(hdev, skb);
3493 		break;
3494 
3495 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3496 		hci_inquiry_result_with_rssi_evt(hdev, skb);
3497 		break;
3498 
3499 	case HCI_EV_REMOTE_EXT_FEATURES:
3500 		hci_remote_ext_features_evt(hdev, skb);
3501 		break;
3502 
3503 	case HCI_EV_SYNC_CONN_COMPLETE:
3504 		hci_sync_conn_complete_evt(hdev, skb);
3505 		break;
3506 
3507 	case HCI_EV_SYNC_CONN_CHANGED:
3508 		hci_sync_conn_changed_evt(hdev, skb);
3509 		break;
3510 
3511 	case HCI_EV_SNIFF_SUBRATE:
3512 		hci_sniff_subrate_evt(hdev, skb);
3513 		break;
3514 
3515 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3516 		hci_extended_inquiry_result_evt(hdev, skb);
3517 		break;
3518 
3519 	case HCI_EV_IO_CAPA_REQUEST:
3520 		hci_io_capa_request_evt(hdev, skb);
3521 		break;
3522 
3523 	case HCI_EV_IO_CAPA_REPLY:
3524 		hci_io_capa_reply_evt(hdev, skb);
3525 		break;
3526 
3527 	case HCI_EV_USER_CONFIRM_REQUEST:
3528 		hci_user_confirm_request_evt(hdev, skb);
3529 		break;
3530 
3531 	case HCI_EV_USER_PASSKEY_REQUEST:
3532 		hci_user_passkey_request_evt(hdev, skb);
3533 		break;
3534 
3535 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3536 		hci_simple_pair_complete_evt(hdev, skb);
3537 		break;
3538 
3539 	case HCI_EV_REMOTE_HOST_FEATURES:
3540 		hci_remote_host_features_evt(hdev, skb);
3541 		break;
3542 
3543 	case HCI_EV_LE_META:
3544 		hci_le_meta_evt(hdev, skb);
3545 		break;
3546 
3547 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3548 		hci_remote_oob_data_request_evt(hdev, skb);
3549 		break;
3550 
3551 	case HCI_EV_NUM_COMP_BLOCKS:
3552 		hci_num_comp_blocks_evt(hdev, skb);
3553 		break;
3554 
3555 	default:
3556 		BT_DBG("%s event 0x%x", hdev->name, event);
3557 		break;
3558 	}
3559 
3560 	kfree_skb(skb);
3561 	hdev->stat.evt_rx++;
3562 }
3563