xref: /linux/net/bluetooth/hci_event.c (revision d584e204ff574b43d4dcaa87ae233c4e9d08e1fb)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <linux/module.h>
28 
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <net/sock.h>
39 
40 #include <linux/uaccess.h>
41 #include <asm/unaligned.h>
42 
43 #include <net/bluetooth/bluetooth.h>
44 #include <net/bluetooth/hci_core.h>
45 
46 /* Handle HCI Event packets */
47 
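/* Command Complete for HCI_Inquiry_Cancel: on failure report the failed
 * stop-discovery attempt to mgmt; on success clear HCI_INQUIRY, mark
 * discovery as stopped and retry any pending connection attempts. */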
48 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
49 {
50 	__u8 status = *((__u8 *) skb->data);
51 
52 	BT_DBG("%s status 0x%x", hdev->name, status);
53 
54 	if (status) {
55 		hci_dev_lock(hdev);
56 		mgmt_stop_discovery_failed(hdev, status);
57 		hci_dev_unlock(hdev);
58 		return;
59 	}
60 
61 	clear_bit(HCI_INQUIRY, &hdev->flags);
62 
63 	hci_dev_lock(hdev);
64 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
65 	hci_dev_unlock(hdev);
66 
67 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
68 
69 	hci_conn_check_pending(hdev);
70 }
71 
72 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73 {
74 	__u8 status = *((__u8 *) skb->data);
75 
76 	BT_DBG("%s status 0x%x", hdev->name, status);
77 
78 	if (status)
79 		return;
80 
81 	hci_conn_check_pending(hdev);
82 }
83 
84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
85 {
86 	BT_DBG("%s", hdev->name);
87 }
88 
89 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
90 {
91 	struct hci_rp_role_discovery *rp = (void *) skb->data;
92 	struct hci_conn *conn;
93 
94 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
95 
96 	if (rp->status)
97 		return;
98 
99 	hci_dev_lock(hdev);
100 
101 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
102 	if (conn) {
103 		if (rp->role)
104 			conn->link_mode &= ~HCI_LM_MASTER;
105 		else
106 			conn->link_mode |= HCI_LM_MASTER;
107 	}
108 
109 	hci_dev_unlock(hdev);
110 }
111 
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
113 {
114 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 	struct hci_conn *conn;
116 
117 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
118 
119 	if (rp->status)
120 		return;
121 
122 	hci_dev_lock(hdev);
123 
124 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
125 	if (conn)
126 		conn->link_policy = __le16_to_cpu(rp->policy);
127 
128 	hci_dev_unlock(hdev);
129 }
130 
131 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
132 {
133 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
134 	struct hci_conn *conn;
135 	void *sent;
136 
137 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
138 
139 	if (rp->status)
140 		return;
141 
142 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
143 	if (!sent)
144 		return;
145 
146 	hci_dev_lock(hdev);
147 
148 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 	if (conn)
150 		conn->link_policy = get_unaligned_le16(sent + 2);
151 
152 	hci_dev_unlock(hdev);
153 }
154 
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 {
157 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158 
159 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
160 
161 	if (rp->status)
162 		return;
163 
164 	hdev->link_policy = __le16_to_cpu(rp->policy);
165 }
166 
167 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
168 {
169 	__u8 status = *((__u8 *) skb->data);
170 	void *sent;
171 
172 	BT_DBG("%s status 0x%x", hdev->name, status);
173 
174 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
175 	if (!sent)
176 		return;
177 
178 	if (!status)
179 		hdev->link_policy = get_unaligned_le16(sent);
180 
181 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
182 }
183 
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
185 {
186 	__u8 status = *((__u8 *) skb->data);
187 
188 	BT_DBG("%s status 0x%x", hdev->name, status);
189 
190 	clear_bit(HCI_RESET, &hdev->flags);
191 
192 	hci_req_complete(hdev, HCI_OP_RESET, status);
193 
194 	/* Reset all non-persistent flags */
195 	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
196 
197 	hdev->discovery.state = DISCOVERY_STOPPED;
198 }
199 
200 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
201 {
202 	__u8 status = *((__u8 *) skb->data);
203 	void *sent;
204 
205 	BT_DBG("%s status 0x%x", hdev->name, status);
206 
207 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
208 	if (!sent)
209 		return;
210 
211 	hci_dev_lock(hdev);
212 
213 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
214 		mgmt_set_local_name_complete(hdev, sent, status);
215 	else if (!status)
216 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
217 
218 	hci_dev_unlock(hdev);
219 
220 	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
221 }
222 
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 	struct hci_rp_read_local_name *rp = (void *) skb->data;
226 
227 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
228 
229 	if (rp->status)
230 		return;
231 
232 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
233 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
234 }
235 
236 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 	__u8 status = *((__u8 *) skb->data);
239 	void *sent;
240 
241 	BT_DBG("%s status 0x%x", hdev->name, status);
242 
243 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
244 	if (!sent)
245 		return;
246 
247 	if (!status) {
248 		__u8 param = *((__u8 *) sent);
249 
250 		if (param == AUTH_ENABLED)
251 			set_bit(HCI_AUTH, &hdev->flags);
252 		else
253 			clear_bit(HCI_AUTH, &hdev->flags);
254 	}
255 
256 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
257 		mgmt_auth_enable_complete(hdev, status);
258 
259 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
260 }
261 
262 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
263 {
264 	__u8 status = *((__u8 *) skb->data);
265 	void *sent;
266 
267 	BT_DBG("%s status 0x%x", hdev->name, status);
268 
269 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
270 	if (!sent)
271 		return;
272 
273 	if (!status) {
274 		__u8 param = *((__u8 *) sent);
275 
276 		if (param)
277 			set_bit(HCI_ENCRYPT, &hdev->flags);
278 		else
279 			clear_bit(HCI_ENCRYPT, &hdev->flags);
280 	}
281 
282 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
283 }
284 
285 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
286 {
287 	__u8 param, status = *((__u8 *) skb->data);
288 	int old_pscan, old_iscan;
289 	void *sent;
290 
291 	BT_DBG("%s status 0x%x", hdev->name, status);
292 
293 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
294 	if (!sent)
295 		return;
296 
297 	param = *((__u8 *) sent);
298 
299 	hci_dev_lock(hdev);
300 
301 	if (status != 0) {
302 		mgmt_write_scan_failed(hdev, param, status);
303 		hdev->discov_timeout = 0;
304 		goto done;
305 	}
306 
307 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
308 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
309 
310 	if (param & SCAN_INQUIRY) {
311 		set_bit(HCI_ISCAN, &hdev->flags);
312 		if (!old_iscan)
313 			mgmt_discoverable(hdev, 1);
314 		if (hdev->discov_timeout > 0) {
315 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
316 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
317 									to);
318 		}
319 	} else if (old_iscan)
320 		mgmt_discoverable(hdev, 0);
321 
322 	if (param & SCAN_PAGE) {
323 		set_bit(HCI_PSCAN, &hdev->flags);
324 		if (!old_pscan)
325 			mgmt_connectable(hdev, 1);
326 	} else if (old_pscan)
327 		mgmt_connectable(hdev, 0);
328 
329 done:
330 	hci_dev_unlock(hdev);
331 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
332 }
333 
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337 
338 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
339 
340 	if (rp->status)
341 		return;
342 
343 	memcpy(hdev->dev_class, rp->dev_class, 3);
344 
345 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348 
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 	__u8 status = *((__u8 *) skb->data);
352 	void *sent;
353 
354 	BT_DBG("%s status 0x%x", hdev->name, status);
355 
356 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 	if (!sent)
358 		return;
359 
360 	hci_dev_lock(hdev);
361 
362 	if (status == 0)
363 		memcpy(hdev->dev_class, sent, 3);
364 
365 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 		mgmt_set_class_of_dev_complete(hdev, sent, status);
367 
368 	hci_dev_unlock(hdev);
369 }
370 
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 	__u16 setting;
375 
376 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
377 
378 	if (rp->status)
379 		return;
380 
381 	setting = __le16_to_cpu(rp->voice_setting);
382 
383 	if (hdev->voice_setting == setting)
384 		return;
385 
386 	hdev->voice_setting = setting;
387 
388 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
389 
390 	if (hdev->notify)
391 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393 
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
395 {
396 	__u8 status = *((__u8 *) skb->data);
397 	__u16 setting;
398 	void *sent;
399 
400 	BT_DBG("%s status 0x%x", hdev->name, status);
401 
402 	if (status)
403 		return;
404 
405 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
406 	if (!sent)
407 		return;
408 
409 	setting = get_unaligned_le16(sent);
410 
411 	if (hdev->voice_setting == setting)
412 		return;
413 
414 	hdev->voice_setting = setting;
415 
416 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
417 
418 	if (hdev->notify)
419 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
420 }
421 
422 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
423 {
424 	__u8 status = *((__u8 *) skb->data);
425 
426 	BT_DBG("%s status 0x%x", hdev->name, status);
427 
428 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
429 }
430 
431 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
432 {
433 	__u8 status = *((__u8 *) skb->data);
434 	void *sent;
435 
436 	BT_DBG("%s status 0x%x", hdev->name, status);
437 
438 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
439 	if (!sent)
440 		return;
441 
442 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
443 		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
444 	else if (!status) {
445 		if (*((u8 *) sent))
446 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
447 		else
448 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
449 	}
450 }
451 
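/* Pick the best inquiry result mode the controller can deliver:
 * 2 = inquiry result with extended data, 1 = inquiry result with RSSI,
 * 0 = standard inquiry result.  A few controllers (matched below by
 * manufacturer id, HCI revision and LMP subversion) deliver RSSI
 * results even though they do not advertise the LMP feature bit. */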
452 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
453 {
454 	if (hdev->features[6] & LMP_EXT_INQ)
455 		return 2;
456 
457 	if (hdev->features[3] & LMP_RSSI_INQ)
458 		return 1;
459 
460 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
461 						hdev->lmp_subver == 0x0757)
462 		return 1;
463 
464 	if (hdev->manufacturer == 15) {
465 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
466 			return 1;
467 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
468 			return 1;
469 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
470 			return 1;
471 	}
472 
473 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
474 						hdev->lmp_subver == 0x1805)
475 		return 1;
476 
477 	return 0;
478 }
479 
480 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
481 {
482 	u8 mode;
483 
484 	mode = hci_get_inquiry_mode(hdev);
485 
486 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487 }
488 
489 static void hci_setup_event_mask(struct hci_dev *hdev)
490 {
491 	/* The second byte is 0xff instead of 0x9f (two reserved bits
492 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
493 	 * command otherwise */
494 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
495 
496 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
497 	 * any event mask for pre-1.2 devices */
498 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
499 		return;
500 
501 	events[4] |= 0x01; /* Flow Specification Complete */
502 	events[4] |= 0x02; /* Inquiry Result with RSSI */
503 	events[4] |= 0x04; /* Read Remote Extended Features Complete */
504 	events[5] |= 0x08; /* Synchronous Connection Complete */
505 	events[5] |= 0x10; /* Synchronous Connection Changed */
506 
507 	if (hdev->features[3] & LMP_RSSI_INQ)
508 		events[4] |= 0x04; /* Inquiry Result with RSSI */
509 
510 	if (hdev->features[5] & LMP_SNIFF_SUBR)
511 		events[5] |= 0x20; /* Sniff Subrating */
512 
513 	if (hdev->features[5] & LMP_PAUSE_ENC)
514 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
515 
516 	if (hdev->features[6] & LMP_EXT_INQ)
517 		events[5] |= 0x40; /* Extended Inquiry Result */
518 
519 	if (hdev->features[6] & LMP_NO_FLUSH)
520 		events[7] |= 0x01; /* Enhanced Flush Complete */
521 
522 	if (hdev->features[7] & LMP_LSTO)
523 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
524 
525 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
526 		events[6] |= 0x01;	/* IO Capability Request */
527 		events[6] |= 0x02;	/* IO Capability Response */
528 		events[6] |= 0x04;	/* User Confirmation Request */
529 		events[6] |= 0x08;	/* User Passkey Request */
530 		events[6] |= 0x10;	/* Remote OOB Data Request */
531 		events[6] |= 0x20;	/* Simple Pairing Complete */
532 		events[7] |= 0x04;	/* User Passkey Notification */
533 		events[7] |= 0x08;	/* Keypress Notification */
534 		events[7] |= 0x10;	/* Remote Host Supported
535 					 * Features Notification */
536 	}
537 
538 	if (hdev->features[4] & LMP_LE)
539 		events[7] |= 0x20;	/* LE Meta-Event */
540 
541 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
542 }
543 
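/* Controller initialisation, called from the Read Local Version complete
 * handler while HCI_INIT is set: program the event mask, read the
 * supported commands, enable SSP or clear the EIR depending on the
 * current setting, select the inquiry mode, and read extended features
 * and inquiry TX power where the controller supports them. */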
544 static void hci_setup(struct hci_dev *hdev)
545 {
546 	if (hdev->dev_type != HCI_BREDR)
547 		return;
548 
549 	hci_setup_event_mask(hdev);
550 
551 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
552 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
553 
554 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
555 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
556 			u8 mode = 0x01;
557 			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
558 				     sizeof(mode), &mode);
559 		} else {
560 			struct hci_cp_write_eir cp;
561 
562 			memset(hdev->eir, 0, sizeof(hdev->eir));
563 			memset(&cp, 0, sizeof(cp));
564 
565 			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
566 		}
567 	}
568 
569 	if (hdev->features[3] & LMP_RSSI_INQ)
570 		hci_setup_inquiry_mode(hdev);
571 
572 	if (hdev->features[7] & LMP_INQ_TX_PWR)
573 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
574 
575 	if (hdev->features[7] & LMP_EXTFEATURES) {
576 		struct hci_cp_read_local_ext_features cp;
577 
578 		cp.page = 0x01;
579 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
580 			     &cp);
581 	}
582 
583 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
584 		u8 enable = 1;
585 		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
586 			     &enable);
587 	}
588 }
589 
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 	struct hci_rp_read_local_version *rp = (void *) skb->data;
593 
594 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
595 
596 	if (rp->status)
597 		goto done;
598 
599 	hdev->hci_ver = rp->hci_ver;
600 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 	hdev->lmp_ver = rp->lmp_ver;
602 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
604 
605 	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
606 					hdev->manufacturer,
607 					hdev->hci_ver, hdev->hci_rev);
608 
609 	if (test_bit(HCI_INIT, &hdev->flags))
610 		hci_setup(hdev);
611 
612 done:
613 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
614 }
615 
616 static void hci_setup_link_policy(struct hci_dev *hdev)
617 {
618 	u16 link_policy = 0;
619 
620 	if (hdev->features[0] & LMP_RSWITCH)
621 		link_policy |= HCI_LP_RSWITCH;
622 	if (hdev->features[0] & LMP_HOLD)
623 		link_policy |= HCI_LP_HOLD;
624 	if (hdev->features[0] & LMP_SNIFF)
625 		link_policy |= HCI_LP_SNIFF;
626 	if (hdev->features[1] & LMP_PARK)
627 		link_policy |= HCI_LP_PARK;
628 
629 	link_policy = cpu_to_le16(link_policy);
630 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
631 		     &link_policy);
632 }
633 
634 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
635 {
636 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
637 
638 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
639 
640 	if (rp->status)
641 		goto done;
642 
643 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
644 
645 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
646 		hci_setup_link_policy(hdev);
647 
648 done:
649 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
650 }
651 
652 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
653 {
654 	struct hci_rp_read_local_features *rp = (void *) skb->data;
655 
656 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
657 
658 	if (rp->status)
659 		return;
660 
661 	memcpy(hdev->features, rp->features, 8);
662 
663 	/* Adjust default settings according to features
664 	 * supported by the device. */
665 
666 	if (hdev->features[0] & LMP_3SLOT)
667 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
668 
669 	if (hdev->features[0] & LMP_5SLOT)
670 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
671 
672 	if (hdev->features[1] & LMP_HV2) {
673 		hdev->pkt_type  |= (HCI_HV2);
674 		hdev->esco_type |= (ESCO_HV2);
675 	}
676 
677 	if (hdev->features[1] & LMP_HV3) {
678 		hdev->pkt_type  |= (HCI_HV3);
679 		hdev->esco_type |= (ESCO_HV3);
680 	}
681 
682 	if (hdev->features[3] & LMP_ESCO)
683 		hdev->esco_type |= (ESCO_EV3);
684 
685 	if (hdev->features[4] & LMP_EV4)
686 		hdev->esco_type |= (ESCO_EV4);
687 
688 	if (hdev->features[4] & LMP_EV5)
689 		hdev->esco_type |= (ESCO_EV5);
690 
691 	if (hdev->features[5] & LMP_EDR_ESCO_2M)
692 		hdev->esco_type |= (ESCO_2EV3);
693 
694 	if (hdev->features[5] & LMP_EDR_ESCO_3M)
695 		hdev->esco_type |= (ESCO_3EV3);
696 
697 	if (hdev->features[5] & LMP_EDR_3S_ESCO)
698 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
699 
700 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
701 					hdev->features[0], hdev->features[1],
702 					hdev->features[2], hdev->features[3],
703 					hdev->features[4], hdev->features[5],
704 					hdev->features[6], hdev->features[7]);
705 }
706 
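/* Bring the controller's "LE Supported (Host)" setting in sync with the
 * host side configuration; the command is only sent when the desired
 * value differs from what host_features currently reports. */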
707 static void hci_set_le_support(struct hci_dev *hdev)
708 {
709 	struct hci_cp_write_le_host_supported cp;
710 
711 	memset(&cp, 0, sizeof(cp));
712 
713 	if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
714 		cp.le = 1;
715 		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
716 	}
717 
718 	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
719 		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
720 			     &cp);
721 }
722 
723 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
724 							struct sk_buff *skb)
725 {
726 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
727 
728 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
729 
730 	if (rp->status)
731 		goto done;
732 
733 	switch (rp->page) {
734 	case 0:
735 		memcpy(hdev->features, rp->features, 8);
736 		break;
737 	case 1:
738 		memcpy(hdev->host_features, rp->features, 8);
739 		break;
740 	}
741 
742 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
743 		hci_set_le_support(hdev);
744 
745 done:
746 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
747 }
748 
749 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
750 						struct sk_buff *skb)
751 {
752 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
753 
754 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
755 
756 	if (rp->status)
757 		return;
758 
759 	hdev->flow_ctl_mode = rp->mode;
760 
761 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
762 }
763 
764 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
765 {
766 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
767 
768 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
769 
770 	if (rp->status)
771 		return;
772 
773 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
774 	hdev->sco_mtu  = rp->sco_mtu;
775 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
776 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
777 
778 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
779 		hdev->sco_mtu  = 64;
780 		hdev->sco_pkts = 8;
781 	}
782 
783 	hdev->acl_cnt = hdev->acl_pkts;
784 	hdev->sco_cnt = hdev->sco_pkts;
785 
786 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
787 					hdev->acl_mtu, hdev->acl_pkts,
788 					hdev->sco_mtu, hdev->sco_pkts);
789 }
790 
791 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
792 {
793 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
794 
795 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
796 
797 	if (!rp->status)
798 		bacpy(&hdev->bdaddr, &rp->bdaddr);
799 
800 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
801 }
802 
803 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
804 							struct sk_buff *skb)
805 {
806 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
807 
808 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
809 
810 	if (rp->status)
811 		return;
812 
813 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
814 	hdev->block_len = __le16_to_cpu(rp->block_len);
815 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
816 
817 	hdev->block_cnt = hdev->num_blocks;
818 
819 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
820 					hdev->block_cnt, hdev->block_len);
821 
822 	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
823 }
824 
825 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
826 {
827 	__u8 status = *((__u8 *) skb->data);
828 
829 	BT_DBG("%s status 0x%x", hdev->name, status);
830 
831 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
832 }
833 
834 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
835 		struct sk_buff *skb)
836 {
837 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
838 
839 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
840 
841 	if (rp->status)
842 		return;
843 
844 	hdev->amp_status = rp->amp_status;
845 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
846 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
847 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
848 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
849 	hdev->amp_type = rp->amp_type;
850 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
851 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
852 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
853 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
854 
855 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
856 }
857 
858 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
859 							struct sk_buff *skb)
860 {
861 	__u8 status = *((__u8 *) skb->data);
862 
863 	BT_DBG("%s status 0x%x", hdev->name, status);
864 
865 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
866 }
867 
868 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
869 {
870 	__u8 status = *((__u8 *) skb->data);
871 
872 	BT_DBG("%s status 0x%x", hdev->name, status);
873 
874 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
875 }
876 
877 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
878 							struct sk_buff *skb)
879 {
880 	__u8 status = *((__u8 *) skb->data);
881 
882 	BT_DBG("%s status 0x%x", hdev->name, status);
883 
884 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
885 }
886 
887 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
888 							struct sk_buff *skb)
889 {
890 	__u8 status = *((__u8 *) skb->data);
891 
892 	BT_DBG("%s status 0x%x", hdev->name, status);
893 
894 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
895 }
896 
897 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
898 {
899 	__u8 status = *((__u8 *) skb->data);
900 
901 	BT_DBG("%s status 0x%x", hdev->name, status);
902 
903 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
904 }
905 
906 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
909 	struct hci_cp_pin_code_reply *cp;
910 	struct hci_conn *conn;
911 
912 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
913 
914 	hci_dev_lock(hdev);
915 
916 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
917 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
918 
919 	if (rp->status != 0)
920 		goto unlock;
921 
922 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
923 	if (!cp)
924 		goto unlock;
925 
926 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
927 	if (conn)
928 		conn->pin_length = cp->pin_len;
929 
930 unlock:
931 	hci_dev_unlock(hdev);
932 }
933 
934 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
935 {
936 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
937 
938 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
939 
940 	hci_dev_lock(hdev);
941 
942 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
943 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
944 								rp->status);
945 
946 	hci_dev_unlock(hdev);
947 }
948 
949 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
950 				       struct sk_buff *skb)
951 {
952 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
953 
954 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
955 
956 	if (rp->status)
957 		return;
958 
959 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
960 	hdev->le_pkts = rp->le_max_pkt;
961 
962 	hdev->le_cnt = hdev->le_pkts;
963 
964 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
965 
966 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
967 }
968 
969 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
970 {
971 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
972 
973 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
974 
975 	hci_dev_lock(hdev);
976 
977 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
979 						 rp->status);
980 
981 	hci_dev_unlock(hdev);
982 }
983 
984 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
985 							struct sk_buff *skb)
986 {
987 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
988 
989 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
990 
991 	hci_dev_lock(hdev);
992 
993 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
994 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
995 						     ACL_LINK, 0, rp->status);
996 
997 	hci_dev_unlock(hdev);
998 }
999 
1000 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1001 {
1002 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1003 
1004 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1005 
1006 	hci_dev_lock(hdev);
1007 
1008 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1009 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1010 						 0, rp->status);
1011 
1012 	hci_dev_unlock(hdev);
1013 }
1014 
1015 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1016 							struct sk_buff *skb)
1017 {
1018 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1019 
1020 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1021 
1022 	hci_dev_lock(hdev);
1023 
1024 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1025 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1026 						     ACL_LINK, 0, rp->status);
1027 
1028 	hci_dev_unlock(hdev);
1029 }
1030 
1031 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1032 							struct sk_buff *skb)
1033 {
1034 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1035 
1036 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1037 
1038 	hci_dev_lock(hdev);
1039 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1040 						rp->randomizer, rp->status);
1041 	hci_dev_unlock(hdev);
1042 }
1043 
1044 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1045 {
1046 	__u8 status = *((__u8 *) skb->data);
1047 
1048 	BT_DBG("%s status 0x%x", hdev->name, status);
1049 
1050 	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1051 
1052 	if (status) {
1053 		hci_dev_lock(hdev);
1054 		mgmt_start_discovery_failed(hdev, status);
1055 		hci_dev_unlock(hdev);
1056 		return;
1057 	}
1058 }
1059 
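/* Command Complete for LE Set Scan Enable: update the HCI_LE_SCAN flag
 * and move the discovery state machine along, handling the interleaved
 * BR/EDR + LE discovery case when scanning is turned off. */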
1060 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1061 					struct sk_buff *skb)
1062 {
1063 	struct hci_cp_le_set_scan_enable *cp;
1064 	__u8 status = *((__u8 *) skb->data);
1065 
1066 	BT_DBG("%s status 0x%x", hdev->name, status);
1067 
1068 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1069 	if (!cp)
1070 		return;
1071 
1072 	switch (cp->enable) {
1073 	case LE_SCANNING_ENABLED:
1074 		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1075 
1076 		if (status) {
1077 			hci_dev_lock(hdev);
1078 			mgmt_start_discovery_failed(hdev, status);
1079 			hci_dev_unlock(hdev);
1080 			return;
1081 		}
1082 
1083 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1084 
1085 		cancel_delayed_work_sync(&hdev->adv_work);
1086 
1087 		hci_dev_lock(hdev);
1088 		hci_adv_entries_clear(hdev);
1089 		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1090 		hci_dev_unlock(hdev);
1091 		break;
1092 
1093 	case LE_SCANNING_DISABLED:
1094 		if (status)
1095 			return;
1096 
1097 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1098 
1099 		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1100 
1101 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1102 			mgmt_interleaved_discovery(hdev);
1103 		} else {
1104 			hci_dev_lock(hdev);
1105 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1106 			hci_dev_unlock(hdev);
1107 		}
1108 
1109 		break;
1110 
1111 	default:
1112 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1113 		break;
1114 	}
1115 }
1116 
1117 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1118 {
1119 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1120 
1121 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1122 
1123 	if (rp->status)
1124 		return;
1125 
1126 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1127 }
1128 
1129 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1130 {
1131 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1132 
1133 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1134 
1135 	if (rp->status)
1136 		return;
1137 
1138 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1139 }
1140 
1141 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1142 							struct sk_buff *skb)
1143 {
1144 	struct hci_cp_write_le_host_supported *sent;
1145 	__u8 status = *((__u8 *) skb->data);
1146 
1147 	BT_DBG("%s status 0x%x", hdev->name, status);
1148 
1149 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1150 	if (!sent)
1151 		return;
1152 
1153 	if (!status) {
1154 		if (sent->le)
1155 			hdev->host_features[0] |= LMP_HOST_LE;
1156 		else
1157 			hdev->host_features[0] &= ~LMP_HOST_LE;
1158 	}
1159 
1160 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1161 					!test_bit(HCI_INIT, &hdev->flags))
1162 		mgmt_le_enable_complete(hdev, sent->le, status);
1163 
1164 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1165 }
1166 
1167 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1168 {
1169 	BT_DBG("%s status 0x%x", hdev->name, status);
1170 
1171 	if (status) {
1172 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1173 		hci_conn_check_pending(hdev);
1174 		hci_dev_lock(hdev);
1175 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1176 			mgmt_start_discovery_failed(hdev, status);
1177 		hci_dev_unlock(hdev);
1178 		return;
1179 	}
1180 
1181 	set_bit(HCI_INQUIRY, &hdev->flags);
1182 
1183 	hci_dev_lock(hdev);
1184 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1185 	hci_dev_unlock(hdev);
1186 }
1187 
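/* Command Status for HCI_Create_Connection: on failure tear down the
 * pending connection (a 0x0c "Command Disallowed" status is tolerated
 * for the first two attempts and the connection is left in BT_CONNECT2
 * to be retried); on success make sure a connection object exists. */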
1188 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1189 {
1190 	struct hci_cp_create_conn *cp;
1191 	struct hci_conn *conn;
1192 
1193 	BT_DBG("%s status 0x%x", hdev->name, status);
1194 
1195 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1196 	if (!cp)
1197 		return;
1198 
1199 	hci_dev_lock(hdev);
1200 
1201 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1202 
1203 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1204 
1205 	if (status) {
1206 		if (conn && conn->state == BT_CONNECT) {
1207 			if (status != 0x0c || conn->attempt > 2) {
1208 				conn->state = BT_CLOSED;
1209 				hci_proto_connect_cfm(conn, status);
1210 				hci_conn_del(conn);
1211 			} else
1212 				conn->state = BT_CONNECT2;
1213 		}
1214 	} else {
1215 		if (!conn) {
1216 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1217 			if (conn) {
1218 				conn->out = true;
1219 				conn->link_mode |= HCI_LM_MASTER;
1220 			} else
1221 				BT_ERR("No memory for new connection");
1222 		}
1223 	}
1224 
1225 	hci_dev_unlock(hdev);
1226 }
1227 
1228 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1229 {
1230 	struct hci_cp_add_sco *cp;
1231 	struct hci_conn *acl, *sco;
1232 	__u16 handle;
1233 
1234 	BT_DBG("%s status 0x%x", hdev->name, status);
1235 
1236 	if (!status)
1237 		return;
1238 
1239 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1240 	if (!cp)
1241 		return;
1242 
1243 	handle = __le16_to_cpu(cp->handle);
1244 
1245 	BT_DBG("%s handle %d", hdev->name, handle);
1246 
1247 	hci_dev_lock(hdev);
1248 
1249 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1250 	if (acl) {
1251 		sco = acl->link;
1252 		if (sco) {
1253 			sco->state = BT_CLOSED;
1254 
1255 			hci_proto_connect_cfm(sco, status);
1256 			hci_conn_del(sco);
1257 		}
1258 	}
1259 
1260 	hci_dev_unlock(hdev);
1261 }
1262 
1263 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1264 {
1265 	struct hci_cp_auth_requested *cp;
1266 	struct hci_conn *conn;
1267 
1268 	BT_DBG("%s status 0x%x", hdev->name, status);
1269 
1270 	if (!status)
1271 		return;
1272 
1273 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1274 	if (!cp)
1275 		return;
1276 
1277 	hci_dev_lock(hdev);
1278 
1279 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1280 	if (conn) {
1281 		if (conn->state == BT_CONFIG) {
1282 			hci_proto_connect_cfm(conn, status);
1283 			hci_conn_put(conn);
1284 		}
1285 	}
1286 
1287 	hci_dev_unlock(hdev);
1288 }
1289 
1290 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1291 {
1292 	struct hci_cp_set_conn_encrypt *cp;
1293 	struct hci_conn *conn;
1294 
1295 	BT_DBG("%s status 0x%x", hdev->name, status);
1296 
1297 	if (!status)
1298 		return;
1299 
1300 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1301 	if (!cp)
1302 		return;
1303 
1304 	hci_dev_lock(hdev);
1305 
1306 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1307 	if (conn) {
1308 		if (conn->state == BT_CONFIG) {
1309 			hci_proto_connect_cfm(conn, status);
1310 			hci_conn_put(conn);
1311 		}
1312 	}
1313 
1314 	hci_dev_unlock(hdev);
1315 }
1316 
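/* Decide whether an outgoing connection that is still in BT_CONFIG
 * needs authentication before it can be reported as connected. */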
1317 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1318 							struct hci_conn *conn)
1319 {
1320 	if (conn->state != BT_CONFIG || !conn->out)
1321 		return 0;
1322 
1323 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1324 		return 0;
1325 
1326 	/* Only request authentication for SSP connections or non-SSP
1327 	 * devices with sec_level HIGH or if MITM protection is requested */
1328 	if (!hci_conn_ssp_enabled(conn) &&
1329 				conn->pending_sec_level != BT_SECURITY_HIGH &&
1330 				!(conn->auth_type & 0x01))
1331 		return 0;
1332 
1333 	return 1;
1334 }
1335 
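/* Send a Remote Name Request for an inquiry cache entry, reusing the
 * page scan parameters and clock offset learned during inquiry. */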
1336 static inline int hci_resolve_name(struct hci_dev *hdev,
1337 				   struct inquiry_entry *e)
1338 {
1339 	struct hci_cp_remote_name_req cp;
1340 
1341 	memset(&cp, 0, sizeof(cp));
1342 
1343 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1344 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1345 	cp.pscan_mode = e->data.pscan_mode;
1346 	cp.clock_offset = e->data.clock_offset;
1347 
1348 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1349 }
1350 
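/* Kick off name resolution for the next cache entry that still needs
 * it; returns true if a Remote Name Request was sent. */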
1351 static bool hci_resolve_next_name(struct hci_dev *hdev)
1352 {
1353 	struct discovery_state *discov = &hdev->discovery;
1354 	struct inquiry_entry *e;
1355 
1356 	if (list_empty(&discov->resolve))
1357 		return false;
1358 
1359 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1360 	if (hci_resolve_name(hdev, e) == 0) {
1361 		e->name_state = NAME_PENDING;
1362 		return true;
1363 	}
1364 
1365 	return false;
1366 }
1367 
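/* Called when a remote name lookup finishes (name is NULL on failure):
 * report the device and its name to mgmt, then either resolve the next
 * pending name or mark discovery as stopped. */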
1368 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1369 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1370 {
1371 	struct discovery_state *discov = &hdev->discovery;
1372 	struct inquiry_entry *e;
1373 
1374 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1375 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1376 				      name_len, conn->dev_class);
1377 
1378 	if (discov->state == DISCOVERY_STOPPED)
1379 		return;
1380 
1381 	if (discov->state == DISCOVERY_STOPPING)
1382 		goto discov_complete;
1383 
1384 	if (discov->state != DISCOVERY_RESOLVING)
1385 		return;
1386 
1387 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1388 	if (e) {
1389 		e->name_state = NAME_KNOWN;
1390 		list_del(&e->list);
1391 		if (name)
1392 			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1393 					 e->data.rssi, name, name_len);
1394 	}
1395 
1396 	if (hci_resolve_next_name(hdev))
1397 		return;
1398 
1399 discov_complete:
1400 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1401 }
1402 
1403 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1404 {
1405 	struct hci_cp_remote_name_req *cp;
1406 	struct hci_conn *conn;
1407 
1408 	BT_DBG("%s status 0x%x", hdev->name, status);
1409 
1410 	/* If successful, wait for the Remote Name Request Complete event
1411 	 * before checking whether authentication is needed */
1412 	if (!status)
1413 		return;
1414 
1415 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1416 	if (!cp)
1417 		return;
1418 
1419 	hci_dev_lock(hdev);
1420 
1421 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1422 
1423 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1424 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1425 
1426 	if (!conn)
1427 		goto unlock;
1428 
1429 	if (!hci_outgoing_auth_needed(hdev, conn))
1430 		goto unlock;
1431 
1432 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1433 		struct hci_cp_auth_requested cp;
1434 		cp.handle = __cpu_to_le16(conn->handle);
1435 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1436 	}
1437 
1438 unlock:
1439 	hci_dev_unlock(hdev);
1440 }
1441 
1442 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1443 {
1444 	struct hci_cp_read_remote_features *cp;
1445 	struct hci_conn *conn;
1446 
1447 	BT_DBG("%s status 0x%x", hdev->name, status);
1448 
1449 	if (!status)
1450 		return;
1451 
1452 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1453 	if (!cp)
1454 		return;
1455 
1456 	hci_dev_lock(hdev);
1457 
1458 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1459 	if (conn) {
1460 		if (conn->state == BT_CONFIG) {
1461 			hci_proto_connect_cfm(conn, status);
1462 			hci_conn_put(conn);
1463 		}
1464 	}
1465 
1466 	hci_dev_unlock(hdev);
1467 }
1468 
1469 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1470 {
1471 	struct hci_cp_read_remote_ext_features *cp;
1472 	struct hci_conn *conn;
1473 
1474 	BT_DBG("%s status 0x%x", hdev->name, status);
1475 
1476 	if (!status)
1477 		return;
1478 
1479 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1480 	if (!cp)
1481 		return;
1482 
1483 	hci_dev_lock(hdev);
1484 
1485 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1486 	if (conn) {
1487 		if (conn->state == BT_CONFIG) {
1488 			hci_proto_connect_cfm(conn, status);
1489 			hci_conn_put(conn);
1490 		}
1491 	}
1492 
1493 	hci_dev_unlock(hdev);
1494 }
1495 
1496 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1497 {
1498 	struct hci_cp_setup_sync_conn *cp;
1499 	struct hci_conn *acl, *sco;
1500 	__u16 handle;
1501 
1502 	BT_DBG("%s status 0x%x", hdev->name, status);
1503 
1504 	if (!status)
1505 		return;
1506 
1507 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1508 	if (!cp)
1509 		return;
1510 
1511 	handle = __le16_to_cpu(cp->handle);
1512 
1513 	BT_DBG("%s handle %d", hdev->name, handle);
1514 
1515 	hci_dev_lock(hdev);
1516 
1517 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1518 	if (acl) {
1519 		sco = acl->link;
1520 		if (sco) {
1521 			sco->state = BT_CLOSED;
1522 
1523 			hci_proto_connect_cfm(sco, status);
1524 			hci_conn_del(sco);
1525 		}
1526 	}
1527 
1528 	hci_dev_unlock(hdev);
1529 }
1530 
1531 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1532 {
1533 	struct hci_cp_sniff_mode *cp;
1534 	struct hci_conn *conn;
1535 
1536 	BT_DBG("%s status 0x%x", hdev->name, status);
1537 
1538 	if (!status)
1539 		return;
1540 
1541 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1542 	if (!cp)
1543 		return;
1544 
1545 	hci_dev_lock(hdev);
1546 
1547 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1548 	if (conn) {
1549 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1550 
1551 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1552 			hci_sco_setup(conn, status);
1553 	}
1554 
1555 	hci_dev_unlock(hdev);
1556 }
1557 
1558 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1559 {
1560 	struct hci_cp_exit_sniff_mode *cp;
1561 	struct hci_conn *conn;
1562 
1563 	BT_DBG("%s status 0x%x", hdev->name, status);
1564 
1565 	if (!status)
1566 		return;
1567 
1568 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1569 	if (!cp)
1570 		return;
1571 
1572 	hci_dev_lock(hdev);
1573 
1574 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1575 	if (conn) {
1576 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1577 
1578 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1579 			hci_sco_setup(conn, status);
1580 	}
1581 
1582 	hci_dev_unlock(hdev);
1583 }
1584 
1585 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1586 {
1587 	struct hci_cp_disconnect *cp;
1588 	struct hci_conn *conn;
1589 
1590 	if (!status)
1591 		return;
1592 
1593 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1594 	if (!cp)
1595 		return;
1596 
1597 	hci_dev_lock(hdev);
1598 
1599 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1600 	if (conn)
1601 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1602 				       conn->dst_type, status);
1603 
1604 	hci_dev_unlock(hdev);
1605 }
1606 
1607 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1608 {
1609 	struct hci_cp_le_create_conn *cp;
1610 	struct hci_conn *conn;
1611 
1612 	BT_DBG("%s status 0x%x", hdev->name, status);
1613 
1614 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1615 	if (!cp)
1616 		return;
1617 
1618 	hci_dev_lock(hdev);
1619 
1620 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1621 
1622 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1623 		conn);
1624 
1625 	if (status) {
1626 		if (conn && conn->state == BT_CONNECT) {
1627 			conn->state = BT_CLOSED;
1628 			hci_proto_connect_cfm(conn, status);
1629 			hci_conn_del(conn);
1630 		}
1631 	} else {
1632 		if (!conn) {
1633 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1634 			if (conn) {
1635 				conn->dst_type = cp->peer_addr_type;
1636 				conn->out = true;
1637 			} else {
1638 				BT_ERR("No memory for new connection");
1639 			}
1640 		}
1641 	}
1642 
1643 	hci_dev_unlock(hdev);
1644 }
1645 
1646 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1647 {
1648 	BT_DBG("%s status 0x%x", hdev->name, status);
1649 }
1650 
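/* Inquiry Complete event: the inquiry phase is over; when mgmt-driven
 * discovery is active, start resolving names from the inquiry cache or
 * mark discovery as stopped if nothing is left to resolve. */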
1651 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1652 {
1653 	__u8 status = *((__u8 *) skb->data);
1654 	struct discovery_state *discov = &hdev->discovery;
1655 	struct inquiry_entry *e;
1656 
1657 	BT_DBG("%s status %d", hdev->name, status);
1658 
1659 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1660 
1661 	hci_conn_check_pending(hdev);
1662 
1663 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1664 		return;
1665 
1666 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1667 		return;
1668 
1669 	hci_dev_lock(hdev);
1670 
1671 	if (discov->state != DISCOVERY_FINDING)
1672 		goto unlock;
1673 
1674 	if (list_empty(&discov->resolve)) {
1675 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1676 		goto unlock;
1677 	}
1678 
1679 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1680 	if (e && hci_resolve_name(hdev, e) == 0) {
1681 		e->name_state = NAME_PENDING;
1682 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1683 	} else {
1684 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1685 	}
1686 
1687 unlock:
1688 	hci_dev_unlock(hdev);
1689 }
1690 
1691 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1692 {
1693 	struct inquiry_data data;
1694 	struct inquiry_info *info = (void *) (skb->data + 1);
1695 	int num_rsp = *((__u8 *) skb->data);
1696 
1697 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1698 
1699 	if (!num_rsp)
1700 		return;
1701 
1702 	hci_dev_lock(hdev);
1703 
1704 	for (; num_rsp; num_rsp--, info++) {
1705 		bool name_known, ssp;
1706 
1707 		bacpy(&data.bdaddr, &info->bdaddr);
1708 		data.pscan_rep_mode	= info->pscan_rep_mode;
1709 		data.pscan_period_mode	= info->pscan_period_mode;
1710 		data.pscan_mode		= info->pscan_mode;
1711 		memcpy(data.dev_class, info->dev_class, 3);
1712 		data.clock_offset	= info->clock_offset;
1713 		data.rssi		= 0x00;
1714 		data.ssp_mode		= 0x00;
1715 
1716 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1717 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1718 				  info->dev_class, 0, !name_known, ssp, NULL,
1719 				  0);
1720 	}
1721 
1722 	hci_dev_unlock(hdev);
1723 }
1724 
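/* Connection Complete event: on success record the handle, move ACL
 * links to BT_CONFIG (and read the remote features), other link types
 * straight to BT_CONNECTED; on failure close the connection and let
 * mgmt know the connect attempt failed. */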
1725 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1726 {
1727 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1728 	struct hci_conn *conn;
1729 
1730 	BT_DBG("%s", hdev->name);
1731 
1732 	hci_dev_lock(hdev);
1733 
1734 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1735 	if (!conn) {
1736 		if (ev->link_type != SCO_LINK)
1737 			goto unlock;
1738 
1739 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1740 		if (!conn)
1741 			goto unlock;
1742 
1743 		conn->type = SCO_LINK;
1744 	}
1745 
1746 	if (!ev->status) {
1747 		conn->handle = __le16_to_cpu(ev->handle);
1748 
1749 		if (conn->type == ACL_LINK) {
1750 			conn->state = BT_CONFIG;
1751 			hci_conn_hold(conn);
1752 			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1753 		} else
1754 			conn->state = BT_CONNECTED;
1755 
1756 		hci_conn_hold_device(conn);
1757 		hci_conn_add_sysfs(conn);
1758 
1759 		if (test_bit(HCI_AUTH, &hdev->flags))
1760 			conn->link_mode |= HCI_LM_AUTH;
1761 
1762 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1763 			conn->link_mode |= HCI_LM_ENCRYPT;
1764 
1765 		/* Get remote features */
1766 		if (conn->type == ACL_LINK) {
1767 			struct hci_cp_read_remote_features cp;
1768 			cp.handle = ev->handle;
1769 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1770 				     sizeof(cp), &cp);
1771 		}
1772 
1773 		/* Set packet type for incoming connection */
1774 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1775 			struct hci_cp_change_conn_ptype cp;
1776 			cp.handle = ev->handle;
1777 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1778 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1779 				     &cp);
1780 		}
1781 	} else {
1782 		conn->state = BT_CLOSED;
1783 		if (conn->type == ACL_LINK)
1784 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1785 					    conn->dst_type, ev->status);
1786 	}
1787 
1788 	if (conn->type == ACL_LINK)
1789 		hci_sco_setup(conn, ev->status);
1790 
1791 	if (ev->status) {
1792 		hci_proto_connect_cfm(conn, ev->status);
1793 		hci_conn_del(conn);
1794 	} else if (ev->link_type != ACL_LINK)
1795 		hci_proto_connect_cfm(conn, ev->status);
1796 
1797 unlock:
1798 	hci_dev_unlock(hdev);
1799 
1800 	hci_conn_check_pending(hdev);
1801 }
1802 
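/* Connection Request event: accept the incoming connection if the
 * device's link mode or one of the protocols allows it and the peer is
 * not blacklisted, otherwise reject it.  ACL requests are accepted
 * directly; (e)SCO requests get default synchronous connection
 * parameters filled in. */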
1803 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1804 {
1805 	struct hci_ev_conn_request *ev = (void *) skb->data;
1806 	int mask = hdev->link_mode;
1807 
1808 	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1809 					batostr(&ev->bdaddr), ev->link_type);
1810 
1811 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1812 
1813 	if ((mask & HCI_LM_ACCEPT) &&
1814 			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1815 		/* Connection accepted */
1816 		struct inquiry_entry *ie;
1817 		struct hci_conn *conn;
1818 
1819 		hci_dev_lock(hdev);
1820 
1821 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1822 		if (ie)
1823 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1824 
1825 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1826 		if (!conn) {
1827 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1828 			if (!conn) {
1829 				BT_ERR("No memory for new connection");
1830 				hci_dev_unlock(hdev);
1831 				return;
1832 			}
1833 		}
1834 
1835 		memcpy(conn->dev_class, ev->dev_class, 3);
1836 		conn->state = BT_CONNECT;
1837 
1838 		hci_dev_unlock(hdev);
1839 
1840 		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1841 			struct hci_cp_accept_conn_req cp;
1842 
1843 			bacpy(&cp.bdaddr, &ev->bdaddr);
1844 
1845 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1846 				cp.role = 0x00; /* Become master */
1847 			else
1848 				cp.role = 0x01; /* Remain slave */
1849 
1850 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1851 				     &cp);
1852 		} else {
1853 			struct hci_cp_accept_sync_conn_req cp;
1854 
1855 			bacpy(&cp.bdaddr, &ev->bdaddr);
1856 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1857 
1858 			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1859 			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1860 			cp.max_latency    = cpu_to_le16(0xffff);
1861 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1862 			cp.retrans_effort = 0xff;
1863 
1864 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1865 				     sizeof(cp), &cp);
1866 		}
1867 	} else {
1868 		/* Connection rejected */
1869 		struct hci_cp_reject_conn_req cp;
1870 
1871 		bacpy(&cp.bdaddr, &ev->bdaddr);
1872 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1873 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1874 	}
1875 }
1876 
1877 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1878 {
1879 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1880 	struct hci_conn *conn;
1881 
1882 	BT_DBG("%s status %d", hdev->name, ev->status);
1883 
1884 	hci_dev_lock(hdev);
1885 
1886 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1887 	if (!conn)
1888 		goto unlock;
1889 
1890 	if (ev->status == 0)
1891 		conn->state = BT_CLOSED;
1892 
1893 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1894 			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
1895 		if (ev->status != 0)
1896 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1897 						conn->dst_type, ev->status);
1898 		else
1899 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1900 						 conn->dst_type);
1901 	}
1902 
1903 	if (ev->status == 0) {
1904 		if (conn->type == ACL_LINK && conn->flush_key)
1905 			hci_remove_link_key(hdev, &conn->dst);
1906 		hci_proto_disconn_cfm(conn, ev->reason);
1907 		hci_conn_del(conn);
1908 	}
1909 
1910 unlock:
1911 	hci_dev_unlock(hdev);
1912 }
1913 
1914 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1915 {
1916 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1917 	struct hci_conn *conn;
1918 
1919 	BT_DBG("%s status %d", hdev->name, ev->status);
1920 
1921 	hci_dev_lock(hdev);
1922 
1923 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1924 	if (!conn)
1925 		goto unlock;
1926 
1927 	if (!ev->status) {
1928 		if (!hci_conn_ssp_enabled(conn) &&
1929 				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1930 			BT_INFO("re-auth of legacy device is not possible.");
1931 		} else {
1932 			conn->link_mode |= HCI_LM_AUTH;
1933 			conn->sec_level = conn->pending_sec_level;
1934 		}
1935 	} else {
1936 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1937 				 ev->status);
1938 	}
1939 
1940 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1941 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1942 
1943 	if (conn->state == BT_CONFIG) {
1944 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1945 			struct hci_cp_set_conn_encrypt cp;
1946 			cp.handle  = ev->handle;
1947 			cp.encrypt = 0x01;
1948 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1949 									&cp);
1950 		} else {
1951 			conn->state = BT_CONNECTED;
1952 			hci_proto_connect_cfm(conn, ev->status);
1953 			hci_conn_put(conn);
1954 		}
1955 	} else {
1956 		hci_auth_cfm(conn, ev->status);
1957 
1958 		hci_conn_hold(conn);
1959 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1960 		hci_conn_put(conn);
1961 	}
1962 
1963 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1964 		if (!ev->status) {
1965 			struct hci_cp_set_conn_encrypt cp;
1966 			cp.handle  = ev->handle;
1967 			cp.encrypt = 0x01;
1968 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1969 									&cp);
1970 		} else {
1971 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1972 			hci_encrypt_cfm(conn, ev->status, 0x00);
1973 		}
1974 	}
1975 
1976 unlock:
1977 	hci_dev_unlock(hdev);
1978 }
1979 
1980 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1981 {
1982 	struct hci_ev_remote_name *ev = (void *) skb->data;
1983 	struct hci_conn *conn;
1984 
1985 	BT_DBG("%s", hdev->name);
1986 
1987 	hci_conn_check_pending(hdev);
1988 
1989 	hci_dev_lock(hdev);
1990 
1991 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1992 
1993 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1994 		goto check_auth;
1995 
1996 	if (ev->status == 0)
1997 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1998 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1999 	else
2000 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2001 
2002 check_auth:
2003 	if (!conn)
2004 		goto unlock;
2005 
2006 	if (!hci_outgoing_auth_needed(hdev, conn))
2007 		goto unlock;
2008 
2009 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2010 		struct hci_cp_auth_requested cp;
2011 		cp.handle = __cpu_to_le16(conn->handle);
2012 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2013 	}
2014 
2015 unlock:
2016 	hci_dev_unlock(hdev);
2017 }
2018 
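/* Encryption Change event: encryption implies authentication, so both
 * HCI_LM_AUTH and HCI_LM_ENCRYPT are set when encryption was turned on.
 * Connections still in BT_CONFIG are promoted to BT_CONNECTED; otherwise
 * the result is passed on through hci_encrypt_cfm().
 */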
2019 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2020 {
2021 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2022 	struct hci_conn *conn;
2023 
2024 	BT_DBG("%s status %d", hdev->name, ev->status);
2025 
2026 	hci_dev_lock(hdev);
2027 
2028 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2029 	if (conn) {
2030 		if (!ev->status) {
2031 			if (ev->encrypt) {
2032 				/* Encryption implies authentication */
2033 				conn->link_mode |= HCI_LM_AUTH;
2034 				conn->link_mode |= HCI_LM_ENCRYPT;
2035 				conn->sec_level = conn->pending_sec_level;
2036 			} else
2037 				conn->link_mode &= ~HCI_LM_ENCRYPT;
2038 		}
2039 
2040 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2041 
2042 		if (conn->state == BT_CONFIG) {
2043 			if (!ev->status)
2044 				conn->state = BT_CONNECTED;
2045 
2046 			hci_proto_connect_cfm(conn, ev->status);
2047 			hci_conn_put(conn);
2048 		} else
2049 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2050 	}
2051 
2052 	hci_dev_unlock(hdev);
2053 }
2054 
2055 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2056 {
2057 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2058 	struct hci_conn *conn;
2059 
2060 	BT_DBG("%s status %d", hdev->name, ev->status);
2061 
2062 	hci_dev_lock(hdev);
2063 
2064 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2065 	if (conn) {
2066 		if (!ev->status)
2067 			conn->link_mode |= HCI_LM_SECURE;
2068 
2069 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2070 
2071 		hci_key_change_cfm(conn, ev->status);
2072 	}
2073 
2074 	hci_dev_unlock(hdev);
2075 }
2076 
2077 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2078 {
2079 	struct hci_ev_remote_features *ev = (void *) skb->data;
2080 	struct hci_conn *conn;
2081 
2082 	BT_DBG("%s status %d", hdev->name, ev->status);
2083 
2084 	hci_dev_lock(hdev);
2085 
2086 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2087 	if (!conn)
2088 		goto unlock;
2089 
2090 	if (!ev->status)
2091 		memcpy(conn->features, ev->features, 8);
2092 
2093 	if (conn->state != BT_CONFIG)
2094 		goto unlock;
2095 
2096 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2097 		struct hci_cp_read_remote_ext_features cp;
2098 		cp.handle = ev->handle;
2099 		cp.page = 0x01;
2100 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2101 							sizeof(cp), &cp);
2102 		goto unlock;
2103 	}
2104 
2105 	if (!ev->status) {
2106 		struct hci_cp_remote_name_req cp;
2107 		memset(&cp, 0, sizeof(cp));
2108 		bacpy(&cp.bdaddr, &conn->dst);
2109 		cp.pscan_rep_mode = 0x02;
2110 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2111 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2112 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2113 				      conn->dst_type, 0, NULL, 0,
2114 				      conn->dev_class);
2115 
2116 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2117 		conn->state = BT_CONNECTED;
2118 		hci_proto_connect_cfm(conn, ev->status);
2119 		hci_conn_put(conn);
2120 	}
2121 
2122 unlock:
2123 	hci_dev_unlock(hdev);
2124 }
2125 
2126 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2127 {
2128 	BT_DBG("%s", hdev->name);
2129 }
2130 
2131 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2132 {
2133 	BT_DBG("%s", hdev->name);
2134 }
2135 
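/* Command Complete event: dispatch the opcode to its hci_cc_* handler,
 * stop the command timer (unless the opcode is HCI_OP_NOP) and, when the
 * controller reports free command slots, kick the command work queue.
 */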
2136 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2137 {
2138 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2139 	__u16 opcode;
2140 
2141 	skb_pull(skb, sizeof(*ev));
2142 
2143 	opcode = __le16_to_cpu(ev->opcode);
2144 
2145 	switch (opcode) {
2146 	case HCI_OP_INQUIRY_CANCEL:
2147 		hci_cc_inquiry_cancel(hdev, skb);
2148 		break;
2149 
2150 	case HCI_OP_EXIT_PERIODIC_INQ:
2151 		hci_cc_exit_periodic_inq(hdev, skb);
2152 		break;
2153 
2154 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2155 		hci_cc_remote_name_req_cancel(hdev, skb);
2156 		break;
2157 
2158 	case HCI_OP_ROLE_DISCOVERY:
2159 		hci_cc_role_discovery(hdev, skb);
2160 		break;
2161 
2162 	case HCI_OP_READ_LINK_POLICY:
2163 		hci_cc_read_link_policy(hdev, skb);
2164 		break;
2165 
2166 	case HCI_OP_WRITE_LINK_POLICY:
2167 		hci_cc_write_link_policy(hdev, skb);
2168 		break;
2169 
2170 	case HCI_OP_READ_DEF_LINK_POLICY:
2171 		hci_cc_read_def_link_policy(hdev, skb);
2172 		break;
2173 
2174 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2175 		hci_cc_write_def_link_policy(hdev, skb);
2176 		break;
2177 
2178 	case HCI_OP_RESET:
2179 		hci_cc_reset(hdev, skb);
2180 		break;
2181 
2182 	case HCI_OP_WRITE_LOCAL_NAME:
2183 		hci_cc_write_local_name(hdev, skb);
2184 		break;
2185 
2186 	case HCI_OP_READ_LOCAL_NAME:
2187 		hci_cc_read_local_name(hdev, skb);
2188 		break;
2189 
2190 	case HCI_OP_WRITE_AUTH_ENABLE:
2191 		hci_cc_write_auth_enable(hdev, skb);
2192 		break;
2193 
2194 	case HCI_OP_WRITE_ENCRYPT_MODE:
2195 		hci_cc_write_encrypt_mode(hdev, skb);
2196 		break;
2197 
2198 	case HCI_OP_WRITE_SCAN_ENABLE:
2199 		hci_cc_write_scan_enable(hdev, skb);
2200 		break;
2201 
2202 	case HCI_OP_READ_CLASS_OF_DEV:
2203 		hci_cc_read_class_of_dev(hdev, skb);
2204 		break;
2205 
2206 	case HCI_OP_WRITE_CLASS_OF_DEV:
2207 		hci_cc_write_class_of_dev(hdev, skb);
2208 		break;
2209 
2210 	case HCI_OP_READ_VOICE_SETTING:
2211 		hci_cc_read_voice_setting(hdev, skb);
2212 		break;
2213 
2214 	case HCI_OP_WRITE_VOICE_SETTING:
2215 		hci_cc_write_voice_setting(hdev, skb);
2216 		break;
2217 
2218 	case HCI_OP_HOST_BUFFER_SIZE:
2219 		hci_cc_host_buffer_size(hdev, skb);
2220 		break;
2221 
2222 	case HCI_OP_WRITE_SSP_MODE:
2223 		hci_cc_write_ssp_mode(hdev, skb);
2224 		break;
2225 
2226 	case HCI_OP_READ_LOCAL_VERSION:
2227 		hci_cc_read_local_version(hdev, skb);
2228 		break;
2229 
2230 	case HCI_OP_READ_LOCAL_COMMANDS:
2231 		hci_cc_read_local_commands(hdev, skb);
2232 		break;
2233 
2234 	case HCI_OP_READ_LOCAL_FEATURES:
2235 		hci_cc_read_local_features(hdev, skb);
2236 		break;
2237 
2238 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2239 		hci_cc_read_local_ext_features(hdev, skb);
2240 		break;
2241 
2242 	case HCI_OP_READ_BUFFER_SIZE:
2243 		hci_cc_read_buffer_size(hdev, skb);
2244 		break;
2245 
2246 	case HCI_OP_READ_BD_ADDR:
2247 		hci_cc_read_bd_addr(hdev, skb);
2248 		break;
2249 
2250 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2251 		hci_cc_read_data_block_size(hdev, skb);
2252 		break;
2253 
2254 	case HCI_OP_WRITE_CA_TIMEOUT:
2255 		hci_cc_write_ca_timeout(hdev, skb);
2256 		break;
2257 
2258 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2259 		hci_cc_read_flow_control_mode(hdev, skb);
2260 		break;
2261 
2262 	case HCI_OP_READ_LOCAL_AMP_INFO:
2263 		hci_cc_read_local_amp_info(hdev, skb);
2264 		break;
2265 
2266 	case HCI_OP_DELETE_STORED_LINK_KEY:
2267 		hci_cc_delete_stored_link_key(hdev, skb);
2268 		break;
2269 
2270 	case HCI_OP_SET_EVENT_MASK:
2271 		hci_cc_set_event_mask(hdev, skb);
2272 		break;
2273 
2274 	case HCI_OP_WRITE_INQUIRY_MODE:
2275 		hci_cc_write_inquiry_mode(hdev, skb);
2276 		break;
2277 
2278 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2279 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2280 		break;
2281 
2282 	case HCI_OP_SET_EVENT_FLT:
2283 		hci_cc_set_event_flt(hdev, skb);
2284 		break;
2285 
2286 	case HCI_OP_PIN_CODE_REPLY:
2287 		hci_cc_pin_code_reply(hdev, skb);
2288 		break;
2289 
2290 	case HCI_OP_PIN_CODE_NEG_REPLY:
2291 		hci_cc_pin_code_neg_reply(hdev, skb);
2292 		break;
2293 
2294 	case HCI_OP_READ_LOCAL_OOB_DATA:
2295 		hci_cc_read_local_oob_data_reply(hdev, skb);
2296 		break;
2297 
2298 	case HCI_OP_LE_READ_BUFFER_SIZE:
2299 		hci_cc_le_read_buffer_size(hdev, skb);
2300 		break;
2301 
2302 	case HCI_OP_USER_CONFIRM_REPLY:
2303 		hci_cc_user_confirm_reply(hdev, skb);
2304 		break;
2305 
2306 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2307 		hci_cc_user_confirm_neg_reply(hdev, skb);
2308 		break;
2309 
2310 	case HCI_OP_USER_PASSKEY_REPLY:
2311 		hci_cc_user_passkey_reply(hdev, skb);
2312 		break;
2313 
2314 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2315 		hci_cc_user_passkey_neg_reply(hdev, skb);
2316 		break;
2317 
2318 	case HCI_OP_LE_SET_SCAN_PARAM:
2319 		hci_cc_le_set_scan_param(hdev, skb);
2320 		break;
2321 
2322 	case HCI_OP_LE_SET_SCAN_ENABLE:
2323 		hci_cc_le_set_scan_enable(hdev, skb);
2324 		break;
2325 
2326 	case HCI_OP_LE_LTK_REPLY:
2327 		hci_cc_le_ltk_reply(hdev, skb);
2328 		break;
2329 
2330 	case HCI_OP_LE_LTK_NEG_REPLY:
2331 		hci_cc_le_ltk_neg_reply(hdev, skb);
2332 		break;
2333 
2334 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2335 		hci_cc_write_le_host_supported(hdev, skb);
2336 		break;
2337 
2338 	default:
2339 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2340 		break;
2341 	}
2342 
2343 	if (ev->opcode != HCI_OP_NOP)
2344 		del_timer(&hdev->cmd_timer);
2345 
2346 	if (ev->ncmd) {
2347 		atomic_set(&hdev->cmd_cnt, 1);
2348 		if (!skb_queue_empty(&hdev->cmd_q))
2349 			queue_work(hdev->workqueue, &hdev->cmd_work);
2350 	}
2351 }
2352 
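/* Command Status event: dispatch to the matching hci_cs_* handler and
 * resume command processing as for Command Complete, except that nothing
 * is queued while an HCI reset is in progress.
 */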
2353 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2354 {
2355 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2356 	__u16 opcode;
2357 
2358 	skb_pull(skb, sizeof(*ev));
2359 
2360 	opcode = __le16_to_cpu(ev->opcode);
2361 
2362 	switch (opcode) {
2363 	case HCI_OP_INQUIRY:
2364 		hci_cs_inquiry(hdev, ev->status);
2365 		break;
2366 
2367 	case HCI_OP_CREATE_CONN:
2368 		hci_cs_create_conn(hdev, ev->status);
2369 		break;
2370 
2371 	case HCI_OP_ADD_SCO:
2372 		hci_cs_add_sco(hdev, ev->status);
2373 		break;
2374 
2375 	case HCI_OP_AUTH_REQUESTED:
2376 		hci_cs_auth_requested(hdev, ev->status);
2377 		break;
2378 
2379 	case HCI_OP_SET_CONN_ENCRYPT:
2380 		hci_cs_set_conn_encrypt(hdev, ev->status);
2381 		break;
2382 
2383 	case HCI_OP_REMOTE_NAME_REQ:
2384 		hci_cs_remote_name_req(hdev, ev->status);
2385 		break;
2386 
2387 	case HCI_OP_READ_REMOTE_FEATURES:
2388 		hci_cs_read_remote_features(hdev, ev->status);
2389 		break;
2390 
2391 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2392 		hci_cs_read_remote_ext_features(hdev, ev->status);
2393 		break;
2394 
2395 	case HCI_OP_SETUP_SYNC_CONN:
2396 		hci_cs_setup_sync_conn(hdev, ev->status);
2397 		break;
2398 
2399 	case HCI_OP_SNIFF_MODE:
2400 		hci_cs_sniff_mode(hdev, ev->status);
2401 		break;
2402 
2403 	case HCI_OP_EXIT_SNIFF_MODE:
2404 		hci_cs_exit_sniff_mode(hdev, ev->status);
2405 		break;
2406 
2407 	case HCI_OP_DISCONNECT:
2408 		hci_cs_disconnect(hdev, ev->status);
2409 		break;
2410 
2411 	case HCI_OP_LE_CREATE_CONN:
2412 		hci_cs_le_create_conn(hdev, ev->status);
2413 		break;
2414 
2415 	case HCI_OP_LE_START_ENC:
2416 		hci_cs_le_start_enc(hdev, ev->status);
2417 		break;
2418 
2419 	default:
2420 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2421 		break;
2422 	}
2423 
2424 	if (ev->opcode != HCI_OP_NOP)
2425 		del_timer(&hdev->cmd_timer);
2426 
2427 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2428 		atomic_set(&hdev->cmd_cnt, 1);
2429 		if (!skb_queue_empty(&hdev->cmd_q))
2430 			queue_work(hdev->workqueue, &hdev->cmd_work);
2431 	}
2432 }
2433 
2434 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2435 {
2436 	struct hci_ev_role_change *ev = (void *) skb->data;
2437 	struct hci_conn *conn;
2438 
2439 	BT_DBG("%s status %d", hdev->name, ev->status);
2440 
2441 	hci_dev_lock(hdev);
2442 
2443 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2444 	if (conn) {
2445 		if (!ev->status) {
2446 			if (ev->role)
2447 				conn->link_mode &= ~HCI_LM_MASTER;
2448 			else
2449 				conn->link_mode |= HCI_LM_MASTER;
2450 		}
2451 
2452 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2453 
2454 		hci_role_switch_cfm(conn, ev->status, ev->role);
2455 	}
2456 
2457 	hci_dev_unlock(hdev);
2458 }
2459 
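/* Number of Completed Packets event (packet-based flow control only):
 * return the completed packets to the per-type transmit quotas (ACL, SCO
 * or LE, falling back to the ACL pool when the controller has no separate
 * LE buffers) and reschedule the TX work.
 */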
2460 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2461 {
2462 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2463 	int i;
2464 
2465 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2466 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2467 		return;
2468 	}
2469 
2470 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2471 			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2472 		BT_DBG("%s bad parameters", hdev->name);
2473 		return;
2474 	}
2475 
2476 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2477 
2478 	for (i = 0; i < ev->num_hndl; i++) {
2479 		struct hci_comp_pkts_info *info = &ev->handles[i];
2480 		struct hci_conn *conn;
2481 		__u16  handle, count;
2482 
2483 		handle = __le16_to_cpu(info->handle);
2484 		count  = __le16_to_cpu(info->count);
2485 
2486 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2487 		if (!conn)
2488 			continue;
2489 
2490 		conn->sent -= count;
2491 
2492 		switch (conn->type) {
2493 		case ACL_LINK:
2494 			hdev->acl_cnt += count;
2495 			if (hdev->acl_cnt > hdev->acl_pkts)
2496 				hdev->acl_cnt = hdev->acl_pkts;
2497 			break;
2498 
2499 		case LE_LINK:
2500 			if (hdev->le_pkts) {
2501 				hdev->le_cnt += count;
2502 				if (hdev->le_cnt > hdev->le_pkts)
2503 					hdev->le_cnt = hdev->le_pkts;
2504 			} else {
2505 				hdev->acl_cnt += count;
2506 				if (hdev->acl_cnt > hdev->acl_pkts)
2507 					hdev->acl_cnt = hdev->acl_pkts;
2508 			}
2509 			break;
2510 
2511 		case SCO_LINK:
2512 			hdev->sco_cnt += count;
2513 			if (hdev->sco_cnt > hdev->sco_pkts)
2514 				hdev->sco_cnt = hdev->sco_pkts;
2515 			break;
2516 
2517 		default:
2518 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2519 			break;
2520 		}
2521 	}
2522 
2523 	queue_work(hdev->workqueue, &hdev->tx_work);
2524 }
2525 
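/* Number of Completed Data Blocks event (block-based flow control only):
 * credit hdev->block_cnt with the completed blocks, capped at num_blocks,
 * and reschedule the TX work.
 */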
2526 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2527 					   struct sk_buff *skb)
2528 {
2529 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2530 	int i;
2531 
2532 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2533 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2534 		return;
2535 	}
2536 
2537 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2538 			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2539 		BT_DBG("%s bad parameters", hdev->name);
2540 		return;
2541 	}
2542 
2543 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2544 								ev->num_hndl);
2545 
2546 	for (i = 0; i < ev->num_hndl; i++) {
2547 		struct hci_comp_blocks_info *info = &ev->handles[i];
2548 		struct hci_conn *conn;
2549 		__u16  handle, block_count;
2550 
2551 		handle = __le16_to_cpu(info->handle);
2552 		block_count = __le16_to_cpu(info->blocks);
2553 
2554 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2555 		if (!conn)
2556 			continue;
2557 
2558 		conn->sent -= block_count;
2559 
2560 		switch (conn->type) {
2561 		case ACL_LINK:
2562 			hdev->block_cnt += block_count;
2563 			if (hdev->block_cnt > hdev->num_blocks)
2564 				hdev->block_cnt = hdev->num_blocks;
2565 			break;
2566 
2567 		default:
2568 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2569 			break;
2570 		}
2571 	}
2572 
2573 	queue_work(hdev->workqueue, &hdev->tx_work);
2574 }
2575 
2576 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2577 {
2578 	struct hci_ev_mode_change *ev = (void *) skb->data;
2579 	struct hci_conn *conn;
2580 
2581 	BT_DBG("%s status %d", hdev->name, ev->status);
2582 
2583 	hci_dev_lock(hdev);
2584 
2585 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2586 	if (conn) {
2587 		conn->mode = ev->mode;
2588 		conn->interval = __le16_to_cpu(ev->interval);
2589 
2590 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2591 			if (conn->mode == HCI_CM_ACTIVE)
2592 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2593 			else
2594 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2595 		}
2596 
2597 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2598 			hci_sco_setup(conn, ev->status);
2599 	}
2600 
2601 	hci_dev_unlock(hdev);
2602 }
2603 
2604 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2605 {
2606 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2607 	struct hci_conn *conn;
2608 
2609 	BT_DBG("%s", hdev->name);
2610 
2611 	hci_dev_lock(hdev);
2612 
2613 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2614 	if (!conn)
2615 		goto unlock;
2616 
2617 	if (conn->state == BT_CONNECTED) {
2618 		hci_conn_hold(conn);
2619 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2620 		hci_conn_put(conn);
2621 	}
2622 
2623 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2624 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2625 					sizeof(ev->bdaddr), &ev->bdaddr);
2626 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2627 		u8 secure;
2628 
2629 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2630 			secure = 1;
2631 		else
2632 			secure = 0;
2633 
2634 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2635 	}
2636 
2637 unlock:
2638 	hci_dev_unlock(hdev);
2639 }
2640 
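/* Link Key Request event: reply with a stored key for the peer unless it
 * is a debug key while debug keys are disabled, or too weak for the
 * security level pending on the connection; otherwise send a negative
 * reply so the controller falls back to pairing.
 */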
2641 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2642 {
2643 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2644 	struct hci_cp_link_key_reply cp;
2645 	struct hci_conn *conn;
2646 	struct link_key *key;
2647 
2648 	BT_DBG("%s", hdev->name);
2649 
2650 	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2651 		return;
2652 
2653 	hci_dev_lock(hdev);
2654 
2655 	key = hci_find_link_key(hdev, &ev->bdaddr);
2656 	if (!key) {
2657 		BT_DBG("%s link key not found for %s", hdev->name,
2658 							batostr(&ev->bdaddr));
2659 		goto not_found;
2660 	}
2661 
2662 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2663 							batostr(&ev->bdaddr));
2664 
2665 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2666 				key->type == HCI_LK_DEBUG_COMBINATION) {
2667 		BT_DBG("%s ignoring debug key", hdev->name);
2668 		goto not_found;
2669 	}
2670 
2671 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2672 	if (conn) {
2673 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2674 				conn->auth_type != 0xff &&
2675 				(conn->auth_type & 0x01)) {
2676 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2677 			goto not_found;
2678 		}
2679 
2680 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2681 				conn->pending_sec_level == BT_SECURITY_HIGH) {
2682 			BT_DBG("%s ignoring key unauthenticated for high security",
2683 								hdev->name);
2684 			goto not_found;
2685 		}
2686 
2687 		conn->key_type = key->type;
2688 		conn->pin_length = key->pin_len;
2689 	}
2690 
2691 	bacpy(&cp.bdaddr, &ev->bdaddr);
2692 	memcpy(cp.link_key, key->val, 16);
2693 
2694 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2695 
2696 	hci_dev_unlock(hdev);
2697 
2698 	return;
2699 
2700 not_found:
2701 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2702 	hci_dev_unlock(hdev);
2703 }
2704 
2705 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2706 {
2707 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2708 	struct hci_conn *conn;
2709 	u8 pin_len = 0;
2710 
2711 	BT_DBG("%s", hdev->name);
2712 
2713 	hci_dev_lock(hdev);
2714 
2715 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2716 	if (conn) {
2717 		hci_conn_hold(conn);
2718 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2719 		pin_len = conn->pin_length;
2720 
2721 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2722 			conn->key_type = ev->key_type;
2723 
2724 		hci_conn_put(conn);
2725 	}
2726 
2727 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2728 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2729 							ev->key_type, pin_len);
2730 
2731 	hci_dev_unlock(hdev);
2732 }
2733 
2734 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2735 {
2736 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2737 	struct hci_conn *conn;
2738 
2739 	BT_DBG("%s status %d", hdev->name, ev->status);
2740 
2741 	hci_dev_lock(hdev);
2742 
2743 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2744 	if (conn && !ev->status) {
2745 		struct inquiry_entry *ie;
2746 
2747 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2748 		if (ie) {
2749 			ie->data.clock_offset = ev->clock_offset;
2750 			ie->timestamp = jiffies;
2751 		}
2752 	}
2753 
2754 	hci_dev_unlock(hdev);
2755 }
2756 
2757 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2758 {
2759 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2760 	struct hci_conn *conn;
2761 
2762 	BT_DBG("%s status %d", hdev->name, ev->status);
2763 
2764 	hci_dev_lock(hdev);
2765 
2766 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2767 	if (conn && !ev->status)
2768 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2769 
2770 	hci_dev_unlock(hdev);
2771 }
2772 
2773 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2774 {
2775 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2776 	struct inquiry_entry *ie;
2777 
2778 	BT_DBG("%s", hdev->name);
2779 
2780 	hci_dev_lock(hdev);
2781 
2782 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2783 	if (ie) {
2784 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2785 		ie->timestamp = jiffies;
2786 	}
2787 
2788 	hci_dev_unlock(hdev);
2789 }
2790 
2791 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792 {
2793 	struct inquiry_data data;
2794 	int num_rsp = *((__u8 *) skb->data);
2795 	bool name_known, ssp;
2796 
2797 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2798 
2799 	if (!num_rsp)
2800 		return;
2801 
2802 	hci_dev_lock(hdev);
2803 
2804 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2805 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2806 		info = (void *) (skb->data + 1);
2807 
2808 		for (; num_rsp; num_rsp--, info++) {
2809 			bacpy(&data.bdaddr, &info->bdaddr);
2810 			data.pscan_rep_mode	= info->pscan_rep_mode;
2811 			data.pscan_period_mode	= info->pscan_period_mode;
2812 			data.pscan_mode		= info->pscan_mode;
2813 			memcpy(data.dev_class, info->dev_class, 3);
2814 			data.clock_offset	= info->clock_offset;
2815 			data.rssi		= info->rssi;
2816 			data.ssp_mode		= 0x00;
2817 
2818 			name_known = hci_inquiry_cache_update(hdev, &data,
2819 							      false, &ssp);
2820 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2821 					  info->dev_class, info->rssi,
2822 					  !name_known, ssp, NULL, 0);
2823 		}
2824 	} else {
2825 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2826 
2827 		for (; num_rsp; num_rsp--, info++) {
2828 			bacpy(&data.bdaddr, &info->bdaddr);
2829 			data.pscan_rep_mode	= info->pscan_rep_mode;
2830 			data.pscan_period_mode	= info->pscan_period_mode;
2831 			data.pscan_mode		= 0x00;
2832 			memcpy(data.dev_class, info->dev_class, 3);
2833 			data.clock_offset	= info->clock_offset;
2834 			data.rssi		= info->rssi;
2835 			data.ssp_mode		= 0x00;
2836 			name_known = hci_inquiry_cache_update(hdev, &data,
2837 							      false, &ssp);
2838 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2839 					  info->dev_class, info->rssi,
2840 					  !name_known, ssp, NULL, 0);
2841 		}
2842 	}
2843 
2844 	hci_dev_unlock(hdev);
2845 }
2846 
2847 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2848 {
2849 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2850 	struct hci_conn *conn;
2851 
2852 	BT_DBG("%s", hdev->name);
2853 
2854 	hci_dev_lock(hdev);
2855 
2856 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2857 	if (!conn)
2858 		goto unlock;
2859 
2860 	if (!ev->status && ev->page == 0x01) {
2861 		struct inquiry_entry *ie;
2862 
2863 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2864 		if (ie)
2865 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2866 
2867 		if (ev->features[0] & LMP_HOST_SSP)
2868 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2869 	}
2870 
2871 	if (conn->state != BT_CONFIG)
2872 		goto unlock;
2873 
2874 	if (!ev->status) {
2875 		struct hci_cp_remote_name_req cp;
2876 		memset(&cp, 0, sizeof(cp));
2877 		bacpy(&cp.bdaddr, &conn->dst);
2878 		cp.pscan_rep_mode = 0x02;
2879 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2880 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2881 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2882 				      conn->dst_type, 0, NULL, 0,
2883 				      conn->dev_class);
2884 
2885 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2886 		conn->state = BT_CONNECTED;
2887 		hci_proto_connect_cfm(conn, ev->status);
2888 		hci_conn_put(conn);
2889 	}
2890 
2891 unlock:
2892 	hci_dev_unlock(hdev);
2893 }
2894 
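/* Synchronous Connection Complete event: finish SCO/eSCO setup. For a
 * few eSCO-specific failure codes an outgoing connection is retried once
 * with an adjusted packet type selection before it is torn down.
 */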
2895 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2896 {
2897 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2898 	struct hci_conn *conn;
2899 
2900 	BT_DBG("%s status %d", hdev->name, ev->status);
2901 
2902 	hci_dev_lock(hdev);
2903 
2904 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2905 	if (!conn) {
2906 		if (ev->link_type == ESCO_LINK)
2907 			goto unlock;
2908 
2909 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2910 		if (!conn)
2911 			goto unlock;
2912 
2913 		conn->type = SCO_LINK;
2914 	}
2915 
2916 	switch (ev->status) {
2917 	case 0x00:
2918 		conn->handle = __le16_to_cpu(ev->handle);
2919 		conn->state  = BT_CONNECTED;
2920 
2921 		hci_conn_hold_device(conn);
2922 		hci_conn_add_sysfs(conn);
2923 		break;
2924 
2925 	case 0x11:	/* Unsupported Feature or Parameter Value */
2926 	case 0x1c:	/* SCO interval rejected */
2927 	case 0x1a:	/* Unsupported Remote Feature */
2928 	case 0x1f:	/* Unspecified error */
2929 		if (conn->out && conn->attempt < 2) {
2930 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2931 					(hdev->esco_type & EDR_ESCO_MASK);
2932 			hci_setup_sync(conn, conn->link->handle);
2933 			goto unlock;
2934 		}
2935 		/* fall through */
2936 
2937 	default:
2938 		conn->state = BT_CLOSED;
2939 		break;
2940 	}
2941 
2942 	hci_proto_connect_cfm(conn, ev->status);
2943 	if (ev->status)
2944 		hci_conn_del(conn);
2945 
2946 unlock:
2947 	hci_dev_unlock(hdev);
2948 }
2949 
2950 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2951 {
2952 	BT_DBG("%s", hdev->name);
2953 }
2954 
2955 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2956 {
2957 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2958 
2959 	BT_DBG("%s status %d", hdev->name, ev->status);
2960 }
2961 
2962 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2963 {
2964 	struct inquiry_data data;
2965 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2966 	int num_rsp = *((__u8 *) skb->data);
2967 
2968 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2969 
2970 	if (!num_rsp)
2971 		return;
2972 
2973 	hci_dev_lock(hdev);
2974 
2975 	for (; num_rsp; num_rsp--, info++) {
2976 		bool name_known, ssp;
2977 
2978 		bacpy(&data.bdaddr, &info->bdaddr);
2979 		data.pscan_rep_mode	= info->pscan_rep_mode;
2980 		data.pscan_period_mode	= info->pscan_period_mode;
2981 		data.pscan_mode		= 0x00;
2982 		memcpy(data.dev_class, info->dev_class, 3);
2983 		data.clock_offset	= info->clock_offset;
2984 		data.rssi		= info->rssi;
2985 		data.ssp_mode		= 0x01;
2986 
2987 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
2988 			name_known = eir_has_data_type(info->data,
2989 						       sizeof(info->data),
2990 						       EIR_NAME_COMPLETE);
2991 		else
2992 			name_known = true;
2993 
2994 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2995 						      &ssp);
2996 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2997 				  info->dev_class, info->rssi, !name_known,
2998 				  ssp, info->data, sizeof(info->data));
2999 	}
3000 
3001 	hci_dev_unlock(hdev);
3002 }
3003 
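/* Derive the authentication requirements to use in our IO Capability
 * Reply from what the remote side requested and both sides' IO
 * capabilities.
 */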
3004 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3005 {
3006 	/* If remote requests dedicated bonding follow that lead */
3007 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3008 		/* If both remote and local IO capabilities allow MITM
3009 		 * protection then require it, otherwise don't */
3010 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3011 			return 0x02;
3012 		else
3013 			return 0x03;
3014 	}
3015 
3016 	/* If remote requests no-bonding follow that lead */
3017 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3018 		return conn->remote_auth | (conn->auth_type & 0x01);
3019 
3020 	return conn->auth_type;
3021 }
3022 
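/* IO Capability Request event: when pairing is allowed, reply with our
 * IO capability, authentication requirements and OOB data presence;
 * otherwise reject with HCI_ERROR_PAIRING_NOT_ALLOWED.
 */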
3023 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3024 {
3025 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3026 	struct hci_conn *conn;
3027 
3028 	BT_DBG("%s", hdev->name);
3029 
3030 	hci_dev_lock(hdev);
3031 
3032 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3033 	if (!conn)
3034 		goto unlock;
3035 
3036 	hci_conn_hold(conn);
3037 
3038 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3039 		goto unlock;
3040 
3041 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3042 			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3043 		struct hci_cp_io_capability_reply cp;
3044 
3045 		bacpy(&cp.bdaddr, &ev->bdaddr);
3046 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3047 		 * since KeyboardDisplay is not defined for this reply in the BT spec. */
3048 		cp.capability = (conn->io_capability == 0x04) ?
3049 						0x01 : conn->io_capability;
3050 		conn->auth_type = hci_get_auth_req(conn);
3051 		cp.authentication = conn->auth_type;
3052 
3053 		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3054 				hci_find_remote_oob_data(hdev, &conn->dst))
3055 			cp.oob_data = 0x01;
3056 		else
3057 			cp.oob_data = 0x00;
3058 
3059 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3060 							sizeof(cp), &cp);
3061 	} else {
3062 		struct hci_cp_io_capability_neg_reply cp;
3063 
3064 		bacpy(&cp.bdaddr, &ev->bdaddr);
3065 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3066 
3067 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3068 							sizeof(cp), &cp);
3069 	}
3070 
3071 unlock:
3072 	hci_dev_unlock(hdev);
3073 }
3074 
3075 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3076 {
3077 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3078 	struct hci_conn *conn;
3079 
3080 	BT_DBG("%s", hdev->name);
3081 
3082 	hci_dev_lock(hdev);
3083 
3084 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3085 	if (!conn)
3086 		goto unlock;
3087 
3088 	conn->remote_cap = ev->capability;
3089 	conn->remote_auth = ev->authentication;
3090 	if (ev->oob_data)
3091 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3092 
3093 unlock:
3094 	hci_dev_unlock(hdev);
3095 }
3096 
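/* User Confirmation Request event: reject if we need MITM protection the
 * remote side cannot provide, auto-accept (optionally after a configured
 * delay) when neither side requires MITM, and otherwise hand the request
 * to user space via mgmt.
 */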
3097 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3098 							struct sk_buff *skb)
3099 {
3100 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3101 	int loc_mitm, rem_mitm, confirm_hint = 0;
3102 	struct hci_conn *conn;
3103 
3104 	BT_DBG("%s", hdev->name);
3105 
3106 	hci_dev_lock(hdev);
3107 
3108 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3109 		goto unlock;
3110 
3111 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3112 	if (!conn)
3113 		goto unlock;
3114 
3115 	loc_mitm = (conn->auth_type & 0x01);
3116 	rem_mitm = (conn->remote_auth & 0x01);
3117 
3118 	/* If we require MITM but the remote device can't provide that
3119 	 * (it has NoInputNoOutput) then reject the confirmation
3120 	 * request. The only exception is when we're dedicated bonding
3121 	 * initiators (connect_cfm_cb set) since then we always have the MITM
3122 	 * bit set. */
3123 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3124 		BT_DBG("Rejecting request: remote device can't provide MITM");
3125 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3126 					sizeof(ev->bdaddr), &ev->bdaddr);
3127 		goto unlock;
3128 	}
3129 
3130 	/* If neither side requires MITM protection, auto-accept */
3131 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3132 				(!rem_mitm || conn->io_capability == 0x03)) {
3133 
3134 		/* If we're not the initiators, request authorization to
3135 		 * proceed from user space (mgmt_user_confirm with
3136 		 * confirm_hint set to 1). */
3137 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3138 			BT_DBG("Confirming auto-accept as acceptor");
3139 			confirm_hint = 1;
3140 			goto confirm;
3141 		}
3142 
3143 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3144 						hdev->auto_accept_delay);
3145 
3146 		if (hdev->auto_accept_delay > 0) {
3147 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3148 			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3149 			goto unlock;
3150 		}
3151 
3152 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3153 						sizeof(ev->bdaddr), &ev->bdaddr);
3154 		goto unlock;
3155 	}
3156 
3157 confirm:
3158 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3159 				  confirm_hint);
3160 
3161 unlock:
3162 	hci_dev_unlock(hdev);
3163 }
3164 
3165 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3166 							struct sk_buff *skb)
3167 {
3168 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3169 
3170 	BT_DBG("%s", hdev->name);
3171 
3172 	hci_dev_lock(hdev);
3173 
3174 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3175 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3176 
3177 	hci_dev_unlock(hdev);
3178 }
3179 
3180 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3181 {
3182 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3183 	struct hci_conn *conn;
3184 
3185 	BT_DBG("%s", hdev->name);
3186 
3187 	hci_dev_lock(hdev);
3188 
3189 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3190 	if (!conn)
3191 		goto unlock;
3192 
3193 	/* To avoid duplicate auth_failed events to user space we check
3194 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3195 	 * initiated the authentication. A traditional auth_complete
3196 	 * event is always produced for the initiator and is also mapped to
3197 	 * the mgmt_auth_failed event */
3198 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3199 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3200 				 ev->status);
3201 
3202 	hci_conn_put(conn);
3203 
3204 unlock:
3205 	hci_dev_unlock(hdev);
3206 }
3207 
3208 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3209 {
3210 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3211 	struct inquiry_entry *ie;
3212 
3213 	BT_DBG("%s", hdev->name);
3214 
3215 	hci_dev_lock(hdev);
3216 
3217 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3218 	if (ie)
3219 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3220 
3221 	hci_dev_unlock(hdev);
3222 }
3223 
3224 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3225 						   struct sk_buff *skb)
3226 {
3227 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3228 	struct oob_data *data;
3229 
3230 	BT_DBG("%s", hdev->name);
3231 
3232 	hci_dev_lock(hdev);
3233 
3234 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3235 		goto unlock;
3236 
3237 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3238 	if (data) {
3239 		struct hci_cp_remote_oob_data_reply cp;
3240 
3241 		bacpy(&cp.bdaddr, &ev->bdaddr);
3242 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3243 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3244 
3245 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3246 									&cp);
3247 	} else {
3248 		struct hci_cp_remote_oob_data_neg_reply cp;
3249 
3250 		bacpy(&cp.bdaddr, &ev->bdaddr);
3251 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3252 									&cp);
3253 	}
3254 
3255 unlock:
3256 	hci_dev_unlock(hdev);
3257 }
3258 
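/* LE Connection Complete event: create the hci_conn on demand for
 * incoming connections, report the outcome to mgmt and move successful
 * connections to BT_CONNECTED.
 */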
3259 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3260 {
3261 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3262 	struct hci_conn *conn;
3263 
3264 	BT_DBG("%s status %d", hdev->name, ev->status);
3265 
3266 	hci_dev_lock(hdev);
3267 
3268 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3269 	if (!conn) {
3270 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3271 		if (!conn) {
3272 			BT_ERR("No memory for new connection");
3273 			hci_dev_unlock(hdev);
3274 			return;
3275 		}
3276 
3277 		conn->dst_type = ev->bdaddr_type;
3278 	}
3279 
3280 	if (ev->status) {
3281 		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3282 						conn->dst_type, ev->status);
3283 		hci_proto_connect_cfm(conn, ev->status);
3284 		conn->state = BT_CLOSED;
3285 		hci_conn_del(conn);
3286 		goto unlock;
3287 	}
3288 
3289 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3290 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3291 				      conn->dst_type, 0, NULL, 0, NULL);
3292 
3293 	conn->sec_level = BT_SECURITY_LOW;
3294 	conn->handle = __le16_to_cpu(ev->handle);
3295 	conn->state = BT_CONNECTED;
3296 
3297 	hci_conn_hold_device(conn);
3298 	hci_conn_add_sysfs(conn);
3299 
3300 	hci_proto_connect_cfm(conn, ev->status);
3301 
3302 unlock:
3303 	hci_dev_unlock(hdev);
3304 }
3305 
3306 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3307 						struct sk_buff *skb)
3308 {
3309 	u8 num_reports = skb->data[0];
3310 	void *ptr = &skb->data[1];
3311 	s8 rssi;
3312 
3313 	hci_dev_lock(hdev);
3314 
3315 	while (num_reports--) {
3316 		struct hci_ev_le_advertising_info *ev = ptr;
3317 
3318 		hci_add_adv_entry(hdev, ev);
3319 
3320 		rssi = ev->data[ev->length];
3321 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3322 				  NULL, rssi, 0, 1, ev->data, ev->length);
3323 
3324 		ptr += sizeof(*ev) + ev->length + 1;
3325 	}
3326 
3327 	hci_dev_unlock(hdev);
3328 }
3329 
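/* LE Long Term Key Request event: reply with a stored LTK matching the
 * EDiv/Rand values if one is found (an STK is single-use and deleted
 * after the reply), otherwise send a negative reply.
 */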
3330 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3331 						struct sk_buff *skb)
3332 {
3333 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3334 	struct hci_cp_le_ltk_reply cp;
3335 	struct hci_cp_le_ltk_neg_reply neg;
3336 	struct hci_conn *conn;
3337 	struct smp_ltk *ltk;
3338 
3339 	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3340 
3341 	hci_dev_lock(hdev);
3342 
3343 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3344 	if (conn == NULL)
3345 		goto not_found;
3346 
3347 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3348 	if (ltk == NULL)
3349 		goto not_found;
3350 
3351 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3352 	cp.handle = cpu_to_le16(conn->handle);
3353 
3354 	if (ltk->authenticated)
3355 		conn->sec_level = BT_SECURITY_HIGH;
3356 
3357 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3358 
3359 	if (ltk->type & HCI_SMP_STK) {
3360 		list_del(&ltk->list);
3361 		kfree(ltk);
3362 	}
3363 
3364 	hci_dev_unlock(hdev);
3365 
3366 	return;
3367 
3368 not_found:
3369 	neg.handle = ev->handle;
3370 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3371 	hci_dev_unlock(hdev);
3372 }
3373 
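/* LE Meta event: strip the meta header and dispatch on the subevent code. */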
3374 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3375 {
3376 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3377 
3378 	skb_pull(skb, sizeof(*le_ev));
3379 
3380 	switch (le_ev->subevent) {
3381 	case HCI_EV_LE_CONN_COMPLETE:
3382 		hci_le_conn_complete_evt(hdev, skb);
3383 		break;
3384 
3385 	case HCI_EV_LE_ADVERTISING_REPORT:
3386 		hci_le_adv_report_evt(hdev, skb);
3387 		break;
3388 
3389 	case HCI_EV_LE_LTK_REQ:
3390 		hci_le_ltk_request_evt(hdev, skb);
3391 		break;
3392 
3393 	default:
3394 		break;
3395 	}
3396 }
3397 
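/* Entry point for incoming HCI event packets: strip the event header and
 * dispatch to the per-event handlers above.
 */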
3398 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3399 {
3400 	struct hci_event_hdr *hdr = (void *) skb->data;
3401 	__u8 event = hdr->evt;
3402 
3403 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3404 
3405 	switch (event) {
3406 	case HCI_EV_INQUIRY_COMPLETE:
3407 		hci_inquiry_complete_evt(hdev, skb);
3408 		break;
3409 
3410 	case HCI_EV_INQUIRY_RESULT:
3411 		hci_inquiry_result_evt(hdev, skb);
3412 		break;
3413 
3414 	case HCI_EV_CONN_COMPLETE:
3415 		hci_conn_complete_evt(hdev, skb);
3416 		break;
3417 
3418 	case HCI_EV_CONN_REQUEST:
3419 		hci_conn_request_evt(hdev, skb);
3420 		break;
3421 
3422 	case HCI_EV_DISCONN_COMPLETE:
3423 		hci_disconn_complete_evt(hdev, skb);
3424 		break;
3425 
3426 	case HCI_EV_AUTH_COMPLETE:
3427 		hci_auth_complete_evt(hdev, skb);
3428 		break;
3429 
3430 	case HCI_EV_REMOTE_NAME:
3431 		hci_remote_name_evt(hdev, skb);
3432 		break;
3433 
3434 	case HCI_EV_ENCRYPT_CHANGE:
3435 		hci_encrypt_change_evt(hdev, skb);
3436 		break;
3437 
3438 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3439 		hci_change_link_key_complete_evt(hdev, skb);
3440 		break;
3441 
3442 	case HCI_EV_REMOTE_FEATURES:
3443 		hci_remote_features_evt(hdev, skb);
3444 		break;
3445 
3446 	case HCI_EV_REMOTE_VERSION:
3447 		hci_remote_version_evt(hdev, skb);
3448 		break;
3449 
3450 	case HCI_EV_QOS_SETUP_COMPLETE:
3451 		hci_qos_setup_complete_evt(hdev, skb);
3452 		break;
3453 
3454 	case HCI_EV_CMD_COMPLETE:
3455 		hci_cmd_complete_evt(hdev, skb);
3456 		break;
3457 
3458 	case HCI_EV_CMD_STATUS:
3459 		hci_cmd_status_evt(hdev, skb);
3460 		break;
3461 
3462 	case HCI_EV_ROLE_CHANGE:
3463 		hci_role_change_evt(hdev, skb);
3464 		break;
3465 
3466 	case HCI_EV_NUM_COMP_PKTS:
3467 		hci_num_comp_pkts_evt(hdev, skb);
3468 		break;
3469 
3470 	case HCI_EV_MODE_CHANGE:
3471 		hci_mode_change_evt(hdev, skb);
3472 		break;
3473 
3474 	case HCI_EV_PIN_CODE_REQ:
3475 		hci_pin_code_request_evt(hdev, skb);
3476 		break;
3477 
3478 	case HCI_EV_LINK_KEY_REQ:
3479 		hci_link_key_request_evt(hdev, skb);
3480 		break;
3481 
3482 	case HCI_EV_LINK_KEY_NOTIFY:
3483 		hci_link_key_notify_evt(hdev, skb);
3484 		break;
3485 
3486 	case HCI_EV_CLOCK_OFFSET:
3487 		hci_clock_offset_evt(hdev, skb);
3488 		break;
3489 
3490 	case HCI_EV_PKT_TYPE_CHANGE:
3491 		hci_pkt_type_change_evt(hdev, skb);
3492 		break;
3493 
3494 	case HCI_EV_PSCAN_REP_MODE:
3495 		hci_pscan_rep_mode_evt(hdev, skb);
3496 		break;
3497 
3498 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3499 		hci_inquiry_result_with_rssi_evt(hdev, skb);
3500 		break;
3501 
3502 	case HCI_EV_REMOTE_EXT_FEATURES:
3503 		hci_remote_ext_features_evt(hdev, skb);
3504 		break;
3505 
3506 	case HCI_EV_SYNC_CONN_COMPLETE:
3507 		hci_sync_conn_complete_evt(hdev, skb);
3508 		break;
3509 
3510 	case HCI_EV_SYNC_CONN_CHANGED:
3511 		hci_sync_conn_changed_evt(hdev, skb);
3512 		break;
3513 
3514 	case HCI_EV_SNIFF_SUBRATE:
3515 		hci_sniff_subrate_evt(hdev, skb);
3516 		break;
3517 
3518 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3519 		hci_extended_inquiry_result_evt(hdev, skb);
3520 		break;
3521 
3522 	case HCI_EV_IO_CAPA_REQUEST:
3523 		hci_io_capa_request_evt(hdev, skb);
3524 		break;
3525 
3526 	case HCI_EV_IO_CAPA_REPLY:
3527 		hci_io_capa_reply_evt(hdev, skb);
3528 		break;
3529 
3530 	case HCI_EV_USER_CONFIRM_REQUEST:
3531 		hci_user_confirm_request_evt(hdev, skb);
3532 		break;
3533 
3534 	case HCI_EV_USER_PASSKEY_REQUEST:
3535 		hci_user_passkey_request_evt(hdev, skb);
3536 		break;
3537 
3538 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3539 		hci_simple_pair_complete_evt(hdev, skb);
3540 		break;
3541 
3542 	case HCI_EV_REMOTE_HOST_FEATURES:
3543 		hci_remote_host_features_evt(hdev, skb);
3544 		break;
3545 
3546 	case HCI_EV_LE_META:
3547 		hci_le_meta_evt(hdev, skb);
3548 		break;
3549 
3550 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3551 		hci_remote_oob_data_request_evt(hdev, skb);
3552 		break;
3553 
3554 	case HCI_EV_NUM_COMP_BLOCKS:
3555 		hci_num_comp_blocks_evt(hdev, skb);
3556 		break;
3557 
3558 	default:
3559 		BT_DBG("%s event 0x%x", hdev->name, event);
3560 		break;
3561 	}
3562 
3563 	kfree_skb(skb);
3564 	hdev->stat.evt_rx++;
3565 }
3566