xref: /linux/net/bluetooth/hci_event.c (revision 9c736ace0666efe68efd53fcdfa2c6653c3e0e72)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023-2024 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI event handling. */
27 
28 #include <linux/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_debugfs.h"
37 #include "hci_codec.h"
38 #include "smp.h"
39 #include "msft.h"
40 #include "eir.h"
41 
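/* A 16-octet all-zero value; presumably used to detect and reject
 * invalid (all-zero) keys elsewhere in this file.
 */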
42 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
43 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
44 
45 /* Handle HCI Event packets */
46 
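/* The three *_skb_pull() helpers below validate that the skb actually
 * contains at least @len bytes before the caller dereferences the
 * returned payload; a NULL return flags a malformed packet.
 */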
47 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
48 			     u8 ev, size_t len)
49 {
50 	void *data;
51 
52 	data = skb_pull_data(skb, len);
53 	if (!data)
54 		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
55 
56 	return data;
57 }
58 
59 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
60 			     u16 op, size_t len)
61 {
62 	void *data;
63 
64 	data = skb_pull_data(skb, len);
65 	if (!data)
66 		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
67 
68 	return data;
69 }
70 
71 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
72 				u8 ev, size_t len)
73 {
74 	void *data;
75 
76 	data = skb_pull_data(skb, len);
77 	if (!data)
78 		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
79 
80 	return data;
81 }
82 
83 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
84 				struct sk_buff *skb)
85 {
86 	struct hci_ev_status *rp = data;
87 
88 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
89 
90 	/* It is possible that we receive the Inquiry Complete event right
91 	 * before the Inquiry Cancel Command Complete event, in which case
92 	 * the latter event should have a status of Command Disallowed.
93 	 * This should not be treated as an error, since
94 	 * we actually achieve what Inquiry Cancel wants to achieve,
95 	 * which is to end the last Inquiry session.
96 	 */
97 	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
98 		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
99 		rp->status = 0x00;
100 	}
101 
102 	if (rp->status)
103 		return rp->status;
104 
105 	clear_bit(HCI_INQUIRY, &hdev->flags);
106 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
107 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
108 
109 	hci_dev_lock(hdev);
110 	/* Set discovery state to stopped if we're not doing LE active
111 	 * scanning.
112 	 */
113 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
114 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
115 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
116 	hci_dev_unlock(hdev);
117 
118 	return rp->status;
119 }
120 
121 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
122 			      struct sk_buff *skb)
123 {
124 	struct hci_ev_status *rp = data;
125 
126 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
127 
128 	if (rp->status)
129 		return rp->status;
130 
131 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
132 
133 	return rp->status;
134 }
135 
136 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
137 				   struct sk_buff *skb)
138 {
139 	struct hci_ev_status *rp = data;
140 
141 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
142 
143 	if (rp->status)
144 		return rp->status;
145 
146 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
147 
148 	return rp->status;
149 }
150 
151 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
152 					struct sk_buff *skb)
153 {
154 	struct hci_rp_remote_name_req_cancel *rp = data;
155 
156 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
157 
158 	return rp->status;
159 }
160 
161 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
162 				struct sk_buff *skb)
163 {
164 	struct hci_rp_role_discovery *rp = data;
165 	struct hci_conn *conn;
166 
167 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
168 
169 	if (rp->status)
170 		return rp->status;
171 
172 	hci_dev_lock(hdev);
173 
174 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
175 	if (conn)
176 		conn->role = rp->role;
177 
178 	hci_dev_unlock(hdev);
179 
180 	return rp->status;
181 }
182 
183 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
184 				  struct sk_buff *skb)
185 {
186 	struct hci_rp_read_link_policy *rp = data;
187 	struct hci_conn *conn;
188 
189 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
190 
191 	if (rp->status)
192 		return rp->status;
193 
194 	hci_dev_lock(hdev);
195 
196 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
197 	if (conn)
198 		conn->link_policy = __le16_to_cpu(rp->policy);
199 
200 	hci_dev_unlock(hdev);
201 
202 	return rp->status;
203 }
204 
205 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
206 				   struct sk_buff *skb)
207 {
208 	struct hci_rp_write_link_policy *rp = data;
209 	struct hci_conn *conn;
210 	void *sent;
211 
212 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
213 
214 	if (rp->status)
215 		return rp->status;
216 
217 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
218 	if (!sent)
219 		return rp->status;
220 
221 	hci_dev_lock(hdev);
222 
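	/* The Write_Link_Policy_Settings parameters are the connection
	 * handle (2 octets) followed by the policy, hence the offset of 2
	 * when re-reading the value that was sent.
	 */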
223 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
224 	if (conn)
225 		conn->link_policy = get_unaligned_le16(sent + 2);
226 
227 	hci_dev_unlock(hdev);
228 
229 	return rp->status;
230 }
231 
232 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
233 				      struct sk_buff *skb)
234 {
235 	struct hci_rp_read_def_link_policy *rp = data;
236 
237 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
238 
239 	if (rp->status)
240 		return rp->status;
241 
242 	hdev->link_policy = __le16_to_cpu(rp->policy);
243 
244 	return rp->status;
245 }
246 
247 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
248 				       struct sk_buff *skb)
249 {
250 	struct hci_ev_status *rp = data;
251 	void *sent;
252 
253 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
254 
255 	if (rp->status)
256 		return rp->status;
257 
258 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
259 	if (!sent)
260 		return rp->status;
261 
262 	hdev->link_policy = get_unaligned_le16(sent);
263 
264 	return rp->status;
265 }
266 
267 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
268 {
269 	struct hci_ev_status *rp = data;
270 
271 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
272 
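	/* The reset attempt has completed, so drop HCI_RESET regardless of
	 * whether the command itself succeeded.
	 */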
273 	clear_bit(HCI_RESET, &hdev->flags);
274 
275 	if (rp->status)
276 		return rp->status;
277 
278 	/* Reset all non-persistent flags */
279 	hci_dev_clear_volatile_flags(hdev);
280 
281 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
282 
283 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
284 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
285 
286 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
287 	hdev->adv_data_len = 0;
288 
289 	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
290 	hdev->scan_rsp_data_len = 0;
291 
292 	hdev->le_scan_type = LE_SCAN_PASSIVE;
293 
294 	hdev->ssp_debug_mode = 0;
295 
296 	hci_bdaddr_list_clear(&hdev->le_accept_list);
297 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
298 
299 	return rp->status;
300 }
301 
302 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
303 				      struct sk_buff *skb)
304 {
305 	struct hci_rp_read_stored_link_key *rp = data;
306 	struct hci_cp_read_stored_link_key *sent;
307 
308 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
309 
310 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
311 	if (!sent)
312 		return rp->status;
313 
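	/* Only cache the totals when the command asked for all stored keys
	 * (Read_All_Flag set to 0x01).
	 */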
314 	if (!rp->status && sent->read_all == 0x01) {
315 		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
316 		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
317 	}
318 
319 	return rp->status;
320 }
321 
322 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
323 					struct sk_buff *skb)
324 {
325 	struct hci_rp_delete_stored_link_key *rp = data;
326 	u16 num_keys;
327 
328 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
329 
330 	if (rp->status)
331 		return rp->status;
332 
333 	num_keys = le16_to_cpu(rp->num_keys);
334 
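	/* Clamp at zero rather than letting the cached count underflow. */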
335 	if (num_keys <= hdev->stored_num_keys)
336 		hdev->stored_num_keys -= num_keys;
337 	else
338 		hdev->stored_num_keys = 0;
339 
340 	return rp->status;
341 }
342 
343 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
344 				  struct sk_buff *skb)
345 {
346 	struct hci_ev_status *rp = data;
347 	void *sent;
348 
349 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
350 
351 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
352 	if (!sent)
353 		return rp->status;
354 
355 	hci_dev_lock(hdev);
356 
357 	if (hci_dev_test_flag(hdev, HCI_MGMT))
358 		mgmt_set_local_name_complete(hdev, sent, rp->status);
359 	else if (!rp->status)
360 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
361 
362 	hci_dev_unlock(hdev);
363 
364 	return rp->status;
365 }
366 
367 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
368 				 struct sk_buff *skb)
369 {
370 	struct hci_rp_read_local_name *rp = data;
371 
372 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
373 
374 	if (rp->status)
375 		return rp->status;
376 
377 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
378 	    hci_dev_test_flag(hdev, HCI_CONFIG))
379 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
380 
381 	return rp->status;
382 }
383 
384 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
385 				   struct sk_buff *skb)
386 {
387 	struct hci_ev_status *rp = data;
388 	void *sent;
389 
390 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
391 
392 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
393 	if (!sent)
394 		return rp->status;
395 
396 	hci_dev_lock(hdev);
397 
398 	if (!rp->status) {
399 		__u8 param = *((__u8 *) sent);
400 
401 		if (param == AUTH_ENABLED)
402 			set_bit(HCI_AUTH, &hdev->flags);
403 		else
404 			clear_bit(HCI_AUTH, &hdev->flags);
405 	}
406 
407 	if (hci_dev_test_flag(hdev, HCI_MGMT))
408 		mgmt_auth_enable_complete(hdev, rp->status);
409 
410 	hci_dev_unlock(hdev);
411 
412 	return rp->status;
413 }
414 
415 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
416 				    struct sk_buff *skb)
417 {
418 	struct hci_ev_status *rp = data;
419 	__u8 param;
420 	void *sent;
421 
422 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
423 
424 	if (rp->status)
425 		return rp->status;
426 
427 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
428 	if (!sent)
429 		return rp->status;
430 
431 	param = *((__u8 *) sent);
432 
433 	if (param)
434 		set_bit(HCI_ENCRYPT, &hdev->flags);
435 	else
436 		clear_bit(HCI_ENCRYPT, &hdev->flags);
437 
438 	return rp->status;
439 }
440 
441 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
442 				   struct sk_buff *skb)
443 {
444 	struct hci_ev_status *rp = data;
445 	__u8 param;
446 	void *sent;
447 
448 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
449 
450 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
451 	if (!sent)
452 		return rp->status;
453 
454 	param = *((__u8 *) sent);
455 
456 	hci_dev_lock(hdev);
457 
458 	if (rp->status) {
459 		hdev->discov_timeout = 0;
460 		goto done;
461 	}
462 
463 	if (param & SCAN_INQUIRY)
464 		set_bit(HCI_ISCAN, &hdev->flags);
465 	else
466 		clear_bit(HCI_ISCAN, &hdev->flags);
467 
468 	if (param & SCAN_PAGE)
469 		set_bit(HCI_PSCAN, &hdev->flags);
470 	else
471 		clear_bit(HCI_PSCAN, &hdev->flags);
472 
473 done:
474 	hci_dev_unlock(hdev);
475 
476 	return rp->status;
477 }
478 
479 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
480 				  struct sk_buff *skb)
481 {
482 	struct hci_ev_status *rp = data;
483 	struct hci_cp_set_event_filter *cp;
484 	void *sent;
485 
486 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
487 
488 	if (rp->status)
489 		return rp->status;
490 
491 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
492 	if (!sent)
493 		return rp->status;
494 
495 	cp = (struct hci_cp_set_event_filter *)sent;
496 
497 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
498 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
499 	else
500 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
501 
502 	return rp->status;
503 }
504 
505 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
506 				   struct sk_buff *skb)
507 {
508 	struct hci_rp_read_class_of_dev *rp = data;
509 
510 	if (WARN_ON(!hdev))
511 		return HCI_ERROR_UNSPECIFIED;
512 
513 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
514 
515 	if (rp->status)
516 		return rp->status;
517 
518 	memcpy(hdev->dev_class, rp->dev_class, 3);
519 
520 	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
521 		   hdev->dev_class[1], hdev->dev_class[0]);
522 
523 	return rp->status;
524 }
525 
526 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
527 				    struct sk_buff *skb)
528 {
529 	struct hci_ev_status *rp = data;
530 	void *sent;
531 
532 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
533 
534 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
535 	if (!sent)
536 		return rp->status;
537 
538 	hci_dev_lock(hdev);
539 
540 	if (!rp->status)
541 		memcpy(hdev->dev_class, sent, 3);
542 
543 	if (hci_dev_test_flag(hdev, HCI_MGMT))
544 		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
545 
546 	hci_dev_unlock(hdev);
547 
548 	return rp->status;
549 }
550 
551 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
552 				    struct sk_buff *skb)
553 {
554 	struct hci_rp_read_voice_setting *rp = data;
555 	__u16 setting;
556 
557 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
558 
559 	if (rp->status)
560 		return rp->status;
561 
562 	setting = __le16_to_cpu(rp->voice_setting);
563 
564 	if (hdev->voice_setting == setting)
565 		return rp->status;
566 
567 	hdev->voice_setting = setting;
568 
569 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
570 
571 	if (hdev->notify)
572 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
573 
574 	return rp->status;
575 }
576 
577 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
578 				     struct sk_buff *skb)
579 {
580 	struct hci_ev_status *rp = data;
581 	__u16 setting;
582 	void *sent;
583 
584 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
585 
586 	if (rp->status)
587 		return rp->status;
588 
589 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
590 	if (!sent)
591 		return rp->status;
592 
593 	setting = get_unaligned_le16(sent);
594 
595 	if (hdev->voice_setting == setting)
596 		return rp->status;
597 
598 	hdev->voice_setting = setting;
599 
600 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
601 
602 	if (hdev->notify)
603 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
604 
605 	return rp->status;
606 }
607 
608 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
609 					struct sk_buff *skb)
610 {
611 	struct hci_rp_read_num_supported_iac *rp = data;
612 
613 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
614 
615 	if (rp->status)
616 		return rp->status;
617 
618 	hdev->num_iac = rp->num_iac;
619 
620 	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
621 
622 	return rp->status;
623 }
624 
625 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
626 				struct sk_buff *skb)
627 {
628 	struct hci_ev_status *rp = data;
629 	struct hci_cp_write_ssp_mode *sent;
630 
631 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
632 
633 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
634 	if (!sent)
635 		return rp->status;
636 
637 	hci_dev_lock(hdev);
638 
639 	if (!rp->status) {
640 		if (sent->mode)
641 			hdev->features[1][0] |= LMP_HOST_SSP;
642 		else
643 			hdev->features[1][0] &= ~LMP_HOST_SSP;
644 	}
645 
646 	if (!rp->status) {
647 		if (sent->mode)
648 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
649 		else
650 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
651 	}
652 
653 	hci_dev_unlock(hdev);
654 
655 	return rp->status;
656 }
657 
658 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
659 				  struct sk_buff *skb)
660 {
661 	struct hci_ev_status *rp = data;
662 	struct hci_cp_write_sc_support *sent;
663 
664 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
665 
666 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
667 	if (!sent)
668 		return rp->status;
669 
670 	hci_dev_lock(hdev);
671 
672 	if (!rp->status) {
673 		if (sent->support)
674 			hdev->features[1][0] |= LMP_HOST_SC;
675 		else
676 			hdev->features[1][0] &= ~LMP_HOST_SC;
677 	}
678 
679 	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
680 		if (sent->support)
681 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
682 		else
683 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
684 	}
685 
686 	hci_dev_unlock(hdev);
687 
688 	return rp->status;
689 }
690 
691 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
692 				    struct sk_buff *skb)
693 {
694 	struct hci_rp_read_local_version *rp = data;
695 
696 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
697 
698 	if (rp->status)
699 		return rp->status;
700 
701 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
702 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
703 		hdev->hci_ver = rp->hci_ver;
704 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
705 		hdev->lmp_ver = rp->lmp_ver;
706 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
707 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
708 	}
709 
710 	return rp->status;
711 }
712 
713 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
714 				   struct sk_buff *skb)
715 {
716 	struct hci_rp_read_enc_key_size *rp = data;
717 	struct hci_conn *conn;
718 	u16 handle;
719 	u8 status = rp->status;
720 
721 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
722 
723 	handle = le16_to_cpu(rp->handle);
724 
725 	hci_dev_lock(hdev);
726 
727 	conn = hci_conn_hash_lookup_handle(hdev, handle);
728 	if (!conn) {
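		/* 0xFF is not a spec-defined HCI status; it is used here as
		 * a locally generated failure code for a vanished connection.
		 */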
729 		status = 0xFF;
730 		goto done;
731 	}
732 
733 	/* While unexpected, the read_enc_key_size command may fail. The most
734 	 * secure approach is to then assume the key size is 0 to force a
735 	 * disconnection.
736 	 */
737 	if (status) {
738 		bt_dev_err(hdev, "failed to read key size for handle %u",
739 			   handle);
740 		conn->enc_key_size = 0;
741 	} else {
742 		u8 *key_enc_size = hci_conn_key_enc_size(conn);
743 
744 		conn->enc_key_size = rp->key_size;
745 		status = 0;
746 
747 		/* Attempt to check if the key size is too small or if it has
748 		 * been downgraded from the last time it was stored as part of
749 		 * the link_key.
750 		 */
751 		if (conn->enc_key_size < hdev->min_enc_key_size ||
752 		    (key_enc_size && conn->enc_key_size < *key_enc_size)) {
753 			/* In the slave role, conn->state has already been set
754 			 * to BT_CONNECTED while the L2CAP connect request may
755 			 * not have arrived yet; at that point the L2CAP layer
756 			 * does almost nothing with the non-zero status.
757 			 * So also clear the encryption-related bits, and the
758 			 * L2CAP connect request handler will then pick up the
759 			 * correct security state later on.
760 			 */
761 			status = HCI_ERROR_AUTH_FAILURE;
762 			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
763 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
764 		}
765 
766 		/* Update the key encryption size with the connection one */
767 		if (key_enc_size && *key_enc_size != conn->enc_key_size)
768 			*key_enc_size = conn->enc_key_size;
769 	}
770 
771 	hci_encrypt_cfm(conn, status);
772 
773 done:
774 	hci_dev_unlock(hdev);
775 
776 	return status;
777 }
778 
779 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
780 				     struct sk_buff *skb)
781 {
782 	struct hci_rp_read_local_commands *rp = data;
783 
784 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
785 
786 	if (rp->status)
787 		return rp->status;
788 
789 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
790 	    hci_dev_test_flag(hdev, HCI_CONFIG))
791 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
792 
793 	return rp->status;
794 }
795 
796 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
797 					   struct sk_buff *skb)
798 {
799 	struct hci_rp_read_auth_payload_to *rp = data;
800 	struct hci_conn *conn;
801 
802 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
803 
804 	if (rp->status)
805 		return rp->status;
806 
807 	hci_dev_lock(hdev);
808 
809 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
810 	if (conn)
811 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
812 
813 	hci_dev_unlock(hdev);
814 
815 	return rp->status;
816 }
817 
818 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
819 					    struct sk_buff *skb)
820 {
821 	struct hci_rp_write_auth_payload_to *rp = data;
822 	struct hci_conn *conn;
823 	void *sent;
824 
825 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
826 
827 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
828 	if (!sent)
829 		return rp->status;
830 
831 	hci_dev_lock(hdev);
832 
833 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
834 	if (!conn) {
835 		rp->status = 0xff;
836 		goto unlock;
837 	}
838 
839 	if (!rp->status)
840 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
841 
842 unlock:
843 	hci_dev_unlock(hdev);
844 
845 	return rp->status;
846 }
847 
848 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
849 				     struct sk_buff *skb)
850 {
851 	struct hci_rp_read_local_features *rp = data;
852 
853 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
854 
855 	if (rp->status)
856 		return rp->status;
857 
858 	memcpy(hdev->features, rp->features, 8);
859 
860 	/* Adjust default settings according to the features
861 	 * supported by the device. */
862 
863 	if (hdev->features[0][0] & LMP_3SLOT)
864 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
865 
866 	if (hdev->features[0][0] & LMP_5SLOT)
867 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
868 
869 	if (hdev->features[0][1] & LMP_HV2) {
870 		hdev->pkt_type  |= (HCI_HV2);
871 		hdev->esco_type |= (ESCO_HV2);
872 	}
873 
874 	if (hdev->features[0][1] & LMP_HV3) {
875 		hdev->pkt_type  |= (HCI_HV3);
876 		hdev->esco_type |= (ESCO_HV3);
877 	}
878 
879 	if (lmp_esco_capable(hdev))
880 		hdev->esco_type |= (ESCO_EV3);
881 
882 	if (hdev->features[0][4] & LMP_EV4)
883 		hdev->esco_type |= (ESCO_EV4);
884 
885 	if (hdev->features[0][4] & LMP_EV5)
886 		hdev->esco_type |= (ESCO_EV5);
887 
888 	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
889 		hdev->esco_type |= (ESCO_2EV3);
890 
891 	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
892 		hdev->esco_type |= (ESCO_3EV3);
893 
894 	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
895 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
896 
897 	return rp->status;
898 }
899 
900 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
901 					 struct sk_buff *skb)
902 {
903 	struct hci_rp_read_local_ext_features *rp = data;
904 
905 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
906 
907 	if (rp->status)
908 		return rp->status;
909 
910 	if (hdev->max_page < rp->max_page) {
911 		if (hci_test_quirk(hdev,
912 				   HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
913 			bt_dev_warn(hdev, "broken local ext features page 2");
914 		else
915 			hdev->max_page = rp->max_page;
916 	}
917 
918 	if (rp->page < HCI_MAX_PAGES)
919 		memcpy(hdev->features[rp->page], rp->features, 8);
920 
921 	return rp->status;
922 }
923 
924 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
925 				  struct sk_buff *skb)
926 {
927 	struct hci_rp_read_buffer_size *rp = data;
928 
929 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
930 
931 	if (rp->status)
932 		return rp->status;
933 
934 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
935 	hdev->sco_mtu  = rp->sco_mtu;
936 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
937 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
938 
939 	if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
940 		hdev->sco_mtu  = 64;
941 		hdev->sco_pkts = 8;
942 	}
943 
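	/* If the controller cannot report its voice setting, SCO is
	 * presumably unusable, so zero the SCO packet budget.
	 */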
944 	if (!read_voice_setting_capable(hdev))
945 		hdev->sco_pkts = 0;
946 
947 	hdev->acl_cnt = hdev->acl_pkts;
948 	hdev->sco_cnt = hdev->sco_pkts;
949 
950 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
951 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
952 
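	/* A controller reporting no ACL buffers could never carry data, so
	 * treat that as an invalid response.
	 */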
953 	if (!hdev->acl_mtu || !hdev->acl_pkts)
954 		return HCI_ERROR_INVALID_PARAMETERS;
955 
956 	return rp->status;
957 }
958 
959 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
960 			      struct sk_buff *skb)
961 {
962 	struct hci_rp_read_bd_addr *rp = data;
963 
964 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
965 
966 	if (rp->status)
967 		return rp->status;
968 
969 	if (test_bit(HCI_INIT, &hdev->flags))
970 		bacpy(&hdev->bdaddr, &rp->bdaddr);
971 
972 	if (hci_dev_test_flag(hdev, HCI_SETUP))
973 		bacpy(&hdev->setup_addr, &rp->bdaddr);
974 
975 	return rp->status;
976 }
977 
978 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
979 					 struct sk_buff *skb)
980 {
981 	struct hci_rp_read_local_pairing_opts *rp = data;
982 
983 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
984 
985 	if (rp->status)
986 		return rp->status;
987 
988 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
989 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
990 		hdev->pairing_opts = rp->pairing_opts;
991 		hdev->max_enc_key_size = rp->max_key_size;
992 	}
993 
994 	return rp->status;
995 }
996 
997 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
998 					 struct sk_buff *skb)
999 {
1000 	struct hci_rp_read_page_scan_activity *rp = data;
1001 
1002 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1003 
1004 	if (rp->status)
1005 		return rp->status;
1006 
1007 	if (test_bit(HCI_INIT, &hdev->flags)) {
1008 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1009 		hdev->page_scan_window = __le16_to_cpu(rp->window);
1010 	}
1011 
1012 	return rp->status;
1013 }
1014 
1015 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1016 					  struct sk_buff *skb)
1017 {
1018 	struct hci_ev_status *rp = data;
1019 	struct hci_cp_write_page_scan_activity *sent;
1020 
1021 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1022 
1023 	if (rp->status)
1024 		return rp->status;
1025 
1026 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1027 	if (!sent)
1028 		return rp->status;
1029 
1030 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1031 	hdev->page_scan_window = __le16_to_cpu(sent->window);
1032 
1033 	return rp->status;
1034 }
1035 
1036 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1037 				     struct sk_buff *skb)
1038 {
1039 	struct hci_rp_read_page_scan_type *rp = data;
1040 
1041 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1042 
1043 	if (rp->status)
1044 		return rp->status;
1045 
1046 	if (test_bit(HCI_INIT, &hdev->flags))
1047 		hdev->page_scan_type = rp->type;
1048 
1049 	return rp->status;
1050 }
1051 
1052 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1053 				      struct sk_buff *skb)
1054 {
1055 	struct hci_ev_status *rp = data;
1056 	u8 *type;
1057 
1058 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1059 
1060 	if (rp->status)
1061 		return rp->status;
1062 
1063 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1064 	if (type)
1065 		hdev->page_scan_type = *type;
1066 
1067 	return rp->status;
1068 }
1069 
1070 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1071 			    struct sk_buff *skb)
1072 {
1073 	struct hci_rp_read_clock *rp = data;
1074 	struct hci_cp_read_clock *cp;
1075 	struct hci_conn *conn;
1076 
1077 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1078 
1079 	if (rp->status)
1080 		return rp->status;
1081 
1082 	hci_dev_lock(hdev);
1083 
1084 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1085 	if (!cp)
1086 		goto unlock;
1087 
1088 	if (cp->which == 0x00) {
1089 		hdev->clock = le32_to_cpu(rp->clock);
1090 		goto unlock;
1091 	}
1092 
1093 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1094 	if (conn) {
1095 		conn->clock = le32_to_cpu(rp->clock);
1096 		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1097 	}
1098 
1099 unlock:
1100 	hci_dev_unlock(hdev);
1101 	return rp->status;
1102 }
1103 
1104 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1105 				       struct sk_buff *skb)
1106 {
1107 	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1108 
1109 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1110 
1111 	if (rp->status)
1112 		return rp->status;
1113 
1114 	hdev->inq_tx_power = rp->tx_power;
1115 
1116 	return rp->status;
1117 }
1118 
1119 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1120 					     struct sk_buff *skb)
1121 {
1122 	struct hci_rp_read_def_err_data_reporting *rp = data;
1123 
1124 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1125 
1126 	if (rp->status)
1127 		return rp->status;
1128 
1129 	hdev->err_data_reporting = rp->err_data_reporting;
1130 
1131 	return rp->status;
1132 }
1133 
1134 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1135 					      struct sk_buff *skb)
1136 {
1137 	struct hci_ev_status *rp = data;
1138 	struct hci_cp_write_def_err_data_reporting *cp;
1139 
1140 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1141 
1142 	if (rp->status)
1143 		return rp->status;
1144 
1145 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1146 	if (!cp)
1147 		return rp->status;
1148 
1149 	hdev->err_data_reporting = cp->err_data_reporting;
1150 
1151 	return rp->status;
1152 }
1153 
1154 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1155 				struct sk_buff *skb)
1156 {
1157 	struct hci_rp_pin_code_reply *rp = data;
1158 	struct hci_cp_pin_code_reply *cp;
1159 	struct hci_conn *conn;
1160 
1161 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1162 
1163 	hci_dev_lock(hdev);
1164 
1165 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1166 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1167 
1168 	if (rp->status)
1169 		goto unlock;
1170 
1171 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1172 	if (!cp)
1173 		goto unlock;
1174 
1175 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1176 	if (conn)
1177 		conn->pin_length = cp->pin_len;
1178 
1179 unlock:
1180 	hci_dev_unlock(hdev);
1181 	return rp->status;
1182 }
1183 
1184 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1185 				    struct sk_buff *skb)
1186 {
1187 	struct hci_rp_pin_code_neg_reply *rp = data;
1188 
1189 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1190 
1191 	hci_dev_lock(hdev);
1192 
1193 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1194 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1195 						 rp->status);
1196 
1197 	hci_dev_unlock(hdev);
1198 
1199 	return rp->status;
1200 }
1201 
1202 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1203 				     struct sk_buff *skb)
1204 {
1205 	struct hci_rp_le_read_buffer_size *rp = data;
1206 
1207 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1208 
1209 	if (rp->status)
1210 		return rp->status;
1211 
1212 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1213 	hdev->le_pkts = rp->le_max_pkt;
1214 
1215 	hdev->le_cnt = hdev->le_pkts;
1216 
1217 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1218 
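	/* An LE MTU of zero means LE traffic shares the BR/EDR buffers; a
	 * non-zero value below the minimum, however, is invalid.
	 */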
1219 	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
1220 		return HCI_ERROR_INVALID_PARAMETERS;
1221 
1222 	return rp->status;
1223 }
1224 
1225 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1226 					struct sk_buff *skb)
1227 {
1228 	struct hci_rp_le_read_local_features *rp = data;
1229 
1230 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1231 
1232 	if (rp->status)
1233 		return rp->status;
1234 
1235 	memcpy(hdev->le_features, rp->features, 8);
1236 
1237 	return rp->status;
1238 }
1239 
1240 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1241 				      struct sk_buff *skb)
1242 {
1243 	struct hci_rp_le_read_adv_tx_power *rp = data;
1244 
1245 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1246 
1247 	if (rp->status)
1248 		return rp->status;
1249 
1250 	hdev->adv_tx_power = rp->tx_power;
1251 
1252 	return rp->status;
1253 }
1254 
1255 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1256 				    struct sk_buff *skb)
1257 {
1258 	struct hci_rp_user_confirm_reply *rp = data;
1259 
1260 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1261 
1262 	hci_dev_lock(hdev);
1263 
1264 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1265 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1266 						 rp->status);
1267 
1268 	hci_dev_unlock(hdev);
1269 
1270 	return rp->status;
1271 }
1272 
1273 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1274 					struct sk_buff *skb)
1275 {
1276 	struct hci_rp_user_confirm_reply *rp = data;
1277 
1278 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1279 
1280 	hci_dev_lock(hdev);
1281 
1282 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1283 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1284 						     ACL_LINK, 0, rp->status);
1285 
1286 	hci_dev_unlock(hdev);
1287 
1288 	return rp->status;
1289 }
1290 
1291 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1292 				    struct sk_buff *skb)
1293 {
1294 	struct hci_rp_user_confirm_reply *rp = data;
1295 
1296 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1297 
1298 	hci_dev_lock(hdev);
1299 
1300 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1301 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1302 						 0, rp->status);
1303 
1304 	hci_dev_unlock(hdev);
1305 
1306 	return rp->status;
1307 }
1308 
1309 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1310 					struct sk_buff *skb)
1311 {
1312 	struct hci_rp_user_confirm_reply *rp = data;
1313 
1314 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1315 
1316 	hci_dev_lock(hdev);
1317 
1318 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1319 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1320 						     ACL_LINK, 0, rp->status);
1321 
1322 	hci_dev_unlock(hdev);
1323 
1324 	return rp->status;
1325 }
1326 
1327 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1328 				     struct sk_buff *skb)
1329 {
1330 	struct hci_rp_read_local_oob_data *rp = data;
1331 
1332 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1333 
1334 	return rp->status;
1335 }
1336 
1337 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1338 					 struct sk_buff *skb)
1339 {
1340 	struct hci_rp_read_local_oob_ext_data *rp = data;
1341 
1342 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1343 
1344 	return rp->status;
1345 }
1346 
1347 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1348 				    struct sk_buff *skb)
1349 {
1350 	struct hci_ev_status *rp = data;
1351 	bdaddr_t *sent;
1352 
1353 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1354 
1355 	if (rp->status)
1356 		return rp->status;
1357 
1358 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1359 	if (!sent)
1360 		return rp->status;
1361 
1362 	hci_dev_lock(hdev);
1363 
1364 	bacpy(&hdev->random_addr, sent);
1365 
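	/* If the address just programmed is the current RPA, re-arm its
	 * expiry timer.
	 */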
1366 	if (!bacmp(&hdev->rpa, sent)) {
1367 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1368 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1369 				   secs_to_jiffies(hdev->rpa_timeout));
1370 	}
1371 
1372 	hci_dev_unlock(hdev);
1373 
1374 	return rp->status;
1375 }
1376 
1377 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1378 				    struct sk_buff *skb)
1379 {
1380 	struct hci_ev_status *rp = data;
1381 	struct hci_cp_le_set_default_phy *cp;
1382 
1383 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1384 
1385 	if (rp->status)
1386 		return rp->status;
1387 
1388 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1389 	if (!cp)
1390 		return rp->status;
1391 
1392 	hci_dev_lock(hdev);
1393 
1394 	hdev->le_tx_def_phys = cp->tx_phys;
1395 	hdev->le_rx_def_phys = cp->rx_phys;
1396 
1397 	hci_dev_unlock(hdev);
1398 
1399 	return rp->status;
1400 }
1401 
1402 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1403 					    struct sk_buff *skb)
1404 {
1405 	struct hci_ev_status *rp = data;
1406 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1407 	struct adv_info *adv;
1408 
1409 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1410 
1411 	if (rp->status)
1412 		return rp->status;
1413 
1414 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1415 	/* Update only in the case of an adv instance, since handle 0x00
1416 	 * shall be using HCI_OP_LE_SET_RANDOM_ADDR, which allows both
1417 	 * extended and non-extended advertising.
1418 	 */
1419 	if (!cp || !cp->handle)
1420 		return rp->status;
1421 
1422 	hci_dev_lock(hdev);
1423 
1424 	adv = hci_find_adv_instance(hdev, cp->handle);
1425 	if (adv) {
1426 		bacpy(&adv->random_addr, &cp->bdaddr);
1427 		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1428 			adv->rpa_expired = false;
1429 			queue_delayed_work(hdev->workqueue,
1430 					   &adv->rpa_expired_cb,
1431 					   secs_to_jiffies(hdev->rpa_timeout));
1432 		}
1433 	}
1434 
1435 	hci_dev_unlock(hdev);
1436 
1437 	return rp->status;
1438 }
1439 
1440 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1441 				   struct sk_buff *skb)
1442 {
1443 	struct hci_ev_status *rp = data;
1444 	u8 *instance;
1445 	int err;
1446 
1447 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1448 
1449 	if (rp->status)
1450 		return rp->status;
1451 
1452 	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1453 	if (!instance)
1454 		return rp->status;
1455 
1456 	hci_dev_lock(hdev);
1457 
1458 	err = hci_remove_adv_instance(hdev, *instance);
1459 	if (!err)
1460 		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1461 					 *instance);
1462 
1463 	hci_dev_unlock(hdev);
1464 
1465 	return rp->status;
1466 }
1467 
1468 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1469 				   struct sk_buff *skb)
1470 {
1471 	struct hci_ev_status *rp = data;
1472 	struct adv_info *adv, *n;
1473 	int err;
1474 
1475 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1476 
1477 	if (rp->status)
1478 		return rp->status;
1479 
1480 	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1481 		return rp->status;
1482 
1483 	hci_dev_lock(hdev);
1484 
1485 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1486 		u8 instance = adv->instance;
1487 
1488 		err = hci_remove_adv_instance(hdev, instance);
1489 		if (!err)
1490 			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1491 						 hdev, instance);
1492 	}
1493 
1494 	hci_dev_unlock(hdev);
1495 
1496 	return rp->status;
1497 }
1498 
1499 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1500 					struct sk_buff *skb)
1501 {
1502 	struct hci_rp_le_read_transmit_power *rp = data;
1503 
1504 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1505 
1506 	if (rp->status)
1507 		return rp->status;
1508 
1509 	hdev->min_le_tx_power = rp->min_le_tx_power;
1510 	hdev->max_le_tx_power = rp->max_le_tx_power;
1511 
1512 	return rp->status;
1513 }
1514 
1515 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1516 				     struct sk_buff *skb)
1517 {
1518 	struct hci_ev_status *rp = data;
1519 	struct hci_cp_le_set_privacy_mode *cp;
1520 	struct hci_conn_params *params;
1521 
1522 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1523 
1524 	if (rp->status)
1525 		return rp->status;
1526 
1527 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1528 	if (!cp)
1529 		return rp->status;
1530 
1531 	hci_dev_lock(hdev);
1532 
1533 	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1534 	if (params)
1535 		WRITE_ONCE(params->privacy_mode, cp->mode);
1536 
1537 	hci_dev_unlock(hdev);
1538 
1539 	return rp->status;
1540 }
1541 
1542 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1543 				   struct sk_buff *skb)
1544 {
1545 	struct hci_ev_status *rp = data;
1546 	__u8 *sent;
1547 
1548 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1549 
1550 	if (rp->status)
1551 		return rp->status;
1552 
1553 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1554 	if (!sent)
1555 		return rp->status;
1556 
1557 	hci_dev_lock(hdev);
1558 
1559 	/* If we're doing connection initiation as peripheral, set a
1560 	 * timeout in case something goes wrong.
1561 	 */
1562 	if (*sent) {
1563 		struct hci_conn *conn;
1564 
1565 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1566 
1567 		conn = hci_lookup_le_connect(hdev);
1568 		if (conn)
1569 			queue_delayed_work(hdev->workqueue,
1570 					   &conn->le_conn_timeout,
1571 					   conn->conn_timeout);
1572 	} else {
1573 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1574 	}
1575 
1576 	hci_dev_unlock(hdev);
1577 
1578 	return rp->status;
1579 }
1580 
1581 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1582 				       struct sk_buff *skb)
1583 {
1584 	struct hci_cp_le_set_ext_adv_enable *cp;
1585 	struct hci_cp_ext_adv_set *set;
1586 	struct adv_info *adv = NULL, *n;
1587 	struct hci_ev_status *rp = data;
1588 
1589 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1590 
1591 	if (rp->status)
1592 		return rp->status;
1593 
1594 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1595 	if (!cp)
1596 		return rp->status;
1597 
1598 	set = (void *)cp->data;
1599 
1600 	hci_dev_lock(hdev);
1601 
1602 	if (cp->num_of_sets)
1603 		adv = hci_find_adv_instance(hdev, set->handle);
1604 
1605 	if (cp->enable) {
1606 		struct hci_conn *conn;
1607 
1608 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1609 
1610 		if (adv && !adv->periodic)
1611 			adv->enabled = true;
1612 
1613 		conn = hci_lookup_le_connect(hdev);
1614 		if (conn)
1615 			queue_delayed_work(hdev->workqueue,
1616 					   &conn->le_conn_timeout,
1617 					   conn->conn_timeout);
1618 	} else {
1619 		if (cp->num_of_sets) {
1620 			if (adv)
1621 				adv->enabled = false;
1622 
1623 			/* If just one instance was disabled, check if any other
1624 			 * instance is still enabled before clearing HCI_LE_ADV.
1625 			 */
1626 			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1627 						 list) {
1628 				if (adv->enabled)
1629 					goto unlock;
1630 			}
1631 		} else {
1632 			/* All instances shall be considered disabled */
1633 			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1634 						 list)
1635 				adv->enabled = false;
1636 		}
1637 
1638 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1639 	}
1640 
1641 unlock:
1642 	hci_dev_unlock(hdev);
1643 	return rp->status;
1644 }
1645 
1646 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1647 				   struct sk_buff *skb)
1648 {
1649 	struct hci_cp_le_set_scan_param *cp;
1650 	struct hci_ev_status *rp = data;
1651 
1652 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1653 
1654 	if (rp->status)
1655 		return rp->status;
1656 
1657 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1658 	if (!cp)
1659 		return rp->status;
1660 
1661 	hci_dev_lock(hdev);
1662 
1663 	hdev->le_scan_type = cp->type;
1664 
1665 	hci_dev_unlock(hdev);
1666 
1667 	return rp->status;
1668 }
1669 
1670 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1671 				       struct sk_buff *skb)
1672 {
1673 	struct hci_cp_le_set_ext_scan_params *cp;
1674 	struct hci_ev_status *rp = data;
1675 	struct hci_cp_le_scan_phy_params *phy_param;
1676 
1677 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1678 
1679 	if (rp->status)
1680 		return rp->status;
1681 
1682 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1683 	if (!cp)
1684 		return rp->status;
1685 
1686 	phy_param = (void *)cp->data;
1687 
1688 	hci_dev_lock(hdev);
1689 
1690 	hdev->le_scan_type = phy_param->type;
1691 
1692 	hci_dev_unlock(hdev);
1693 
1694 	return rp->status;
1695 }
1696 
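/* During active scanning the most recent advertising report is buffered so
 * it can be merged with its scan response; the helpers below manage that
 * single-entry cache.
 */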
1697 static bool has_pending_adv_report(struct hci_dev *hdev)
1698 {
1699 	struct discovery_state *d = &hdev->discovery;
1700 
1701 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1702 }
1703 
1704 static void clear_pending_adv_report(struct hci_dev *hdev)
1705 {
1706 	struct discovery_state *d = &hdev->discovery;
1707 
1708 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1709 	d->last_adv_data_len = 0;
1710 }
1711 
1712 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1713 				     u8 bdaddr_type, s8 rssi, u32 flags,
1714 				     u8 *data, u8 len)
1715 {
1716 	struct discovery_state *d = &hdev->discovery;
1717 
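	/* Silently drop oversized reports; last_adv_data only has room for
	 * max_adv_len(hdev) bytes.
	 */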
1718 	if (len > max_adv_len(hdev))
1719 		return;
1720 
1721 	bacpy(&d->last_adv_addr, bdaddr);
1722 	d->last_adv_addr_type = bdaddr_type;
1723 	d->last_adv_rssi = rssi;
1724 	d->last_adv_flags = flags;
1725 	memcpy(d->last_adv_data, data, len);
1726 	d->last_adv_data_len = len;
1727 }
1728 
1729 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1730 {
1731 	hci_dev_lock(hdev);
1732 
1733 	switch (enable) {
1734 	case LE_SCAN_ENABLE:
1735 		hci_dev_set_flag(hdev, HCI_LE_SCAN);
1736 		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
1737 			clear_pending_adv_report(hdev);
1738 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1739 		}
1740 		break;
1741 
1742 	case LE_SCAN_DISABLE:
1743 		/* We do this here instead of when setting DISCOVERY_STOPPED
1744 		 * since the latter would potentially require waiting for
1745 		 * inquiry to stop too.
1746 		 */
1747 		if (has_pending_adv_report(hdev)) {
1748 			struct discovery_state *d = &hdev->discovery;
1749 
1750 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1751 					  d->last_adv_addr_type, NULL,
1752 					  d->last_adv_rssi, d->last_adv_flags,
1753 					  d->last_adv_data,
1754 					  d->last_adv_data_len, NULL, 0, 0);
1755 		}
1756 
1757 		/* Cancel this timer so that we don't try to disable scanning
1758 		 * when it's already disabled.
1759 		 */
1760 		cancel_delayed_work(&hdev->le_scan_disable);
1761 
1762 		hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1763 
1764 		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1765 		 * interrupted scanning due to a connect request. Therefore,
1766 		 * mark discovery as stopped.
1767 		 */
1768 		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1769 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1770 		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1771 			 hdev->discovery.state == DISCOVERY_FINDING)
1772 			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1773 
1774 		break;
1775 
1776 	default:
1777 		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1778 			   enable);
1779 		break;
1780 	}
1781 
1782 	hci_dev_unlock(hdev);
1783 }
1784 
1785 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1786 				    struct sk_buff *skb)
1787 {
1788 	struct hci_cp_le_set_scan_enable *cp;
1789 	struct hci_ev_status *rp = data;
1790 
1791 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1792 
1793 	if (rp->status)
1794 		return rp->status;
1795 
1796 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1797 	if (!cp)
1798 		return rp->status;
1799 
1800 	le_set_scan_enable_complete(hdev, cp->enable);
1801 
1802 	return rp->status;
1803 }
1804 
1805 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1806 					struct sk_buff *skb)
1807 {
1808 	struct hci_cp_le_set_ext_scan_enable *cp;
1809 	struct hci_ev_status *rp = data;
1810 
1811 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1812 
1813 	if (rp->status)
1814 		return rp->status;
1815 
1816 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1817 	if (!cp)
1818 		return rp->status;
1819 
1820 	le_set_scan_enable_complete(hdev, cp->enable);
1821 
1822 	return rp->status;
1823 }
1824 
1825 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1826 				      struct sk_buff *skb)
1827 {
1828 	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1829 
1830 	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1831 		   rp->num_of_sets);
1832 
1833 	if (rp->status)
1834 		return rp->status;
1835 
1836 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1837 
1838 	return rp->status;
1839 }
1840 
1841 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1842 					  struct sk_buff *skb)
1843 {
1844 	struct hci_rp_le_read_accept_list_size *rp = data;
1845 
1846 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1847 
1848 	if (rp->status)
1849 		return rp->status;
1850 
1851 	hdev->le_accept_list_size = rp->size;
1852 
1853 	return rp->status;
1854 }
1855 
1856 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1857 				      struct sk_buff *skb)
1858 {
1859 	struct hci_ev_status *rp = data;
1860 
1861 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1862 
1863 	if (rp->status)
1864 		return rp->status;
1865 
1866 	hci_dev_lock(hdev);
1867 	hci_bdaddr_list_clear(&hdev->le_accept_list);
1868 	hci_dev_unlock(hdev);
1869 
1870 	return rp->status;
1871 }
1872 
1873 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1874 				       struct sk_buff *skb)
1875 {
1876 	struct hci_cp_le_add_to_accept_list *sent;
1877 	struct hci_ev_status *rp = data;
1878 
1879 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1880 
1881 	if (rp->status)
1882 		return rp->status;
1883 
1884 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1885 	if (!sent)
1886 		return rp->status;
1887 
1888 	hci_dev_lock(hdev);
1889 	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1890 			    sent->bdaddr_type);
1891 	hci_dev_unlock(hdev);
1892 
1893 	return rp->status;
1894 }
1895 
1896 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1897 					 struct sk_buff *skb)
1898 {
1899 	struct hci_cp_le_del_from_accept_list *sent;
1900 	struct hci_ev_status *rp = data;
1901 
1902 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1903 
1904 	if (rp->status)
1905 		return rp->status;
1906 
1907 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1908 	if (!sent)
1909 		return rp->status;
1910 
1911 	hci_dev_lock(hdev);
1912 	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1913 			    sent->bdaddr_type);
1914 	hci_dev_unlock(hdev);
1915 
1916 	return rp->status;
1917 }
1918 
1919 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1920 					  struct sk_buff *skb)
1921 {
1922 	struct hci_rp_le_read_supported_states *rp = data;
1923 
1924 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1925 
1926 	if (rp->status)
1927 		return rp->status;
1928 
1929 	memcpy(hdev->le_states, rp->le_states, 8);
1930 
1931 	return rp->status;
1932 }
1933 
1934 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1935 				      struct sk_buff *skb)
1936 {
1937 	struct hci_rp_le_read_def_data_len *rp = data;
1938 
1939 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1940 
1941 	if (rp->status)
1942 		return rp->status;
1943 
1944 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1945 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1946 
1947 	return rp->status;
1948 }
1949 
1950 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1951 				       struct sk_buff *skb)
1952 {
1953 	struct hci_cp_le_write_def_data_len *sent;
1954 	struct hci_ev_status *rp = data;
1955 
1956 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1957 
1958 	if (rp->status)
1959 		return rp->status;
1960 
1961 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1962 	if (!sent)
1963 		return rp->status;
1964 
1965 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1966 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1967 
1968 	return rp->status;
1969 }
1970 
1971 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1972 				       struct sk_buff *skb)
1973 {
1974 	struct hci_cp_le_add_to_resolv_list *sent;
1975 	struct hci_ev_status *rp = data;
1976 
1977 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1978 
1979 	if (rp->status)
1980 		return rp->status;
1981 
1982 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1983 	if (!sent)
1984 		return rp->status;
1985 
1986 	hci_dev_lock(hdev);
1987 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1988 				sent->bdaddr_type, sent->peer_irk,
1989 				sent->local_irk);
1990 	hci_dev_unlock(hdev);
1991 
1992 	return rp->status;
1993 }
1994 
1995 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
1996 					 struct sk_buff *skb)
1997 {
1998 	struct hci_cp_le_del_from_resolv_list *sent;
1999 	struct hci_ev_status *rp = data;
2000 
2001 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2002 
2003 	if (rp->status)
2004 		return rp->status;
2005 
2006 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2007 	if (!sent)
2008 		return rp->status;
2009 
2010 	hci_dev_lock(hdev);
2011 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2012 				     sent->bdaddr_type);
2013 	hci_dev_unlock(hdev);
2014 
2015 	return rp->status;
2016 }
2017 
2018 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2019 				      struct sk_buff *skb)
2020 {
2021 	struct hci_ev_status *rp = data;
2022 
2023 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2024 
2025 	if (rp->status)
2026 		return rp->status;
2027 
2028 	hci_dev_lock(hdev);
2029 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2030 	hci_dev_unlock(hdev);
2031 
2032 	return rp->status;
2033 }
2034 
2035 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2036 					  struct sk_buff *skb)
2037 {
2038 	struct hci_rp_le_read_resolv_list_size *rp = data;
2039 
2040 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2041 
2042 	if (rp->status)
2043 		return rp->status;
2044 
2045 	hdev->le_resolv_list_size = rp->size;
2046 
2047 	return rp->status;
2048 }
2049 
2050 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2051 					       struct sk_buff *skb)
2052 {
2053 	struct hci_ev_status *rp = data;
2054 	__u8 *sent;
2055 
2056 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2057 
2058 	if (rp->status)
2059 		return rp->status;
2060 
2061 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2062 	if (!sent)
2063 		return rp->status;
2064 
2065 	hci_dev_lock(hdev);
2066 
2067 	if (*sent)
2068 		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2069 	else
2070 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2071 
2072 	hci_dev_unlock(hdev);
2073 
2074 	return rp->status;
2075 }
2076 
2077 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2078 				      struct sk_buff *skb)
2079 {
2080 	struct hci_rp_le_read_max_data_len *rp = data;
2081 
2082 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2083 
2084 	if (rp->status)
2085 		return rp->status;
2086 
2087 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2088 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2089 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2090 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2091 
2092 	return rp->status;
2093 }
2094 
2095 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2096 					 struct sk_buff *skb)
2097 {
2098 	struct hci_cp_write_le_host_supported *sent;
2099 	struct hci_ev_status *rp = data;
2100 
2101 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2102 
2103 	if (rp->status)
2104 		return rp->status;
2105 
2106 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2107 	if (!sent)
2108 		return rp->status;
2109 
2110 	hci_dev_lock(hdev);
2111 
2112 	if (sent->le) {
2113 		hdev->features[1][0] |= LMP_HOST_LE;
2114 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2115 	} else {
2116 		hdev->features[1][0] &= ~LMP_HOST_LE;
2117 		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2118 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2119 	}
2120 
2121 	if (sent->simul)
2122 		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2123 	else
2124 		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2125 
2126 	hci_dev_unlock(hdev);
2127 
2128 	return rp->status;
2129 }
2130 
2131 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2132 			       struct sk_buff *skb)
2133 {
2134 	struct hci_cp_le_set_adv_param *cp;
2135 	struct hci_ev_status *rp = data;
2136 
2137 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2138 
2139 	if (rp->status)
2140 		return rp->status;
2141 
2142 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2143 	if (!cp)
2144 		return rp->status;
2145 
2146 	hci_dev_lock(hdev);
2147 	hdev->adv_addr_type = cp->own_address_type;
2148 	hci_dev_unlock(hdev);
2149 
2150 	return rp->status;
2151 }
2152 
2153 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2154 			   struct sk_buff *skb)
2155 {
2156 	struct hci_rp_read_rssi *rp = data;
2157 	struct hci_conn *conn;
2158 
2159 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2160 
2161 	if (rp->status)
2162 		return rp->status;
2163 
2164 	hci_dev_lock(hdev);
2165 
2166 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2167 	if (conn)
2168 		conn->rssi = rp->rssi;
2169 
2170 	hci_dev_unlock(hdev);
2171 
2172 	return rp->status;
2173 }
2174 
2175 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2176 			       struct sk_buff *skb)
2177 {
2178 	struct hci_cp_read_tx_power *sent;
2179 	struct hci_rp_read_tx_power *rp = data;
2180 	struct hci_conn *conn;
2181 
2182 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2183 
2184 	if (rp->status)
2185 		return rp->status;
2186 
2187 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2188 	if (!sent)
2189 		return rp->status;
2190 
2191 	hci_dev_lock(hdev);
2192 
2193 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2194 	if (!conn)
2195 		goto unlock;
2196 
2197 	switch (sent->type) {
2198 	case 0x00:
2199 		conn->tx_power = rp->tx_power;
2200 		break;
2201 	case 0x01:
2202 		conn->max_tx_power = rp->tx_power;
2203 		break;
2204 	}
2205 
2206 unlock:
2207 	hci_dev_unlock(hdev);
2208 	return rp->status;
2209 }
2210 
2211 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2212 				      struct sk_buff *skb)
2213 {
2214 	struct hci_ev_status *rp = data;
2215 	u8 *mode;
2216 
2217 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2218 
2219 	if (rp->status)
2220 		return rp->status;
2221 
2222 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2223 	if (mode)
2224 		hdev->ssp_debug_mode = *mode;
2225 
2226 	return rp->status;
2227 }
2228 
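/* Command Status handlers (hci_cs_*) below: here the controller has
 * only acknowledged that a command was accepted; the actual result
 * arrives in a later event. Most of these handlers therefore act only
 * on a non-zero (error) status, undoing state that was set up when the
 * command was sent.
 */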
2229 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2230 {
2231 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2232 
2233 	if (status)
2234 		return;
2235 
2236 	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2237 		set_bit(HCI_INQUIRY, &hdev->flags);
2238 }
2239 
2240 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2241 {
2242 	struct hci_cp_create_conn *cp;
2243 	struct hci_conn *conn;
2244 
2245 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2246 
2247 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2248 	if (!cp)
2249 		return;
2250 
2251 	hci_dev_lock(hdev);
2252 
2253 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2254 
2255 	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2256 
2257 	if (status) {
2258 		if (conn && conn->state == BT_CONNECT) {
2259 			conn->state = BT_CLOSED;
2260 			hci_connect_cfm(conn, status);
2261 			hci_conn_del(conn);
2262 		}
2263 	} else {
2264 		if (!conn) {
2265 			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2266 						  HCI_ROLE_MASTER);
2267 			if (IS_ERR(conn))
2268 				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
2269 		}
2270 	}
2271 
2272 	hci_dev_unlock(hdev);
2273 }
2274 
2275 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2276 {
2277 	struct hci_cp_add_sco *cp;
2278 	struct hci_conn *acl;
2279 	struct hci_link *link;
2280 	__u16 handle;
2281 
2282 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2283 
2284 	if (!status)
2285 		return;
2286 
2287 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2288 	if (!cp)
2289 		return;
2290 
2291 	handle = __le16_to_cpu(cp->handle);
2292 
2293 	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2294 
2295 	hci_dev_lock(hdev);
2296 
2297 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2298 	if (acl) {
2299 		link = list_first_entry_or_null(&acl->link_list,
2300 						struct hci_link, list);
2301 		if (link && link->conn) {
2302 			link->conn->state = BT_CLOSED;
2303 
2304 			hci_connect_cfm(link->conn, status);
2305 			hci_conn_del(link->conn);
2306 		}
2307 	}
2308 
2309 	hci_dev_unlock(hdev);
2310 }
2311 
2312 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2313 {
2314 	struct hci_cp_auth_requested *cp;
2315 	struct hci_conn *conn;
2316 
2317 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2318 
2319 	if (!status)
2320 		return;
2321 
2322 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2323 	if (!cp)
2324 		return;
2325 
2326 	hci_dev_lock(hdev);
2327 
2328 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2329 	if (conn) {
2330 		if (conn->state == BT_CONFIG) {
2331 			hci_connect_cfm(conn, status);
2332 			hci_conn_drop(conn);
2333 		}
2334 	}
2335 
2336 	hci_dev_unlock(hdev);
2337 }
2338 
2339 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2340 {
2341 	struct hci_cp_set_conn_encrypt *cp;
2342 	struct hci_conn *conn;
2343 
2344 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2345 
2346 	if (!status)
2347 		return;
2348 
2349 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2350 	if (!cp)
2351 		return;
2352 
2353 	hci_dev_lock(hdev);
2354 
2355 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2356 	if (conn) {
2357 		if (conn->state == BT_CONFIG) {
2358 			hci_connect_cfm(conn, status);
2359 			hci_conn_drop(conn);
2360 		}
2361 	}
2362 
2363 	hci_dev_unlock(hdev);
2364 }
2365 
2366 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2367 				    struct hci_conn *conn)
2368 {
2369 	if (conn->state != BT_CONFIG || !conn->out)
2370 		return 0;
2371 
2372 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2373 		return 0;
2374 
2375 	/* Only request authentication for SSP connections or non-SSP
2376 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2377 	 * is requested.
2378 	 */
2379 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2380 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2381 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2382 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2383 		return 0;
2384 
2385 	return 1;
2386 }
2387 
2388 static int hci_resolve_name(struct hci_dev *hdev,
2389 				   struct inquiry_entry *e)
2390 {
2391 	struct hci_cp_remote_name_req cp;
2392 
2393 	memset(&cp, 0, sizeof(cp));
2394 
2395 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2396 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2397 	cp.pscan_mode = e->data.pscan_mode;
2398 	cp.clock_offset = e->data.clock_offset;
2399 
2400 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2401 }
2402 
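/* Pick the next inquiry cache entry that still needs its name resolved
 * and send a Remote Name Request for it. Returns true if a request was
 * issued, false if the resolve list is empty, resolving has already
 * taken too long, or sending the request failed.
 */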
2403 static bool hci_resolve_next_name(struct hci_dev *hdev)
2404 {
2405 	struct discovery_state *discov = &hdev->discovery;
2406 	struct inquiry_entry *e;
2407 
2408 	if (list_empty(&discov->resolve))
2409 		return false;
2410 
2411 	/* We should stop if we already spent too much time resolving names. */
2412 	if (time_after(jiffies, discov->name_resolve_timeout)) {
2413 		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2414 		return false;
2415 	}
2416 
2417 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2418 	if (!e)
2419 		return false;
2420 
2421 	if (hci_resolve_name(hdev, e) == 0) {
2422 		e->name_state = NAME_PENDING;
2423 		return true;
2424 	}
2425 
2426 	return false;
2427 }
2428 
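/* Handle completion (or failure) of a pending remote name lookup:
 * report the result to mgmt, then either continue with the next
 * pending name or mark discovery as stopped once nothing is left to
 * resolve.
 */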
2429 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2430 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2431 {
2432 	struct discovery_state *discov = &hdev->discovery;
2433 	struct inquiry_entry *e;
2434 
2435 	/* Update the mgmt connected state if necessary. Be careful,
2436 	 * however, with conn objects that exist but are not (yet) connected.
2437 	 * Only those in BT_CONFIG or BT_CONNECTED states can be
2438 	 * considered connected.
2439 	 */
2440 	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2441 		mgmt_device_connected(hdev, conn, name, name_len);
2442 
2443 	if (discov->state == DISCOVERY_STOPPED)
2444 		return;
2445 
2446 	if (discov->state == DISCOVERY_STOPPING)
2447 		goto discov_complete;
2448 
2449 	if (discov->state != DISCOVERY_RESOLVING)
2450 		return;
2451 
2452 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2453 	/* If the device was not found in the list of devices whose names
2454 	 * are pending resolution, there is no need to continue resolving
2455 	 * the next name, as that will be done upon receiving another
2456 	 * Remote Name Request Complete Event */
2457 	if (!e)
2458 		return;
2459 
2460 	list_del(&e->list);
2461 
2462 	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2463 	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2464 			 name, name_len);
2465 
2466 	if (hci_resolve_next_name(hdev))
2467 		return;
2468 
2469 discov_complete:
2470 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2471 }
2472 
2473 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2474 {
2475 	struct hci_cp_remote_name_req *cp;
2476 	struct hci_conn *conn;
2477 
2478 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2479 
2480 	/* If successful, wait for the name req complete event before
2481 	 * checking whether authentication is needed */
2482 	if (!status)
2483 		return;
2484 
2485 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2486 	if (!cp)
2487 		return;
2488 
2489 	hci_dev_lock(hdev);
2490 
2491 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2492 
2493 	if (hci_dev_test_flag(hdev, HCI_MGMT))
2494 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2495 
2496 	if (!conn)
2497 		goto unlock;
2498 
2499 	if (!hci_outgoing_auth_needed(hdev, conn))
2500 		goto unlock;
2501 
2502 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2503 		struct hci_cp_auth_requested auth_cp;
2504 
2505 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2506 
2507 		auth_cp.handle = __cpu_to_le16(conn->handle);
2508 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2509 			     sizeof(auth_cp), &auth_cp);
2510 	}
2511 
2512 unlock:
2513 	hci_dev_unlock(hdev);
2514 }
2515 
2516 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2517 {
2518 	struct hci_cp_read_remote_features *cp;
2519 	struct hci_conn *conn;
2520 
2521 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2522 
2523 	if (!status)
2524 		return;
2525 
2526 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2527 	if (!cp)
2528 		return;
2529 
2530 	hci_dev_lock(hdev);
2531 
2532 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2533 	if (conn) {
2534 		if (conn->state == BT_CONFIG) {
2535 			hci_connect_cfm(conn, status);
2536 			hci_conn_drop(conn);
2537 		}
2538 	}
2539 
2540 	hci_dev_unlock(hdev);
2541 }
2542 
2543 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2544 {
2545 	struct hci_cp_read_remote_ext_features *cp;
2546 	struct hci_conn *conn;
2547 
2548 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2549 
2550 	if (!status)
2551 		return;
2552 
2553 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2554 	if (!cp)
2555 		return;
2556 
2557 	hci_dev_lock(hdev);
2558 
2559 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2560 	if (conn) {
2561 		if (conn->state == BT_CONFIG) {
2562 			hci_connect_cfm(conn, status);
2563 			hci_conn_drop(conn);
2564 		}
2565 	}
2566 
2567 	hci_dev_unlock(hdev);
2568 }
2569 
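/* Common error path for (Enhanced) Setup Synchronous Connection: look
 * up the SCO/eSCO link hanging off the ACL connection, notify the
 * upper layers via hci_connect_cfm() and tear the link down.
 */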
2570 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2571 				       __u8 status)
2572 {
2573 	struct hci_conn *acl;
2574 	struct hci_link *link;
2575 
2576 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2577 
2578 	hci_dev_lock(hdev);
2579 
2580 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2581 	if (acl) {
2582 		link = list_first_entry_or_null(&acl->link_list,
2583 						struct hci_link, list);
2584 		if (link && link->conn) {
2585 			link->conn->state = BT_CLOSED;
2586 
2587 			hci_connect_cfm(link->conn, status);
2588 			hci_conn_del(link->conn);
2589 		}
2590 	}
2591 
2592 	hci_dev_unlock(hdev);
2593 }
2594 
2595 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2596 {
2597 	struct hci_cp_setup_sync_conn *cp;
2598 
2599 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2600 
2601 	if (!status)
2602 		return;
2603 
2604 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2605 	if (!cp)
2606 		return;
2607 
2608 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2609 }
2610 
2611 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2612 {
2613 	struct hci_cp_enhanced_setup_sync_conn *cp;
2614 
2615 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2616 
2617 	if (!status)
2618 		return;
2619 
2620 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2621 	if (!cp)
2622 		return;
2623 
2624 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2625 }
2626 
2627 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2628 {
2629 	struct hci_cp_sniff_mode *cp;
2630 	struct hci_conn *conn;
2631 
2632 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2633 
2634 	if (!status)
2635 		return;
2636 
2637 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2638 	if (!cp)
2639 		return;
2640 
2641 	hci_dev_lock(hdev);
2642 
2643 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2644 	if (conn) {
2645 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2646 
2647 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2648 			hci_sco_setup(conn, status);
2649 	}
2650 
2651 	hci_dev_unlock(hdev);
2652 }
2653 
2654 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2655 {
2656 	struct hci_cp_exit_sniff_mode *cp;
2657 	struct hci_conn *conn;
2658 
2659 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2660 
2661 	if (!status)
2662 		return;
2663 
2664 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2665 	if (!cp)
2666 		return;
2667 
2668 	hci_dev_lock(hdev);
2669 
2670 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2671 	if (conn) {
2672 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2673 
2674 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2675 			hci_sco_setup(conn, status);
2676 	}
2677 
2678 	hci_dev_unlock(hdev);
2679 }
2680 
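/* Command Status for HCI_OP_DISCONNECT. Normally cleanup happens in
 * hci_disconn_complete_evt(), but on an error status or while
 * suspended that event may never arrive, so the connection has to be
 * cleaned up here.
 */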
2681 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2682 {
2683 	struct hci_cp_disconnect *cp;
2684 	struct hci_conn_params *params;
2685 	struct hci_conn *conn;
2686 	bool mgmt_conn;
2687 
2688 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2689 
2690 	/* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and not
2691 	 * suspended; otherwise clean up the connection immediately.
2692 	 */
2693 	if (!status && !hdev->suspended)
2694 		return;
2695 
2696 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2697 	if (!cp)
2698 		return;
2699 
2700 	hci_dev_lock(hdev);
2701 
2702 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2703 	if (!conn)
2704 		goto unlock;
2705 
2706 	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
2707 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2708 				       conn->dst_type, status);
2709 
2710 		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2711 			hdev->cur_adv_instance = conn->adv_instance;
2712 			hci_enable_advertising(hdev);
2713 		}
2714 
2715 		/* Inform sockets conn is gone before we delete it */
2716 		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2717 
2718 		goto done;
2719 	}
2720 
2721 	/* During suspend, mark connection as closed immediately
2722 	 * since we might not receive HCI_EV_DISCONN_COMPLETE
2723 	 */
2724 	if (hdev->suspended)
2725 		conn->state = BT_CLOSED;
2726 
2727 	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2728 
2729 	if (conn->type == ACL_LINK) {
2730 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2731 			hci_remove_link_key(hdev, &conn->dst);
2732 	}
2733 
2734 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2735 	if (params) {
2736 		switch (params->auto_connect) {
2737 		case HCI_AUTO_CONN_LINK_LOSS:
2738 			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2739 				break;
2740 			fallthrough;
2741 
2742 		case HCI_AUTO_CONN_DIRECT:
2743 		case HCI_AUTO_CONN_ALWAYS:
2744 			hci_pend_le_list_del_init(params);
2745 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2746 			break;
2747 
2748 		default:
2749 			break;
2750 		}
2751 	}
2752 
2753 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2754 				 cp->reason, mgmt_conn);
2755 
2756 	hci_disconn_cfm(conn, cp->reason);
2757 
2758 done:
2759 	/* If the disconnection failed for any reason, the upper layer
2760 	 * does not retry the disconnect in the current implementation.
2761 	 * Hence, we need to do some basic cleanup here and re-enable
2762 	 * advertising if necessary.
2763 	 */
2764 	hci_conn_del(conn);
2765 unlock:
2766 	hci_dev_unlock(hdev);
2767 }
2768 
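/* Translate the resolved LE address types (0x02/0x03) reported by the
 * controller back to public/random, and report through the optional
 * resolved argument whether controller-based address resolution took
 * place.
 */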
2769 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2770 {
2771 	/* When using controller-based address resolution, the new
2772 	 * address types 0x02 and 0x03 are used. These types need to be
2773 	 * converted back into either public or random address type.
2774 	 */
2775 	switch (type) {
2776 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2777 		if (resolved)
2778 			*resolved = true;
2779 		return ADDR_LE_DEV_PUBLIC;
2780 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2781 		if (resolved)
2782 			*resolved = true;
2783 		return ADDR_LE_DEV_RANDOM;
2784 	}
2785 
2786 	if (resolved)
2787 		*resolved = false;
2788 	return type;
2789 }
2790 
2791 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2792 			      u8 peer_addr_type, u8 own_address_type,
2793 			      u8 filter_policy)
2794 {
2795 	struct hci_conn *conn;
2796 
2797 	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2798 				       peer_addr_type);
2799 	if (!conn)
2800 		return;
2801 
2802 	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2803 
2804 	/* Store the initiator and responder address information which
2805 	 * is needed for SMP. These values will not change during the
2806 	 * lifetime of the connection.
2807 	 */
2808 	conn->init_addr_type = own_address_type;
2809 	if (own_address_type == ADDR_LE_DEV_RANDOM)
2810 		bacpy(&conn->init_addr, &hdev->random_addr);
2811 	else
2812 		bacpy(&conn->init_addr, &hdev->bdaddr);
2813 
2814 	conn->resp_addr_type = peer_addr_type;
2815 	bacpy(&conn->resp_addr, peer_addr);
2816 }
2817 
2818 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2819 {
2820 	struct hci_cp_le_create_conn *cp;
2821 
2822 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2823 
2824 	/* All connection failure handling is taken care of by the
2825 	 * hci_conn_failed function which is triggered by the HCI
2826 	 * request completion callbacks used for connecting.
2827 	 */
2828 	if (status)
2829 		return;
2830 
2831 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2832 	if (!cp)
2833 		return;
2834 
2835 	hci_dev_lock(hdev);
2836 
2837 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2838 			  cp->own_address_type, cp->filter_policy);
2839 
2840 	hci_dev_unlock(hdev);
2841 }
2842 
2843 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2844 {
2845 	struct hci_cp_le_ext_create_conn *cp;
2846 
2847 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2848 
2849 	/* All connection failure handling is taken care of by the
2850 	 * hci_conn_failed function which is triggered by the HCI
2851 	 * request completion callbacks used for connecting.
2852 	 */
2853 	if (status)
2854 		return;
2855 
2856 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2857 	if (!cp)
2858 		return;
2859 
2860 	hci_dev_lock(hdev);
2861 
2862 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2863 			  cp->own_addr_type, cp->filter_policy);
2864 
2865 	hci_dev_unlock(hdev);
2866 }
2867 
2868 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2869 {
2870 	struct hci_cp_le_read_remote_features *cp;
2871 	struct hci_conn *conn;
2872 
2873 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2874 
2875 	if (!status)
2876 		return;
2877 
2878 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2879 	if (!cp)
2880 		return;
2881 
2882 	hci_dev_lock(hdev);
2883 
2884 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2885 	if (conn) {
2886 		if (conn->state == BT_CONFIG) {
2887 			hci_connect_cfm(conn, status);
2888 			hci_conn_drop(conn);
2889 		}
2890 	}
2891 
2892 	hci_dev_unlock(hdev);
2893 }
2894 
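/* A failed LE Start Encryption on an established link is not retried;
 * instead of leaving the link unencrypted, disconnect it with
 * HCI_ERROR_AUTH_FAILURE.
 */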
2895 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2896 {
2897 	struct hci_cp_le_start_enc *cp;
2898 	struct hci_conn *conn;
2899 
2900 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2901 
2902 	if (!status)
2903 		return;
2904 
2905 	hci_dev_lock(hdev);
2906 
2907 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2908 	if (!cp)
2909 		goto unlock;
2910 
2911 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2912 	if (!conn)
2913 		goto unlock;
2914 
2915 	if (conn->state != BT_CONNECTED)
2916 		goto unlock;
2917 
2918 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2919 	hci_conn_drop(conn);
2920 
2921 unlock:
2922 	hci_dev_unlock(hdev);
2923 }
2924 
2925 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2926 {
2927 	struct hci_cp_switch_role *cp;
2928 	struct hci_conn *conn;
2929 
2930 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2931 
2932 	if (!status)
2933 		return;
2934 
2935 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2936 	if (!cp)
2937 		return;
2938 
2939 	hci_dev_lock(hdev);
2940 
2941 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2942 	if (conn)
2943 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2944 
2945 	hci_dev_unlock(hdev);
2946 }
2947 
2948 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2949 				     struct sk_buff *skb)
2950 {
2951 	struct hci_ev_status *ev = data;
2952 	struct discovery_state *discov = &hdev->discovery;
2953 	struct inquiry_entry *e;
2954 
2955 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2956 
2957 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2958 		return;
2959 
2960 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2961 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2962 
2963 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2964 		return;
2965 
2966 	hci_dev_lock(hdev);
2967 
2968 	if (discov->state != DISCOVERY_FINDING)
2969 		goto unlock;
2970 
2971 	if (list_empty(&discov->resolve)) {
2972 		/* When BR/EDR inquiry is active and no LE scanning is in
2973 		 * progress, then change discovery state to indicate completion.
2974 		 *
2975 		 * When running LE scanning and BR/EDR inquiry simultaneously
2976 		 * and the LE scan already finished, then change the discovery
2977 		 * state to indicate completion.
2978 		 */
2979 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2980 		    !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
2981 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2982 		goto unlock;
2983 	}
2984 
2985 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2986 	if (e && hci_resolve_name(hdev, e) == 0) {
2987 		e->name_state = NAME_PENDING;
2988 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2989 		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
2990 	} else {
2991 		/* When BR/EDR inquiry is active and no LE scanning is in
2992 		 * progress, then change discovery state to indicate completion.
2993 		 *
2994 		 * When running LE scanning and BR/EDR inquiry simultaneously
2995 		 * and the LE scan already finished, then change the discovery
2996 		 * state to indicate completion.
2997 		 */
2998 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2999 		    !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
3000 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3001 	}
3002 
3003 unlock:
3004 	hci_dev_unlock(hdev);
3005 }
3006 
3007 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3008 				   struct sk_buff *skb)
3009 {
3010 	struct hci_ev_inquiry_result *ev = edata;
3011 	struct inquiry_data data;
3012 	int i;
3013 
3014 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3015 			     flex_array_size(ev, info, ev->num)))
3016 		return;
3017 
3018 	bt_dev_dbg(hdev, "num %d", ev->num);
3019 
3020 	if (!ev->num)
3021 		return;
3022 
3023 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3024 		return;
3025 
3026 	hci_dev_lock(hdev);
3027 
3028 	for (i = 0; i < ev->num; i++) {
3029 		struct inquiry_info *info = &ev->info[i];
3030 		u32 flags;
3031 
3032 		bacpy(&data.bdaddr, &info->bdaddr);
3033 		data.pscan_rep_mode	= info->pscan_rep_mode;
3034 		data.pscan_period_mode	= info->pscan_period_mode;
3035 		data.pscan_mode		= info->pscan_mode;
3036 		memcpy(data.dev_class, info->dev_class, 3);
3037 		data.clock_offset	= info->clock_offset;
3038 		data.rssi		= HCI_RSSI_INVALID;
3039 		data.ssp_mode		= 0x00;
3040 
3041 		flags = hci_inquiry_cache_update(hdev, &data, false);
3042 
3043 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3044 				  info->dev_class, HCI_RSSI_INVALID,
3045 				  flags, NULL, 0, NULL, 0, 0);
3046 	}
3047 
3048 	hci_dev_unlock(hdev);
3049 }
3050 
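/* Request the encryption key size for a connection. Until the Command
 * Complete arrives, enc_key_size is primed with either a previously
 * known value or hdev->min_enc_key_size so key size checks don't fail
 * in the meantime.
 */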
3051 static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn)
3052 {
3053 	struct hci_cp_read_enc_key_size cp;
3054 	u8 *key_enc_size = hci_conn_key_enc_size(conn);
3055 
3056 	if (!read_key_size_capable(hdev)) {
3057 		conn->enc_key_size = HCI_LINK_KEY_SIZE;
3058 		return -EOPNOTSUPP;
3059 	}
3060 
3061 	bt_dev_dbg(hdev, "hcon %p", conn);
3062 
3063 	memset(&cp, 0, sizeof(cp));
3064 	cp.handle = cpu_to_le16(conn->handle);
3065 
3066 	/* If the key enc_size is already known, use it as conn->enc_key_size,
3067 	 * otherwise use hdev->min_enc_key_size so the likes of
3068 	 * l2cap_check_enc_key_size don't fail while waiting for
3069 	 * HCI_OP_READ_ENC_KEY_SIZE response.
3070 	 */
3071 	if (key_enc_size && *key_enc_size)
3072 		conn->enc_key_size = *key_enc_size;
3073 	else
3074 		conn->enc_key_size = hdev->min_enc_key_size;
3075 
3076 	return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3077 }
3078 
3079 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3080 				  struct sk_buff *skb)
3081 {
3082 	struct hci_ev_conn_complete *ev = data;
3083 	struct hci_conn *conn;
3084 	u8 status = ev->status;
3085 
3086 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3087 
3088 	hci_dev_lock(hdev);
3089 
3090 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3091 	if (!conn) {
3092 		/* In case of an error status with no connection pending,
3093 		 * just unlock as there is nothing to clean up.
3094 		 */
3095 		if (ev->status)
3096 			goto unlock;
3097 
3098 		/* Connection may not exist if auto-connected. Check the bredr
3099 		 * allowlist to see if this device is allowed to auto connect.
3100 		 * If the link is an ACL type, create a connection
3101 		 * automatically.
3102 		 *
3103 		 * Auto-connect will only occur if the event filter is
3104 		 * programmed with a given address. Right now, the event
3105 		 * filter is only used during suspend.
3106 		 */
3107 		if (ev->link_type == ACL_LINK &&
3108 		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3109 						      &ev->bdaddr,
3110 						      BDADDR_BREDR)) {
3111 			conn = hci_conn_add_unset(hdev, ev->link_type,
3112 						  &ev->bdaddr, HCI_ROLE_SLAVE);
3113 			if (IS_ERR(conn)) {
3114 				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3115 				goto unlock;
3116 			}
3117 		} else {
3118 			if (ev->link_type != SCO_LINK)
3119 				goto unlock;
3120 
3121 			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3122 						       &ev->bdaddr);
3123 			if (!conn)
3124 				goto unlock;
3125 
3126 			conn->type = SCO_LINK;
3127 		}
3128 	}
3129 
3130 	/* The HCI_Connection_Complete event is only sent once per connection.
3131 	 * Processing it more than once per connection can corrupt kernel memory.
3132 	 *
3133 	 * As the connection handle is set here for the first time, it indicates
3134 	 * whether the connection is already set up.
3135 	 */
3136 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3137 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3138 		goto unlock;
3139 	}
3140 
3141 	if (!status) {
3142 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3143 		if (status)
3144 			goto done;
3145 
3146 		if (conn->type == ACL_LINK) {
3147 			conn->state = BT_CONFIG;
3148 			hci_conn_hold(conn);
3149 
3150 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3151 			    !hci_find_link_key(hdev, &ev->bdaddr))
3152 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3153 			else
3154 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3155 		} else
3156 			conn->state = BT_CONNECTED;
3157 
3158 		hci_debugfs_create_conn(conn);
3159 		hci_conn_add_sysfs(conn);
3160 
3161 		if (test_bit(HCI_AUTH, &hdev->flags))
3162 			set_bit(HCI_CONN_AUTH, &conn->flags);
3163 
3164 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3165 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3166 
3167 		/* "Link key request" completed ahead of "connect request" completes */
3168 		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3169 		    ev->link_type == ACL_LINK) {
3170 			struct link_key *key;
3171 
3172 			key = hci_find_link_key(hdev, &ev->bdaddr);
3173 			if (key) {
3174 				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3175 				hci_read_enc_key_size(hdev, conn);
3176 				hci_encrypt_cfm(conn, ev->status);
3177 			}
3178 		}
3179 
3180 		/* Get remote features */
3181 		if (conn->type == ACL_LINK) {
3182 			struct hci_cp_read_remote_features cp;
3183 			cp.handle = ev->handle;
3184 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3185 				     sizeof(cp), &cp);
3186 
3187 			hci_update_scan(hdev);
3188 		}
3189 
3190 		/* Set packet type for incoming connection */
3191 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3192 			struct hci_cp_change_conn_ptype cp;
3193 			cp.handle = ev->handle;
3194 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3195 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3196 				     &cp);
3197 		}
3198 	}
3199 
3200 	if (conn->type == ACL_LINK)
3201 		hci_sco_setup(conn, ev->status);
3202 
3203 done:
3204 	if (status) {
3205 		hci_conn_failed(conn, status);
3206 	} else if (ev->link_type == SCO_LINK) {
3207 		switch (conn->setting & SCO_AIRMODE_MASK) {
3208 		case SCO_AIRMODE_CVSD:
3209 			if (hdev->notify)
3210 				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3211 			break;
3212 		}
3213 
3214 		hci_connect_cfm(conn, status);
3215 	}
3216 
3217 unlock:
3218 	hci_dev_unlock(hdev);
3219 }
3220 
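/* Helper to reject an incoming connection request, using
 * HCI_ERROR_REJ_BAD_ADDR as the rejection reason.
 */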
3221 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3222 {
3223 	struct hci_cp_reject_conn_req cp;
3224 
3225 	bacpy(&cp.bdaddr, bdaddr);
3226 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3227 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3228 }
3229 
3230 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3231 				 struct sk_buff *skb)
3232 {
3233 	struct hci_ev_conn_request *ev = data;
3234 	int mask = hdev->link_mode;
3235 	struct inquiry_entry *ie;
3236 	struct hci_conn *conn;
3237 	__u8 flags = 0;
3238 
3239 	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3240 
3241 	/* Reject an incoming connection from a device with the same BD_ADDR
3242 	 * to guard against CVE-2020-26555
3243 	 */
3244 	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3245 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR",
3246 			   &ev->bdaddr);
3247 		hci_reject_conn(hdev, &ev->bdaddr);
3248 		return;
3249 	}
3250 
3251 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3252 				      &flags);
3253 
3254 	if (!(mask & HCI_LM_ACCEPT)) {
3255 		hci_reject_conn(hdev, &ev->bdaddr);
3256 		return;
3257 	}
3258 
3259 	hci_dev_lock(hdev);
3260 
3261 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3262 				   BDADDR_BREDR)) {
3263 		hci_reject_conn(hdev, &ev->bdaddr);
3264 		goto unlock;
3265 	}
3266 
3267 	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3268 	 * connection. These features are only touched through mgmt so
3269 	 * only do the checks if HCI_MGMT is set.
3270 	 */
3271 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3272 	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3273 	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3274 					       BDADDR_BREDR)) {
3275 		hci_reject_conn(hdev, &ev->bdaddr);
3276 		goto unlock;
3277 	}
3278 
3279 	/* Connection accepted */
3280 
3281 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3282 	if (ie)
3283 		memcpy(ie->data.dev_class, ev->dev_class, 3);
3284 
3285 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3286 			&ev->bdaddr);
3287 	if (!conn) {
3288 		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3289 					  HCI_ROLE_SLAVE);
3290 		if (IS_ERR(conn)) {
3291 			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3292 			goto unlock;
3293 		}
3294 	}
3295 
3296 	memcpy(conn->dev_class, ev->dev_class, 3);
3297 
3298 	hci_dev_unlock(hdev);
3299 
3300 	if (ev->link_type == ACL_LINK ||
3301 	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3302 		struct hci_cp_accept_conn_req cp;
3303 		conn->state = BT_CONNECT;
3304 
3305 		bacpy(&cp.bdaddr, &ev->bdaddr);
3306 
3307 		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3308 			cp.role = 0x00; /* Become central */
3309 		else
3310 			cp.role = 0x01; /* Remain peripheral */
3311 
3312 		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3313 	} else if (!(flags & HCI_PROTO_DEFER)) {
3314 		struct hci_cp_accept_sync_conn_req cp;
3315 		conn->state = BT_CONNECT;
3316 
3317 		bacpy(&cp.bdaddr, &ev->bdaddr);
3318 		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3319 
3320 		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3321 		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3322 		cp.max_latency    = cpu_to_le16(0xffff);
3323 		cp.content_format = cpu_to_le16(hdev->voice_setting);
3324 		cp.retrans_effort = 0xff;
3325 
3326 		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3327 			     &cp);
3328 	} else {
3329 		conn->state = BT_CONNECT2;
3330 		hci_connect_cfm(conn, 0);
3331 	}
3332 
3333 	return;
3334 unlock:
3335 	hci_dev_unlock(hdev);
3336 }
3337 
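/* Map an HCI disconnect reason code to the MGMT_DEV_DISCONN_* reason
 * reported to userspace.
 */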
3338 static u8 hci_to_mgmt_reason(u8 err)
3339 {
3340 	switch (err) {
3341 	case HCI_ERROR_CONNECTION_TIMEOUT:
3342 		return MGMT_DEV_DISCONN_TIMEOUT;
3343 	case HCI_ERROR_REMOTE_USER_TERM:
3344 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3345 	case HCI_ERROR_REMOTE_POWER_OFF:
3346 		return MGMT_DEV_DISCONN_REMOTE;
3347 	case HCI_ERROR_LOCAL_HOST_TERM:
3348 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3349 	default:
3350 		return MGMT_DEV_DISCONN_UNKNOWN;
3351 	}
3352 }
3353 
3354 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3355 				     struct sk_buff *skb)
3356 {
3357 	struct hci_ev_disconn_complete *ev = data;
3358 	u8 reason;
3359 	struct hci_conn_params *params;
3360 	struct hci_conn *conn;
3361 	bool mgmt_connected;
3362 
3363 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3364 
3365 	hci_dev_lock(hdev);
3366 
3367 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3368 	if (!conn)
3369 		goto unlock;
3370 
3371 	if (ev->status) {
3372 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3373 				       conn->dst_type, ev->status);
3374 		goto unlock;
3375 	}
3376 
3377 	conn->state = BT_CLOSED;
3378 
3379 	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3380 
3381 	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3382 		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3383 	else
3384 		reason = hci_to_mgmt_reason(ev->reason);
3385 
3386 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3387 				reason, mgmt_connected);
3388 
3389 	if (conn->type == ACL_LINK) {
3390 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3391 			hci_remove_link_key(hdev, &conn->dst);
3392 
3393 		hci_update_scan(hdev);
3394 	}
3395 
3396 	/* Re-enable passive scanning if the disconnected device is marked
3397 	 * as auto-connectable.
3398 	 */
3399 	if (conn->type == LE_LINK) {
3400 		params = hci_conn_params_lookup(hdev, &conn->dst,
3401 						conn->dst_type);
3402 		if (params) {
3403 			switch (params->auto_connect) {
3404 			case HCI_AUTO_CONN_LINK_LOSS:
3405 				if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3406 					break;
3407 				fallthrough;
3408 
3409 			case HCI_AUTO_CONN_DIRECT:
3410 			case HCI_AUTO_CONN_ALWAYS:
3411 				hci_pend_le_list_del_init(params);
3412 				hci_pend_le_list_add(params,
3413 						     &hdev->pend_le_conns);
3414 				hci_update_passive_scan(hdev);
3415 				break;
3416 
3417 			default:
3418 				break;
3419 			}
3420 		}
3421 	}
3422 
3423 	hci_disconn_cfm(conn, ev->reason);
3424 
3425 	/* Re-enable advertising if necessary, since it might
3426 	 * have been disabled by the connection. From the
3427 	 * HCI_LE_Set_Advertise_Enable command description in
3428 	 * the core specification (v4.0):
3429 	 * "The Controller shall continue advertising until the Host
3430 	 * issues an LE_Set_Advertise_Enable command with
3431 	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3432 	 * or until a connection is created or until the Advertising
3433 	 * is timed out due to Directed Advertising."
3434 	 */
3435 	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3436 		hdev->cur_adv_instance = conn->adv_instance;
3437 		hci_enable_advertising(hdev);
3438 	}
3439 
3440 	hci_conn_del(conn);
3441 
3442 unlock:
3443 	hci_dev_unlock(hdev);
3444 }
3445 
3446 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3447 				  struct sk_buff *skb)
3448 {
3449 	struct hci_ev_auth_complete *ev = data;
3450 	struct hci_conn *conn;
3451 
3452 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3453 
3454 	hci_dev_lock(hdev);
3455 
3456 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3457 	if (!conn)
3458 		goto unlock;
3459 
3460 	if (!ev->status) {
3461 		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3462 		set_bit(HCI_CONN_AUTH, &conn->flags);
3463 		conn->sec_level = conn->pending_sec_level;
3464 	} else {
3465 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3466 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3467 
3468 		mgmt_auth_failed(conn, ev->status);
3469 	}
3470 
3471 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3472 
3473 	if (conn->state == BT_CONFIG) {
3474 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3475 			struct hci_cp_set_conn_encrypt cp;
3476 			cp.handle  = ev->handle;
3477 			cp.encrypt = 0x01;
3478 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3479 				     &cp);
3480 		} else {
3481 			conn->state = BT_CONNECTED;
3482 			hci_connect_cfm(conn, ev->status);
3483 			hci_conn_drop(conn);
3484 		}
3485 	} else {
3486 		hci_auth_cfm(conn, ev->status);
3487 
3488 		hci_conn_hold(conn);
3489 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3490 		hci_conn_drop(conn);
3491 	}
3492 
3493 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3494 		if (!ev->status) {
3495 			struct hci_cp_set_conn_encrypt cp;
3496 			cp.handle  = ev->handle;
3497 			cp.encrypt = 0x01;
3498 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3499 				     &cp);
3500 		} else {
3501 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3502 			hci_encrypt_cfm(conn, ev->status);
3503 		}
3504 	}
3505 
3506 unlock:
3507 	hci_dev_unlock(hdev);
3508 }
3509 
3510 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3511 				struct sk_buff *skb)
3512 {
3513 	struct hci_ev_remote_name *ev = data;
3514 	struct hci_conn *conn;
3515 
3516 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3517 
3518 	hci_dev_lock(hdev);
3519 
3520 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3521 
3522 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3523 		goto check_auth;
3524 
3525 	if (ev->status == 0)
3526 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3527 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3528 	else
3529 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3530 
3531 check_auth:
3532 	if (!conn)
3533 		goto unlock;
3534 
3535 	if (!hci_outgoing_auth_needed(hdev, conn))
3536 		goto unlock;
3537 
3538 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3539 		struct hci_cp_auth_requested cp;
3540 
3541 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3542 
3543 		cp.handle = __cpu_to_le16(conn->handle);
3544 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3545 	}
3546 
3547 unlock:
3548 	hci_dev_unlock(hdev);
3549 }
3550 
3551 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3552 				   struct sk_buff *skb)
3553 {
3554 	struct hci_ev_encrypt_change *ev = data;
3555 	struct hci_conn *conn;
3556 
3557 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3558 
3559 	hci_dev_lock(hdev);
3560 
3561 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3562 	if (!conn)
3563 		goto unlock;
3564 
3565 	if (!ev->status) {
3566 		if (ev->encrypt) {
3567 			/* Encryption implies authentication */
3568 			set_bit(HCI_CONN_AUTH, &conn->flags);
3569 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3570 			conn->sec_level = conn->pending_sec_level;
3571 
3572 			/* P-256 authentication key implies FIPS */
3573 			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3574 				set_bit(HCI_CONN_FIPS, &conn->flags);
3575 
3576 			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3577 			    conn->type == LE_LINK)
3578 				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3579 		} else {
3580 			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3581 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3582 		}
3583 	}
3584 
3585 	/* We should disregard the current RPA and generate a new one
3586 	 * whenever the encryption procedure fails.
3587 	 */
3588 	if (ev->status && conn->type == LE_LINK) {
3589 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3590 		hci_adv_instances_set_rpa_expired(hdev, true);
3591 	}
3592 
3593 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3594 
3595 	/* Check link security requirements are met */
3596 	if (!hci_conn_check_link_mode(conn))
3597 		ev->status = HCI_ERROR_AUTH_FAILURE;
3598 
3599 	if (ev->status && conn->state == BT_CONNECTED) {
3600 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3601 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3602 
3603 		/* Notify upper layers so they can cleanup before
3604 		 * disconnecting.
3605 		 */
3606 		hci_encrypt_cfm(conn, ev->status);
3607 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3608 		hci_conn_drop(conn);
3609 		goto unlock;
3610 	}
3611 
3612 	/* Try reading the encryption key size for encrypted ACL links */
3613 	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3614 		if (hci_read_enc_key_size(hdev, conn))
3615 			goto notify;
3616 
3617 		goto unlock;
3618 	}
3619 
3620 	/* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851-based controllers
3621 	 * to avoid unexpected SMP command errors when pairing.
3622 	 */
3623 	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT))
3624 		goto notify;
3625 
3626 	/* Set the default Authenticated Payload Timeout after
3627 	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3628 	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3629 	 * sent when the link is active and encryption is enabled. The conn
3630 	 * type can be either LE or ACL, and the controller must support LMP
3631 	 * Ping. Ensure AES-CCM encryption is used as well.
3632 	 */
3633 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3634 	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3635 	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3636 	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3637 		struct hci_cp_write_auth_payload_to cp;
3638 
3639 		cp.handle = cpu_to_le16(conn->handle);
3640 		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3641 		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3642 				 sizeof(cp), &cp))
3643 			bt_dev_err(hdev, "write auth payload timeout failed");
3644 	}
3645 
3646 notify:
3647 	hci_encrypt_cfm(conn, ev->status);
3648 
3649 unlock:
3650 	hci_dev_unlock(hdev);
3651 }
3652 
3653 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3654 					     struct sk_buff *skb)
3655 {
3656 	struct hci_ev_change_link_key_complete *ev = data;
3657 	struct hci_conn *conn;
3658 
3659 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3660 
3661 	hci_dev_lock(hdev);
3662 
3663 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3664 	if (conn) {
3665 		if (!ev->status)
3666 			set_bit(HCI_CONN_SECURE, &conn->flags);
3667 
3668 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3669 
3670 		hci_key_change_cfm(conn, ev->status);
3671 	}
3672 
3673 	hci_dev_unlock(hdev);
3674 }
3675 
3676 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3677 				    struct sk_buff *skb)
3678 {
3679 	struct hci_ev_remote_features *ev = data;
3680 	struct hci_conn *conn;
3681 
3682 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3683 
3684 	hci_dev_lock(hdev);
3685 
3686 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3687 	if (!conn)
3688 		goto unlock;
3689 
3690 	if (!ev->status)
3691 		memcpy(conn->features[0], ev->features, 8);
3692 
3693 	if (conn->state != BT_CONFIG)
3694 		goto unlock;
3695 
3696 	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3697 	    lmp_ext_feat_capable(conn)) {
3698 		struct hci_cp_read_remote_ext_features cp;
3699 		cp.handle = ev->handle;
3700 		cp.page = 0x01;
3701 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3702 			     sizeof(cp), &cp);
3703 		goto unlock;
3704 	}
3705 
3706 	if (!ev->status) {
3707 		struct hci_cp_remote_name_req cp;
3708 		memset(&cp, 0, sizeof(cp));
3709 		bacpy(&cp.bdaddr, &conn->dst);
3710 		cp.pscan_rep_mode = 0x02;
3711 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3712 	} else {
3713 		mgmt_device_connected(hdev, conn, NULL, 0);
3714 	}
3715 
3716 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3717 		conn->state = BT_CONNECTED;
3718 		hci_connect_cfm(conn, ev->status);
3719 		hci_conn_drop(conn);
3720 	}
3721 
3722 unlock:
3723 	hci_dev_unlock(hdev);
3724 }
3725 
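/* Update HCI command flow control from the Num_HCI_Command_Packets (ncmd)
 * field of a Command Complete/Status event: a non-zero value re-opens the
 * command queue, while zero arms the ncmd watchdog timer so a controller
 * that stops reporting free command slots cannot stall command processing
 * indefinitely.
 */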
3726 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3727 {
3728 	cancel_delayed_work(&hdev->cmd_timer);
3729 
3730 	rcu_read_lock();
3731 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3732 		if (ncmd) {
3733 			cancel_delayed_work(&hdev->ncmd_timer);
3734 			atomic_set(&hdev->cmd_cnt, 1);
3735 		} else {
3736 			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3737 				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3738 						   HCI_NCMD_TIMEOUT);
3739 		}
3740 	}
3741 	rcu_read_unlock();
3742 }
3743 
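/* LE Read Buffer Size v2 reports the ISO buffer (used for CIS/BIS data)
 * in addition to the LE ACL buffer reported by the v1 command.
 */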
3744 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3745 					struct sk_buff *skb)
3746 {
3747 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3748 
3749 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3750 
3751 	if (rp->status)
3752 		return rp->status;
3753 
3754 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3755 	hdev->le_pkts  = rp->acl_max_pkt;
3756 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3757 	hdev->iso_pkts = rp->iso_max_pkt;
3758 
3759 	hdev->le_cnt  = hdev->le_pkts;
3760 	hdev->iso_cnt = hdev->iso_pkts;
3761 
3762 	BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3763 	       hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3764 
3765 	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3766 		return HCI_ERROR_INVALID_PARAMETERS;
3767 
3768 	return rp->status;
3769 }
3770 
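/* Fail every CIS of the given CIG that has not been assigned a valid
 * connection handle yet, e.g. when Set CIG Parameters returned an error.
 */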
3771 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3772 {
3773 	struct hci_conn *conn, *tmp;
3774 
3775 	lockdep_assert_held(&hdev->lock);
3776 
3777 	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3778 		if (conn->type != CIS_LINK ||
3779 		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3780 			continue;
3781 
3782 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3783 			hci_conn_failed(conn, status);
3784 	}
3785 }
3786 
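/* Validate the Set CIG Parameters response against the command that was
 * actually sent before assigning the returned connection handles to the
 * matching CIS connections.
 */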
3787 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3788 				   struct sk_buff *skb)
3789 {
3790 	struct hci_rp_le_set_cig_params *rp = data;
3791 	struct hci_cp_le_set_cig_params *cp;
3792 	struct hci_conn *conn;
3793 	u8 status = rp->status;
3794 	bool pending = false;
3795 	int i;
3796 
3797 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3798 
3799 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3800 	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3801 			    rp->cig_id != cp->cig_id)) {
3802 		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3803 		status = HCI_ERROR_UNSPECIFIED;
3804 	}
3805 
3806 	hci_dev_lock(hdev);
3807 
3808 	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3809 	 *
3810 	 * If the Status return parameter is non-zero, then the state of the CIG
3811 	 * and its CIS configurations shall not be changed by the command. If
3812 	 * the CIG did not already exist, it shall not be created.
3813 	 */
3814 	if (status) {
3815 		/* Keep current configuration, fail only the unbound CIS */
3816 		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3817 		goto unlock;
3818 	}
3819 
3820 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3821 	 *
3822 	 * If the Status return parameter is zero, then the Controller shall
3823 	 * set the Connection_Handle arrayed return parameter to the connection
3824 	 * handle(s) corresponding to the CIS configurations specified in
3825 	 * the CIS_IDs command parameter, in the same order.
3826 	 */
3827 	for (i = 0; i < rp->num_handles; ++i) {
3828 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3829 						cp->cis[i].cis_id);
3830 		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3831 			continue;
3832 
3833 		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3834 			continue;
3835 
3836 		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3837 			continue;
3838 
3839 		if (conn->state == BT_CONNECT)
3840 			pending = true;
3841 	}
3842 
3843 unlock:
3844 	if (pending)
3845 		hci_le_create_cis_pending(hdev);
3846 
3847 	hci_dev_unlock(hdev);
3848 
3849 	return rp->status;
3850 }
3851 
3852 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3853 				   struct sk_buff *skb)
3854 {
3855 	struct hci_rp_le_setup_iso_path *rp = data;
3856 	struct hci_cp_le_setup_iso_path *cp;
3857 	struct hci_conn *conn;
3858 
3859 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3860 
3861 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3862 	if (!cp)
3863 		return rp->status;
3864 
3865 	hci_dev_lock(hdev);
3866 
3867 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3868 	if (!conn)
3869 		goto unlock;
3870 
3871 	if (rp->status) {
3872 		hci_connect_cfm(conn, rp->status);
3873 		hci_conn_del(conn);
3874 		goto unlock;
3875 	}
3876 
3877 	switch (cp->direction) {
3878 	/* Input (Host to Controller) */
3879 	case 0x00:
3880 		/* Only confirm connection if output only */
3881 		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3882 			hci_connect_cfm(conn, rp->status);
3883 		break;
3884 	/* Output (Controller to Host) */
3885 	case 0x01:
3886 		/* Confirm connection since conn->iso_qos is always configured
3887 		 * last.
3888 		 */
3889 		hci_connect_cfm(conn, rp->status);
3890 
3891 		/* Notify device connected in case it is a BIG Sync */
3892 		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3893 			mgmt_device_connected(hdev, conn, NULL, 0);
3894 
3895 		break;
3896 	}
3897 
3898 unlock:
3899 	hci_dev_unlock(hdev);
3900 	return rp->status;
3901 }
3902 
3903 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3904 {
3905 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3906 }
3907 
3908 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3909 				   struct sk_buff *skb)
3910 {
3911 	struct hci_ev_status *rp = data;
3912 	struct hci_cp_le_set_per_adv_params *cp;
3913 
3914 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3915 
3916 	if (rp->status)
3917 		return rp->status;
3918 
3919 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3920 	if (!cp)
3921 		return rp->status;
3922 
3923 	/* TODO: set the conn state */
3924 	return rp->status;
3925 }
3926 
3927 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3928 				       struct sk_buff *skb)
3929 {
3930 	struct hci_ev_status *rp = data;
3931 	struct hci_cp_le_set_per_adv_enable *cp;
3932 	struct adv_info *adv = NULL, *n;
3933 	u8 per_adv_cnt = 0;
3934 
3935 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3936 
3937 	if (rp->status)
3938 		return rp->status;
3939 
3940 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3941 	if (!cp)
3942 		return rp->status;
3943 
3944 	hci_dev_lock(hdev);
3945 
3946 	adv = hci_find_adv_instance(hdev, cp->handle);
3947 
3948 	if (cp->enable) {
3949 		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3950 
3951 		if (adv)
3952 			adv->enabled = true;
3953 	} else {
3954 		/* If just one instance was disabled, check if there are
3955 		 * any other instances enabled before clearing HCI_LE_PER_ADV.
3956 		 * The current periodic adv instance will be marked as
3957 		 * disabled once extended advertising is also disabled.
3958 		 */
3959 		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3960 					 list) {
3961 			if (adv->periodic && adv->enabled)
3962 				per_adv_cnt++;
3963 		}
3964 
3965 		if (per_adv_cnt > 1)
3966 			goto unlock;
3967 
3968 		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3969 	}
3970 
3971 unlock:
3972 	hci_dev_unlock(hdev);
3973 
3974 	return rp->status;
3975 }
3976 
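/* Command Complete handlers are dispatched via hci_cc_table below:
 * HCI_CC_VL() describes a variable-length response bounded by min/max,
 * HCI_CC() a fixed-length response and HCI_CC_STATUS() a response that
 * consists of just a status byte (struct hci_ev_status).
 */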
3977 #define HCI_CC_VL(_op, _func, _min, _max) \
3978 { \
3979 	.op = _op, \
3980 	.func = _func, \
3981 	.min_len = _min, \
3982 	.max_len = _max, \
3983 }
3984 
3985 #define HCI_CC(_op, _func, _len) \
3986 	HCI_CC_VL(_op, _func, _len, _len)
3987 
3988 #define HCI_CC_STATUS(_op, _func) \
3989 	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
3990 
3991 static const struct hci_cc {
3992 	u16  op;
3993 	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
3994 	u16  min_len;
3995 	u16  max_len;
3996 } hci_cc_table[] = {
3997 	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
3998 	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
3999 	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4000 	HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel,
4001 	       sizeof(struct hci_rp_remote_name_req_cancel)),
4002 	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4003 	       sizeof(struct hci_rp_role_discovery)),
4004 	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4005 	       sizeof(struct hci_rp_read_link_policy)),
4006 	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4007 	       sizeof(struct hci_rp_write_link_policy)),
4008 	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4009 	       sizeof(struct hci_rp_read_def_link_policy)),
4010 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4011 		      hci_cc_write_def_link_policy),
4012 	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4013 	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4014 	       sizeof(struct hci_rp_read_stored_link_key)),
4015 	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4016 	       sizeof(struct hci_rp_delete_stored_link_key)),
4017 	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4018 	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4019 	       sizeof(struct hci_rp_read_local_name)),
4020 	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4021 	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4022 	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4023 	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4024 	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4025 	       sizeof(struct hci_rp_read_class_of_dev)),
4026 	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4027 	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4028 	       sizeof(struct hci_rp_read_voice_setting)),
4029 	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4030 	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4031 	       sizeof(struct hci_rp_read_num_supported_iac)),
4032 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4033 	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4034 	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4035 	       sizeof(struct hci_rp_read_auth_payload_to)),
4036 	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4037 	       sizeof(struct hci_rp_write_auth_payload_to)),
4038 	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4039 	       sizeof(struct hci_rp_read_local_version)),
4040 	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4041 	       sizeof(struct hci_rp_read_local_commands)),
4042 	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4043 	       sizeof(struct hci_rp_read_local_features)),
4044 	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4045 	       sizeof(struct hci_rp_read_local_ext_features)),
4046 	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4047 	       sizeof(struct hci_rp_read_buffer_size)),
4048 	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4049 	       sizeof(struct hci_rp_read_bd_addr)),
4050 	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4051 	       sizeof(struct hci_rp_read_local_pairing_opts)),
4052 	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4053 	       sizeof(struct hci_rp_read_page_scan_activity)),
4054 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4055 		      hci_cc_write_page_scan_activity),
4056 	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4057 	       sizeof(struct hci_rp_read_page_scan_type)),
4058 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4059 	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4060 	       sizeof(struct hci_rp_read_clock)),
4061 	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4062 	       sizeof(struct hci_rp_read_enc_key_size)),
4063 	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4064 	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4065 	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4066 	       hci_cc_read_def_err_data_reporting,
4067 	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4068 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4069 		      hci_cc_write_def_err_data_reporting),
4070 	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4071 	       sizeof(struct hci_rp_pin_code_reply)),
4072 	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4073 	       sizeof(struct hci_rp_pin_code_neg_reply)),
4074 	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4075 	       sizeof(struct hci_rp_read_local_oob_data)),
4076 	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4077 	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4078 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4079 	       sizeof(struct hci_rp_le_read_buffer_size)),
4080 	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4081 	       sizeof(struct hci_rp_le_read_local_features)),
4082 	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4083 	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4084 	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4085 	       sizeof(struct hci_rp_user_confirm_reply)),
4086 	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4087 	       sizeof(struct hci_rp_user_confirm_reply)),
4088 	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4089 	       sizeof(struct hci_rp_user_confirm_reply)),
4090 	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4091 	       sizeof(struct hci_rp_user_confirm_reply)),
4092 	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4093 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4094 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4095 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4096 	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4097 	       hci_cc_le_read_accept_list_size,
4098 	       sizeof(struct hci_rp_le_read_accept_list_size)),
4099 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4100 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4101 		      hci_cc_le_add_to_accept_list),
4102 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4103 		      hci_cc_le_del_from_accept_list),
4104 	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4105 	       sizeof(struct hci_rp_le_read_supported_states)),
4106 	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4107 	       sizeof(struct hci_rp_le_read_def_data_len)),
4108 	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4109 		      hci_cc_le_write_def_data_len),
4110 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4111 		      hci_cc_le_add_to_resolv_list),
4112 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4113 		      hci_cc_le_del_from_resolv_list),
4114 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4115 		      hci_cc_le_clear_resolv_list),
4116 	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4117 	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4118 	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4119 		      hci_cc_le_set_addr_resolution_enable),
4120 	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4121 	       sizeof(struct hci_rp_le_read_max_data_len)),
4122 	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4123 		      hci_cc_write_le_host_supported),
4124 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4125 	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4126 	       sizeof(struct hci_rp_read_rssi)),
4127 	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4128 	       sizeof(struct hci_rp_read_tx_power)),
4129 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4130 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4131 		      hci_cc_le_set_ext_scan_param),
4132 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4133 		      hci_cc_le_set_ext_scan_enable),
4134 	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4135 	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4136 	       hci_cc_le_read_num_adv_sets,
4137 	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4138 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4139 		      hci_cc_le_set_ext_adv_enable),
4140 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4141 		      hci_cc_le_set_adv_set_random_addr),
4142 	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4143 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4144 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4145 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4146 		      hci_cc_le_set_per_adv_enable),
4147 	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4148 	       sizeof(struct hci_rp_le_read_transmit_power)),
4149 	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4150 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4151 	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4152 	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4153 		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4154 	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4155 	       sizeof(struct hci_rp_le_setup_iso_path)),
4156 };
4157 
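/* Validate the length of a Command Complete response against its table
 * entry before handing the payload to the per-opcode handler.
 */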
4158 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4159 		      struct sk_buff *skb)
4160 {
4161 	void *data;
4162 
4163 	if (skb->len < cc->min_len) {
4164 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4165 			   cc->op, skb->len, cc->min_len);
4166 		return HCI_ERROR_UNSPECIFIED;
4167 	}
4168 
4169 	/* Just warn if the length is over max_len; it may still be possible
4170 	 * to partially parse the cc, so leave it to the callback to decide
4171 	 * whether that is acceptable.
4172 	 */
4173 	if (skb->len > cc->max_len)
4174 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4175 			    cc->op, skb->len, cc->max_len);
4176 
4177 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4178 	if (!data)
4179 		return HCI_ERROR_UNSPECIFIED;
4180 
4181 	return cc->func(hdev, data, skb);
4182 }
4183 
4184 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4185 				 struct sk_buff *skb, u16 *opcode, u8 *status,
4186 				 hci_req_complete_t *req_complete,
4187 				 hci_req_complete_skb_t *req_complete_skb)
4188 {
4189 	struct hci_ev_cmd_complete *ev = data;
4190 	int i;
4191 
4192 	*opcode = __le16_to_cpu(ev->opcode);
4193 
4194 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4195 
4196 	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4197 		if (hci_cc_table[i].op == *opcode) {
4198 			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4199 			break;
4200 		}
4201 	}
4202 
4203 	if (i == ARRAY_SIZE(hci_cc_table)) {
4204 		/* Unknown opcode, assume byte 0 contains the status, so
4205 		 * that e.g. __hci_cmd_sync() properly returns errors
4206 		 * for vendor specific commands sent by HCI drivers.
4207 		 * If a vendor doesn't actually follow this convention we may
4208 		 * need to introduce a vendor CC table in order to properly set
4209 		 * the status.
4210 		 */
4211 		*status = skb->data[0];
4212 	}
4213 
4214 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4215 
4216 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4217 			     req_complete_skb);
4218 
4219 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4220 		bt_dev_err(hdev,
4221 			   "unexpected event for opcode 0x%4.4x", *opcode);
4222 		return;
4223 	}
4224 
4225 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4226 		queue_work(hdev->workqueue, &hdev->cmd_work);
4227 }
4228 
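/* Command Status handler for LE Create CIS: if the command was rejected,
 * fail and remove every CIS connection referenced by the sent command,
 * since no CIS Established events will follow.
 */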
4229 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4230 {
4231 	struct hci_cp_le_create_cis *cp;
4232 	bool pending = false;
4233 	int i;
4234 
4235 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4236 
4237 	if (!status)
4238 		return;
4239 
4240 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4241 	if (!cp)
4242 		return;
4243 
4244 	hci_dev_lock(hdev);
4245 
4246 	/* Remove connection if command failed */
4247 	for (i = 0; i < cp->num_cis; i++) {
4248 		struct hci_conn *conn;
4249 		u16 handle;
4250 
4251 		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4252 
4253 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4254 		if (conn) {
4255 			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4256 					       &conn->flags))
4257 				pending = true;
4258 			conn->state = BT_CLOSED;
4259 			hci_connect_cfm(conn, status);
4260 			hci_conn_del(conn);
4261 		}
4262 	}
4263 	cp->num_cis = 0;
4264 
4265 	if (pending)
4266 		hci_le_create_cis_pending(hdev);
4267 
4268 	hci_dev_unlock(hdev);
4269 }
4270 
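/* Command Status handlers, dispatched via hci_cs_table below, run for
 * commands whose result is reported by a later event instead of a
 * Command Complete.
 */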
4271 #define HCI_CS(_op, _func) \
4272 { \
4273 	.op = _op, \
4274 	.func = _func, \
4275 }
4276 
4277 static const struct hci_cs {
4278 	u16  op;
4279 	void (*func)(struct hci_dev *hdev, __u8 status);
4280 } hci_cs_table[] = {
4281 	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4282 	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4283 	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4284 	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4285 	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4286 	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4287 	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4288 	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4289 	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4290 	       hci_cs_read_remote_ext_features),
4291 	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4292 	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4293 	       hci_cs_enhanced_setup_sync_conn),
4294 	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4295 	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4296 	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4297 	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4298 	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4299 	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4300 	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4301 	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4302 	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4303 };
4304 
4305 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4306 			       struct sk_buff *skb, u16 *opcode, u8 *status,
4307 			       hci_req_complete_t *req_complete,
4308 			       hci_req_complete_skb_t *req_complete_skb)
4309 {
4310 	struct hci_ev_cmd_status *ev = data;
4311 	int i;
4312 
4313 	*opcode = __le16_to_cpu(ev->opcode);
4314 	*status = ev->status;
4315 
4316 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4317 
4318 	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4319 		if (hci_cs_table[i].op == *opcode) {
4320 			hci_cs_table[i].func(hdev, ev->status);
4321 			break;
4322 		}
4323 	}
4324 
4325 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4326 
4327 	/* Indicate request completion if the command failed. Also, if
4328 	 * we're not waiting for a special event and we get a success
4329 	 * command status we should try to flag the request as completed
4330 	 * (since for this kind of command there will not be a command
4331 	 * complete event).
4332 	 */
4333 	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4334 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4335 				     req_complete_skb);
4336 		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4337 			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4338 				   *opcode);
4339 			return;
4340 		}
4341 	}
4342 
4343 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4344 		queue_work(hdev->workqueue, &hdev->cmd_work);
4345 }
4346 
4347 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4348 				   struct sk_buff *skb)
4349 {
4350 	struct hci_ev_hardware_error *ev = data;
4351 
4352 	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4353 
4354 	hdev->hw_error_code = ev->code;
4355 
4356 	queue_work(hdev->req_workqueue, &hdev->error_reset);
4357 }
4358 
4359 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4360 				struct sk_buff *skb)
4361 {
4362 	struct hci_ev_role_change *ev = data;
4363 	struct hci_conn *conn;
4364 
4365 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4366 
4367 	hci_dev_lock(hdev);
4368 
4369 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4370 	if (conn) {
4371 		if (!ev->status)
4372 			conn->role = ev->role;
4373 
4374 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4375 
4376 		hci_role_switch_cfm(conn, ev->status, ev->role);
4377 	}
4378 
4379 	hci_dev_unlock(hdev);
4380 }
4381 
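/* The Number Of Completed Packets event implements HCI flow control: it
 * returns transmit credits per connection handle, which are credited back
 * to the matching per-link counters (capped at the buffer sizes read from
 * the controller) before the TX work is kicked.
 */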
4382 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4383 				  struct sk_buff *skb)
4384 {
4385 	struct hci_ev_num_comp_pkts *ev = data;
4386 	int i;
4387 
4388 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4389 			     flex_array_size(ev, handles, ev->num)))
4390 		return;
4391 
4392 	bt_dev_dbg(hdev, "num %d", ev->num);
4393 
4394 	for (i = 0; i < ev->num; i++) {
4395 		struct hci_comp_pkts_info *info = &ev->handles[i];
4396 		struct hci_conn *conn;
4397 		__u16  handle, count;
4398 		unsigned int i;
4399 
4400 		handle = __le16_to_cpu(info->handle);
4401 		count  = __le16_to_cpu(info->count);
4402 
4403 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4404 		if (!conn)
4405 			continue;
4406 
4407 		/* Check if there really are enough packets outstanding before
4408 		 * attempting to decrease the sent counter, otherwise it could
4409 		 * underflow.
4410 		 */
4411 		if (conn->sent >= count) {
4412 			conn->sent -= count;
4413 		} else {
4414 			bt_dev_warn(hdev, "hcon %p sent %u < count %u",
4415 				    conn, conn->sent, count);
4416 			conn->sent = 0;
4417 		}
4418 
4419 		for (i = 0; i < count; ++i)
4420 			hci_conn_tx_dequeue(conn);
4421 
4422 		switch (conn->type) {
4423 		case ACL_LINK:
4424 			hdev->acl_cnt += count;
4425 			if (hdev->acl_cnt > hdev->acl_pkts)
4426 				hdev->acl_cnt = hdev->acl_pkts;
4427 			break;
4428 
4429 		case LE_LINK:
4430 			if (hdev->le_pkts) {
4431 				hdev->le_cnt += count;
4432 				if (hdev->le_cnt > hdev->le_pkts)
4433 					hdev->le_cnt = hdev->le_pkts;
4434 			} else {
4435 				hdev->acl_cnt += count;
4436 				if (hdev->acl_cnt > hdev->acl_pkts)
4437 					hdev->acl_cnt = hdev->acl_pkts;
4438 			}
4439 			break;
4440 
4441 		case SCO_LINK:
4442 		case ESCO_LINK:
4443 			hdev->sco_cnt += count;
4444 			if (hdev->sco_cnt > hdev->sco_pkts)
4445 				hdev->sco_cnt = hdev->sco_pkts;
4446 
4447 			break;
4448 
4449 		case CIS_LINK:
4450 		case BIS_LINK:
4451 		case PA_LINK:
4452 			if (hdev->iso_pkts) {
4453 				hdev->iso_cnt += count;
4454 				if (hdev->iso_cnt > hdev->iso_pkts)
4455 					hdev->iso_cnt = hdev->iso_pkts;
4456 			} else if (hdev->le_pkts) {
4457 				hdev->le_cnt += count;
4458 				if (hdev->le_cnt > hdev->le_pkts)
4459 					hdev->le_cnt = hdev->le_pkts;
4460 			} else {
4461 				hdev->acl_cnt += count;
4462 				if (hdev->acl_cnt > hdev->acl_pkts)
4463 					hdev->acl_cnt = hdev->acl_pkts;
4464 			}
4465 			break;
4466 
4467 		default:
4468 			bt_dev_err(hdev, "unknown type %d conn %p",
4469 				   conn->type, conn);
4470 			break;
4471 		}
4472 	}
4473 
4474 	queue_work(hdev->workqueue, &hdev->tx_work);
4475 }
4476 
4477 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4478 				struct sk_buff *skb)
4479 {
4480 	struct hci_ev_mode_change *ev = data;
4481 	struct hci_conn *conn;
4482 
4483 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4484 
4485 	hci_dev_lock(hdev);
4486 
4487 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4488 	if (conn) {
4489 		conn->mode = ev->mode;
4490 
4491 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4492 					&conn->flags)) {
4493 			if (conn->mode == HCI_CM_ACTIVE)
4494 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4495 			else
4496 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4497 		}
4498 
4499 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4500 			hci_sco_setup(conn, ev->status);
4501 	}
4502 
4503 	hci_dev_unlock(hdev);
4504 }
4505 
4506 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4507 				     struct sk_buff *skb)
4508 {
4509 	struct hci_ev_pin_code_req *ev = data;
4510 	struct hci_conn *conn;
4511 
4512 	bt_dev_dbg(hdev, "");
4513 
4514 	hci_dev_lock(hdev);
4515 
4516 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4517 	if (!conn)
4518 		goto unlock;
4519 
4520 	if (conn->state == BT_CONNECTED) {
4521 		hci_conn_hold(conn);
4522 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4523 		hci_conn_drop(conn);
4524 	}
4525 
4526 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4527 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4528 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4529 			     sizeof(ev->bdaddr), &ev->bdaddr);
4530 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4531 		u8 secure;
4532 
4533 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4534 			secure = 1;
4535 		else
4536 			secure = 0;
4537 
4538 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4539 	}
4540 
4541 unlock:
4542 	hci_dev_unlock(hdev);
4543 }
4544 
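/* Derive the pending security level from the link key type: a combination
 * key with a 16 digit PIN or an authenticated P-192 key gives high
 * security, an authenticated P-256 key gives FIPS and unauthenticated
 * keys give medium security.
 */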
4545 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4546 {
4547 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4548 		return;
4549 
4550 	conn->pin_length = pin_len;
4551 	conn->key_type = key_type;
4552 
4553 	switch (key_type) {
4554 	case HCI_LK_LOCAL_UNIT:
4555 	case HCI_LK_REMOTE_UNIT:
4556 	case HCI_LK_DEBUG_COMBINATION:
4557 		return;
4558 	case HCI_LK_COMBINATION:
4559 		if (pin_len == 16)
4560 			conn->pending_sec_level = BT_SECURITY_HIGH;
4561 		else
4562 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4563 		break;
4564 	case HCI_LK_UNAUTH_COMBINATION_P192:
4565 	case HCI_LK_UNAUTH_COMBINATION_P256:
4566 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4567 		break;
4568 	case HCI_LK_AUTH_COMBINATION_P192:
4569 		conn->pending_sec_level = BT_SECURITY_HIGH;
4570 		break;
4571 	case HCI_LK_AUTH_COMBINATION_P256:
4572 		conn->pending_sec_level = BT_SECURITY_FIPS;
4573 		break;
4574 	}
4575 }
4576 
4577 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4578 				     struct sk_buff *skb)
4579 {
4580 	struct hci_ev_link_key_req *ev = data;
4581 	struct hci_cp_link_key_reply cp;
4582 	struct hci_conn *conn;
4583 	struct link_key *key;
4584 
4585 	bt_dev_dbg(hdev, "");
4586 
4587 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4588 		return;
4589 
4590 	hci_dev_lock(hdev);
4591 
4592 	key = hci_find_link_key(hdev, &ev->bdaddr);
4593 	if (!key) {
4594 		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4595 		goto not_found;
4596 	}
4597 
4598 	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4599 
4600 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4601 	if (conn) {
4602 		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4603 
4604 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4605 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4606 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4607 			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4608 			goto not_found;
4609 		}
4610 
4611 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4612 		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4613 		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4614 			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4615 			goto not_found;
4616 		}
4617 
4618 		conn_set_key(conn, key->type, key->pin_len);
4619 	}
4620 
4621 	bacpy(&cp.bdaddr, &ev->bdaddr);
4622 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4623 
4624 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4625 
4626 	hci_dev_unlock(hdev);
4627 
4628 	return;
4629 
4630 not_found:
4631 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4632 	hci_dev_unlock(hdev);
4633 }
4634 
4635 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4636 				    struct sk_buff *skb)
4637 {
4638 	struct hci_ev_link_key_notify *ev = data;
4639 	struct hci_conn *conn;
4640 	struct link_key *key;
4641 	bool persistent;
4642 	u8 pin_len = 0;
4643 
4644 	bt_dev_dbg(hdev, "");
4645 
4646 	hci_dev_lock(hdev);
4647 
4648 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4649 	if (!conn)
4650 		goto unlock;
4651 
4652 	/* Ignore NULL link key against CVE-2020-26555 */
4653 	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4654 		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4655 			   &ev->bdaddr);
4656 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4657 		hci_conn_drop(conn);
4658 		goto unlock;
4659 	}
4660 
4661 	hci_conn_hold(conn);
4662 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4663 	hci_conn_drop(conn);
4664 
4665 	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4666 	conn_set_key(conn, ev->key_type, conn->pin_length);
4667 
4668 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4669 		goto unlock;
4670 
4671 	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4672 			        ev->key_type, pin_len, &persistent);
4673 	if (!key)
4674 		goto unlock;
4675 
4676 	/* Update connection information since adding the key will have
4677 	 * fixed up the type in the case of changed combination keys.
4678 	 */
4679 	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4680 		conn_set_key(conn, key->type, key->pin_len);
4681 
4682 	mgmt_new_link_key(hdev, key, persistent);
4683 
4684 	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4685 	 * is set. If it's not set simply remove the key from the kernel
4686 	 * list (we've still notified user space about it but with
4687 	 * store_hint being 0).
4688 	 */
4689 	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4690 	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4691 		list_del_rcu(&key->list);
4692 		kfree_rcu(key, rcu);
4693 		goto unlock;
4694 	}
4695 
4696 	if (persistent)
4697 		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4698 	else
4699 		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4700 
4701 unlock:
4702 	hci_dev_unlock(hdev);
4703 }
4704 
4705 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4706 				 struct sk_buff *skb)
4707 {
4708 	struct hci_ev_clock_offset *ev = data;
4709 	struct hci_conn *conn;
4710 
4711 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4712 
4713 	hci_dev_lock(hdev);
4714 
4715 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4716 	if (conn && !ev->status) {
4717 		struct inquiry_entry *ie;
4718 
4719 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4720 		if (ie) {
4721 			ie->data.clock_offset = ev->clock_offset;
4722 			ie->timestamp = jiffies;
4723 		}
4724 	}
4725 
4726 	hci_dev_unlock(hdev);
4727 }
4728 
4729 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4730 				    struct sk_buff *skb)
4731 {
4732 	struct hci_ev_pkt_type_change *ev = data;
4733 	struct hci_conn *conn;
4734 
4735 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4736 
4737 	hci_dev_lock(hdev);
4738 
4739 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4740 	if (conn && !ev->status)
4741 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4742 
4743 	hci_dev_unlock(hdev);
4744 }
4745 
4746 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4747 				   struct sk_buff *skb)
4748 {
4749 	struct hci_ev_pscan_rep_mode *ev = data;
4750 	struct inquiry_entry *ie;
4751 
4752 	bt_dev_dbg(hdev, "");
4753 
4754 	hci_dev_lock(hdev);
4755 
4756 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4757 	if (ie) {
4758 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4759 		ie->timestamp = jiffies;
4760 	}
4761 
4762 	hci_dev_unlock(hdev);
4763 }
4764 
4765 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4766 					     struct sk_buff *skb)
4767 {
4768 	struct hci_ev_inquiry_result_rssi *ev = edata;
4769 	struct inquiry_data data;
4770 	int i;
4771 
4772 	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4773 
4774 	if (!ev->num)
4775 		return;
4776 
4777 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4778 		return;
4779 
4780 	hci_dev_lock(hdev);
4781 
4782 	if (skb->len == array_size(ev->num,
4783 				   sizeof(struct inquiry_info_rssi_pscan))) {
4784 		struct inquiry_info_rssi_pscan *info;
4785 
4786 		for (i = 0; i < ev->num; i++) {
4787 			u32 flags;
4788 
4789 			info = hci_ev_skb_pull(hdev, skb,
4790 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4791 					       sizeof(*info));
4792 			if (!info) {
4793 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4794 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4795 				goto unlock;
4796 			}
4797 
4798 			bacpy(&data.bdaddr, &info->bdaddr);
4799 			data.pscan_rep_mode	= info->pscan_rep_mode;
4800 			data.pscan_period_mode	= info->pscan_period_mode;
4801 			data.pscan_mode		= info->pscan_mode;
4802 			memcpy(data.dev_class, info->dev_class, 3);
4803 			data.clock_offset	= info->clock_offset;
4804 			data.rssi		= info->rssi;
4805 			data.ssp_mode		= 0x00;
4806 
4807 			flags = hci_inquiry_cache_update(hdev, &data, false);
4808 
4809 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4810 					  info->dev_class, info->rssi,
4811 					  flags, NULL, 0, NULL, 0, 0);
4812 		}
4813 	} else if (skb->len == array_size(ev->num,
4814 					  sizeof(struct inquiry_info_rssi))) {
4815 		struct inquiry_info_rssi *info;
4816 
4817 		for (i = 0; i < ev->num; i++) {
4818 			u32 flags;
4819 
4820 			info = hci_ev_skb_pull(hdev, skb,
4821 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4822 					       sizeof(*info));
4823 			if (!info) {
4824 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4825 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4826 				goto unlock;
4827 			}
4828 
4829 			bacpy(&data.bdaddr, &info->bdaddr);
4830 			data.pscan_rep_mode	= info->pscan_rep_mode;
4831 			data.pscan_period_mode	= info->pscan_period_mode;
4832 			data.pscan_mode		= 0x00;
4833 			memcpy(data.dev_class, info->dev_class, 3);
4834 			data.clock_offset	= info->clock_offset;
4835 			data.rssi		= info->rssi;
4836 			data.ssp_mode		= 0x00;
4837 
4838 			flags = hci_inquiry_cache_update(hdev, &data, false);
4839 
4840 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4841 					  info->dev_class, info->rssi,
4842 					  flags, NULL, 0, NULL, 0, 0);
4843 		}
4844 	} else {
4845 		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4846 			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4847 	}
4848 unlock:
4849 	hci_dev_unlock(hdev);
4850 }
4851 
4852 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4853 					struct sk_buff *skb)
4854 {
4855 	struct hci_ev_remote_ext_features *ev = data;
4856 	struct hci_conn *conn;
4857 
4858 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4859 
4860 	hci_dev_lock(hdev);
4861 
4862 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4863 	if (!conn)
4864 		goto unlock;
4865 
4866 	if (ev->page < HCI_MAX_PAGES)
4867 		memcpy(conn->features[ev->page], ev->features, 8);
4868 
4869 	if (!ev->status && ev->page == 0x01) {
4870 		struct inquiry_entry *ie;
4871 
4872 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4873 		if (ie)
4874 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4875 
4876 		if (ev->features[0] & LMP_HOST_SSP) {
4877 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4878 		} else {
4879 			/* It is mandatory by the Bluetooth specification that
4880 			 * Extended Inquiry Results are only used when Secure
4881 			 * Simple Pairing is enabled, but some devices violate
4882 			 * this.
4883 			 *
4884 			 * To make these devices work, the internal SSP
4885 			 * enabled flag needs to be cleared if the remote host
4886 			 * features do not indicate SSP support */
4887 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4888 		}
4889 
4890 		if (ev->features[0] & LMP_HOST_SC)
4891 			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4892 	}
4893 
4894 	if (conn->state != BT_CONFIG)
4895 		goto unlock;
4896 
4897 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4898 		struct hci_cp_remote_name_req cp;
4899 		memset(&cp, 0, sizeof(cp));
4900 		bacpy(&cp.bdaddr, &conn->dst);
4901 		cp.pscan_rep_mode = 0x02;
4902 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4903 	} else {
4904 		mgmt_device_connected(hdev, conn, NULL, 0);
4905 	}
4906 
4907 	if (!hci_outgoing_auth_needed(hdev, conn)) {
4908 		conn->state = BT_CONNECTED;
4909 		hci_connect_cfm(conn, ev->status);
4910 		hci_conn_drop(conn);
4911 	}
4912 
4913 unlock:
4914 	hci_dev_unlock(hdev);
4915 }
4916 
4917 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4918 				       struct sk_buff *skb)
4919 {
4920 	struct hci_ev_sync_conn_complete *ev = data;
4921 	struct hci_conn *conn;
4922 	u8 status = ev->status;
4923 
4924 	switch (ev->link_type) {
4925 	case SCO_LINK:
4926 	case ESCO_LINK:
4927 		break;
4928 	default:
4929 		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4930 		 * for HCI_Synchronous_Connection_Complete is limited to
4931 		 * either SCO or eSCO
4932 		 */
4933 		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4934 		return;
4935 	}
4936 
4937 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4938 
4939 	hci_dev_lock(hdev);
4940 
4941 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4942 	if (!conn) {
4943 		if (ev->link_type == ESCO_LINK)
4944 			goto unlock;
4945 
4946 		/* When the link type in the event indicates SCO connection
4947 		 * and lookup of the connection object fails, then check
4948 		 * if an eSCO connection object exists.
4949 		 *
4950 		 * The core limits the synchronous connections to either
4951 		 * SCO or eSCO. The eSCO connection is preferred and tried
4952 		 * to be setup first and until successfully established,
4953 		 * the link type will be hinted as eSCO.
4954 		 */
4955 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4956 		if (!conn)
4957 			goto unlock;
4958 	}
4959 
4960 	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4961 	 * Processing it more than once per connection can corrupt kernel memory.
4962 	 *
4963 	 * As the connection handle is set here for the first time, it indicates
4964 	 * whether the connection is already set up.
4965 	 */
4966 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
4967 		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4968 		goto unlock;
4969 	}
4970 
4971 	switch (status) {
4972 	case 0x00:
4973 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
4974 		if (status) {
4975 			conn->state = BT_CLOSED;
4976 			break;
4977 		}
4978 
4979 		conn->state  = BT_CONNECTED;
4980 		conn->type   = ev->link_type;
4981 
4982 		hci_debugfs_create_conn(conn);
4983 		hci_conn_add_sysfs(conn);
4984 		break;
4985 
4986 	case 0x10:	/* Connection Accept Timeout */
4987 	case 0x0d:	/* Connection Rejected due to Limited Resources */
4988 	case 0x11:	/* Unsupported Feature or Parameter Value */
4989 	case 0x1c:	/* SCO interval rejected */
4990 	case 0x1a:	/* Unsupported Remote Feature */
4991 	case 0x1e:	/* Invalid LMP Parameters */
4992 	case 0x1f:	/* Unspecified error */
4993 	case 0x20:	/* Unsupported LMP Parameter value */
4994 		if (conn->out) {
4995 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4996 					(hdev->esco_type & EDR_ESCO_MASK);
4997 			if (hci_setup_sync(conn, conn->parent->handle))
4998 				goto unlock;
4999 		}
5000 		fallthrough;
5001 
5002 	default:
5003 		conn->state = BT_CLOSED;
5004 		break;
5005 	}
5006 
5007 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5008 	/* Notify only in case of SCO over the HCI transport data path, which
5009 	 * is zero; a non-zero value indicates a non-HCI transport data path.
5010 	 */
5011 	if (conn->codec.data_path == 0 && hdev->notify) {
5012 		switch (ev->air_mode) {
5013 		case 0x02:
5014 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5015 			break;
5016 		case 0x03:
5017 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5018 			break;
5019 		}
5020 	}
5021 
5022 	hci_connect_cfm(conn, status);
5023 	if (status)
5024 		hci_conn_del(conn);
5025 
5026 unlock:
5027 	hci_dev_unlock(hdev);
5028 }
5029 
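/* EIR data is a sequence of length-prefixed fields; a field length of
 * zero terminates the list. Return the number of meaningful bytes.
 */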
5030 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5031 {
5032 	size_t parsed = 0;
5033 
5034 	while (parsed < eir_len) {
5035 		u8 field_len = eir[0];
5036 
5037 		if (field_len == 0)
5038 			return parsed;
5039 
5040 		parsed += field_len + 1;
5041 		eir += field_len + 1;
5042 	}
5043 
5044 	return eir_len;
5045 }
5046 
5047 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5048 					    struct sk_buff *skb)
5049 {
5050 	struct hci_ev_ext_inquiry_result *ev = edata;
5051 	struct inquiry_data data;
5052 	size_t eir_len;
5053 	int i;
5054 
5055 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5056 			     flex_array_size(ev, info, ev->num)))
5057 		return;
5058 
5059 	bt_dev_dbg(hdev, "num %d", ev->num);
5060 
5061 	if (!ev->num)
5062 		return;
5063 
5064 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5065 		return;
5066 
5067 	hci_dev_lock(hdev);
5068 
5069 	for (i = 0; i < ev->num; i++) {
5070 		struct extended_inquiry_info *info = &ev->info[i];
5071 		u32 flags;
5072 		bool name_known;
5073 
5074 		bacpy(&data.bdaddr, &info->bdaddr);
5075 		data.pscan_rep_mode	= info->pscan_rep_mode;
5076 		data.pscan_period_mode	= info->pscan_period_mode;
5077 		data.pscan_mode		= 0x00;
5078 		memcpy(data.dev_class, info->dev_class, 3);
5079 		data.clock_offset	= info->clock_offset;
5080 		data.rssi		= info->rssi;
5081 		data.ssp_mode		= 0x01;
5082 
5083 		if (hci_dev_test_flag(hdev, HCI_MGMT))
5084 			name_known = eir_get_data(info->data,
5085 						  sizeof(info->data),
5086 						  EIR_NAME_COMPLETE, NULL);
5087 		else
5088 			name_known = true;
5089 
5090 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5091 
5092 		eir_len = eir_get_length(info->data, sizeof(info->data));
5093 
5094 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5095 				  info->dev_class, info->rssi,
5096 				  flags, info->data, eir_len, NULL, 0, 0);
5097 	}
5098 
5099 	hci_dev_unlock(hdev);
5100 }
5101 
5102 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5103 					 struct sk_buff *skb)
5104 {
5105 	struct hci_ev_key_refresh_complete *ev = data;
5106 	struct hci_conn *conn;
5107 
5108 	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5109 		   __le16_to_cpu(ev->handle));
5110 
5111 	hci_dev_lock(hdev);
5112 
5113 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5114 	if (!conn)
5115 		goto unlock;
5116 
5117 	/* For BR/EDR the necessary steps are taken through the
5118 	 * auth_complete event.
5119 	 */
5120 	if (conn->type != LE_LINK)
5121 		goto unlock;
5122 
5123 	if (!ev->status)
5124 		conn->sec_level = conn->pending_sec_level;
5125 
5126 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5127 
5128 	if (ev->status && conn->state == BT_CONNECTED) {
5129 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5130 		hci_conn_drop(conn);
5131 		goto unlock;
5132 	}
5133 
5134 	if (conn->state == BT_CONFIG) {
5135 		if (!ev->status)
5136 			conn->state = BT_CONNECTED;
5137 
5138 		hci_connect_cfm(conn, ev->status);
5139 		hci_conn_drop(conn);
5140 	} else {
5141 		hci_auth_cfm(conn, ev->status);
5142 
5143 		hci_conn_hold(conn);
5144 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5145 		hci_conn_drop(conn);
5146 	}
5147 
5148 unlock:
5149 	hci_dev_unlock(hdev);
5150 }
5151 
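/* Compute the authentication requirements to send in the IO Capability
 * Reply, combining the remote requirements with the local MITM bit.
 */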
5152 static u8 hci_get_auth_req(struct hci_conn *conn)
5153 {
5154 	/* If remote requests no-bonding follow that lead */
5155 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5156 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5157 		return conn->remote_auth | (conn->auth_type & 0x01);
5158 
5159 	/* If both remote and local have enough IO capabilities, require
5160 	 * MITM protection
5161 	 */
5162 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5163 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5164 		return conn->remote_auth | 0x01;
5165 
5166 	/* No MITM protection possible so ignore remote requirement */
5167 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5168 }
5169 
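/* Compute the OOB Data Present value for the IO Capability Reply from the
 * stored remote OOB data, requiring P-256 values when Secure Connections
 * Only mode is enabled.
 */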
5170 static u8 bredr_oob_data_present(struct hci_conn *conn)
5171 {
5172 	struct hci_dev *hdev = conn->hdev;
5173 	struct oob_data *data;
5174 
5175 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5176 	if (!data)
5177 		return 0x00;
5178 
5179 	if (bredr_sc_enabled(hdev)) {
5180 		/* When Secure Connections is enabled, then just
5181 		 * return the present value stored with the OOB
5182 		 * data. The stored value contains the right present
5183 		 * information. However it can only be trusted when
5184 		 * not in Secure Connection Only mode.
5185 		 */
5186 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5187 			return data->present;
5188 
5189 		/* When Secure Connections Only mode is enabled, then
5190 		 * the P-256 values are required. If they are not
5191 		 * available, then do not declare that OOB data is
5192 		 * present.
5193 		 */
5194 		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5195 		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5196 			return 0x00;
5197 
5198 		return 0x02;
5199 	}
5200 
5201 	/* When Secure Connections is not enabled or not actually
5202 	 * supported by the hardware, then check if the
5203 	 * P-192 data values are present.
5204 	 */
5205 	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5206 	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5207 		return 0x00;
5208 
5209 	return 0x01;
5210 }
5211 
5212 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5213 				    struct sk_buff *skb)
5214 {
5215 	struct hci_ev_io_capa_request *ev = data;
5216 	struct hci_conn *conn;
5217 
5218 	bt_dev_dbg(hdev, "");
5219 
5220 	hci_dev_lock(hdev);
5221 
5222 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5223 	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5224 		goto unlock;
5225 
5226 	/* Assume remote supports SSP since it has triggered this event */
5227 	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5228 
5229 	hci_conn_hold(conn);
5230 
5231 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5232 		goto unlock;
5233 
5234 	/* Allow pairing if we're bondable, if we are the initiators of
5235 	 * the pairing, or if the remote is not requesting bonding.
5236 	 */
5237 	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5238 	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5239 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5240 		struct hci_cp_io_capability_reply cp;
5241 
5242 		bacpy(&cp.bdaddr, &ev->bdaddr);
5243 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo
5244 		 * as the former is not supported by the BT spec. */
5245 		cp.capability = (conn->io_capability == 0x04) ?
5246 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5247 
5248 		/* If we are initiators, there is no remote information yet */
5249 		if (conn->remote_auth == 0xff) {
5250 			/* Request MITM protection if our IO caps allow it
5251 			 * except for the no-bonding case.
5252 			 */
5253 			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5254 			    conn->auth_type != HCI_AT_NO_BONDING)
5255 				conn->auth_type |= 0x01;
5256 		} else {
5257 			conn->auth_type = hci_get_auth_req(conn);
5258 		}
5259 
5260 		/* If we're not bondable, force one of the non-bondable
5261 		 * authentication requirement values.
5262 		 */
5263 		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5264 			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5265 
5266 		cp.authentication = conn->auth_type;
5267 		cp.oob_data = bredr_oob_data_present(conn);
5268 
5269 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5270 			     sizeof(cp), &cp);
5271 	} else {
5272 		struct hci_cp_io_capability_neg_reply cp;
5273 
5274 		bacpy(&cp.bdaddr, &ev->bdaddr);
5275 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5276 
5277 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5278 			     sizeof(cp), &cp);
5279 	}
5280 
5281 unlock:
5282 	hci_dev_unlock(hdev);
5283 }
5284 
5285 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5286 				  struct sk_buff *skb)
5287 {
5288 	struct hci_ev_io_capa_reply *ev = data;
5289 	struct hci_conn *conn;
5290 
5291 	bt_dev_dbg(hdev, "");
5292 
5293 	hci_dev_lock(hdev);
5294 
5295 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5296 	if (!conn)
5297 		goto unlock;
5298 
5299 	conn->remote_cap = ev->capability;
5300 	conn->remote_auth = ev->authentication;
5301 
5302 unlock:
5303 	hci_dev_unlock(hdev);
5304 }
5305 
5306 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5307 					 struct sk_buff *skb)
5308 {
5309 	struct hci_ev_user_confirm_req *ev = data;
5310 	int loc_mitm, rem_mitm, confirm_hint = 0;
5311 	struct hci_conn *conn;
5312 
5313 	bt_dev_dbg(hdev, "");
5314 
5315 	hci_dev_lock(hdev);
5316 
5317 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5318 		goto unlock;
5319 
5320 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5321 	if (!conn)
5322 		goto unlock;
5323 
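	/* Bit 0 of the authentication requirement is the MITM flag of the
	 * respective side.
	 */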
5324 	loc_mitm = (conn->auth_type & 0x01);
5325 	rem_mitm = (conn->remote_auth & 0x01);
5326 
5327 	/* If we require MITM but the remote device can't provide that
5328 	 * (it has NoInputNoOutput) then reject the confirmation
5329 	 * request. We check the security level here since it doesn't
5330 	 * necessarily match conn->auth_type.
5331 	 */
5332 	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5333 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5334 		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5335 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5336 			     sizeof(ev->bdaddr), &ev->bdaddr);
5337 		goto unlock;
5338 	}
5339 
5340 	/* If neither side requires MITM protection, use the JUST_CFM method */
5341 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5342 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5343 
5344 		/* If we're not the initiator of the authentication request and
5345 		 * the local IO capability is not NoInputNoOutput, use the
5346 		 * JUST_WORKS method (mgmt_user_confirm with confirm_hint set to 1).
5347 		 */
5348 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5349 		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
5350 			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5351 			confirm_hint = 1;
5352 			goto confirm;
5353 		}
5354 
5355 		/* If a link key already exists in the local host, leave the
5356 		 * decision to user space since the remote device could be
5357 		 * legitimate or malicious.
5358 		 */
5359 		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5360 			bt_dev_dbg(hdev, "Local host already has link key");
5361 			confirm_hint = 1;
5362 			goto confirm;
5363 		}
5364 
5365 		BT_DBG("Auto-accept of user confirmation with %ums delay",
5366 		       hdev->auto_accept_delay);
5367 
5368 		if (hdev->auto_accept_delay > 0) {
5369 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5370 			queue_delayed_work(conn->hdev->workqueue,
5371 					   &conn->auto_accept_work, delay);
5372 			goto unlock;
5373 		}
5374 
5375 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5376 			     sizeof(ev->bdaddr), &ev->bdaddr);
5377 		goto unlock;
5378 	}
5379 
5380 confirm:
5381 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5382 				  le32_to_cpu(ev->passkey), confirm_hint);
5383 
5384 unlock:
5385 	hci_dev_unlock(hdev);
5386 }
5387 
5388 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5389 					 struct sk_buff *skb)
5390 {
5391 	struct hci_ev_user_passkey_req *ev = data;
5392 
5393 	bt_dev_dbg(hdev, "");
5394 
5395 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5396 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5397 }
5398 
5399 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5400 					struct sk_buff *skb)
5401 {
5402 	struct hci_ev_user_passkey_notify *ev = data;
5403 	struct hci_conn *conn;
5404 
5405 	bt_dev_dbg(hdev, "");
5406 
5407 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5408 	if (!conn)
5409 		return;
5410 
5411 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5412 	conn->passkey_entered = 0;
5413 
5414 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5415 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5416 					 conn->dst_type, conn->passkey_notify,
5417 					 conn->passkey_entered);
5418 }
5419 
5420 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5421 				    struct sk_buff *skb)
5422 {
5423 	struct hci_ev_keypress_notify *ev = data;
5424 	struct hci_conn *conn;
5425 
5426 	bt_dev_dbg(hdev, "");
5427 
5428 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5429 	if (!conn)
5430 		return;
5431 
5432 	switch (ev->type) {
5433 	case HCI_KEYPRESS_STARTED:
5434 		conn->passkey_entered = 0;
5435 		return;
5436 
5437 	case HCI_KEYPRESS_ENTERED:
5438 		conn->passkey_entered++;
5439 		break;
5440 
5441 	case HCI_KEYPRESS_ERASED:
5442 		conn->passkey_entered--;
5443 		break;
5444 
5445 	case HCI_KEYPRESS_CLEARED:
5446 		conn->passkey_entered = 0;
5447 		break;
5448 
5449 	case HCI_KEYPRESS_COMPLETED:
5450 		return;
5451 	}
5452 
5453 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5454 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5455 					 conn->dst_type, conn->passkey_notify,
5456 					 conn->passkey_entered);
5457 }
5458 
5459 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5460 					 struct sk_buff *skb)
5461 {
5462 	struct hci_ev_simple_pair_complete *ev = data;
5463 	struct hci_conn *conn;
5464 
5465 	bt_dev_dbg(hdev, "");
5466 
5467 	hci_dev_lock(hdev);
5468 
5469 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5470 	if (!conn || !hci_conn_ssp_enabled(conn))
5471 		goto unlock;
5472 
5473 	/* Reset the authentication requirement to unknown */
5474 	conn->remote_auth = 0xff;
5475 
5476 	/* To avoid duplicate auth_failed events to user space we check
5477 	 * the HCI_CONN_AUTH_PEND flag, which is set if we initiated
5478 	 * the authentication. A traditional auth_complete event is
5479 	 * always produced for the initiator and is also mapped to
5480 	 * the mgmt_auth_failed event. */
5481 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5482 		mgmt_auth_failed(conn, ev->status);
5483 
5484 	hci_conn_drop(conn);
5485 
5486 unlock:
5487 	hci_dev_unlock(hdev);
5488 }
5489 
5490 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5491 					 struct sk_buff *skb)
5492 {
5493 	struct hci_ev_remote_host_features *ev = data;
5494 	struct inquiry_entry *ie;
5495 	struct hci_conn *conn;
5496 
5497 	bt_dev_dbg(hdev, "");
5498 
5499 	hci_dev_lock(hdev);
5500 
5501 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5502 	if (conn)
5503 		memcpy(conn->features[1], ev->features, 8);
5504 
5505 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5506 	if (ie)
5507 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5508 
5509 	hci_dev_unlock(hdev);
5510 }
5511 
5512 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5513 					    struct sk_buff *skb)
5514 {
5515 	struct hci_ev_remote_oob_data_request *ev = edata;
5516 	struct oob_data *data;
5517 
5518 	bt_dev_dbg(hdev, "");
5519 
5520 	hci_dev_lock(hdev);
5521 
5522 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5523 		goto unlock;
5524 
5525 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5526 	if (!data) {
5527 		struct hci_cp_remote_oob_data_neg_reply cp;
5528 
5529 		bacpy(&cp.bdaddr, &ev->bdaddr);
5530 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5531 			     sizeof(cp), &cp);
5532 		goto unlock;
5533 	}
5534 
5535 	if (bredr_sc_enabled(hdev)) {
5536 		struct hci_cp_remote_oob_ext_data_reply cp;
5537 
5538 		bacpy(&cp.bdaddr, &ev->bdaddr);
5539 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5540 			memset(cp.hash192, 0, sizeof(cp.hash192));
5541 			memset(cp.rand192, 0, sizeof(cp.rand192));
5542 		} else {
5543 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5544 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5545 		}
5546 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5547 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5548 
5549 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5550 			     sizeof(cp), &cp);
5551 	} else {
5552 		struct hci_cp_remote_oob_data_reply cp;
5553 
5554 		bacpy(&cp.bdaddr, &ev->bdaddr);
5555 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5556 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5557 
5558 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5559 			     sizeof(cp), &cp);
5560 	}
5561 
5562 unlock:
5563 	hci_dev_unlock(hdev);
5564 }
5565 
5566 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5567 				u8 bdaddr_type, bdaddr_t *local_rpa)
5568 {
5569 	if (conn->out) {
5570 		conn->dst_type = bdaddr_type;
5571 		conn->resp_addr_type = bdaddr_type;
5572 		bacpy(&conn->resp_addr, bdaddr);
5573 
5574 		/* If the controller has set a Local RPA then it must be
5575 		 * used instead of hdev->rpa.
5576 		 */
5577 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5578 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5579 			bacpy(&conn->init_addr, local_rpa);
5580 		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5581 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5582 			bacpy(&conn->init_addr, &conn->hdev->rpa);
5583 		} else {
5584 			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5585 						  &conn->init_addr_type);
5586 		}
5587 	} else {
5588 		conn->resp_addr_type = conn->hdev->adv_addr_type;
5589 		/* If the controller has set a Local RPA then it must be
5590 		 * used instead of hdev->rpa.
5591 		 */
5592 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5593 			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5594 			bacpy(&conn->resp_addr, local_rpa);
5595 		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5596 			/* In case of ext adv, resp_addr will be updated in
5597 			 * Adv Terminated event.
5598 			 */
5599 			if (!ext_adv_capable(conn->hdev))
5600 				bacpy(&conn->resp_addr,
5601 				      &conn->hdev->random_addr);
5602 		} else {
5603 			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5604 		}
5605 
5606 		conn->init_addr_type = bdaddr_type;
5607 		bacpy(&conn->init_addr, bdaddr);
5608 
5609 		/* For incoming connections, set the default minimum
5610 		 * and maximum connection interval. They will be used
5611 		 * to check if the parameters are in range and, if not,
5612 		 * trigger the connection update procedure.
5613 		 */
5614 		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5615 		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5616 	}
5617 }
5618 
5619 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5620 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5621 				 bdaddr_t *local_rpa, u8 role, u16 handle,
5622 				 u16 interval, u16 latency,
5623 				 u16 supervision_timeout)
5624 {
5625 	struct hci_conn_params *params;
5626 	struct hci_conn *conn;
5627 	struct smp_irk *irk;
5628 	u8 addr_type;
5629 
5630 	hci_dev_lock(hdev);
5631 
5632 	/* All controllers implicitly stop advertising in the event of a
5633 	 * connection, so ensure that the state bit is cleared.
5634 	 */
5635 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5636 
5637 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5638 	if (!conn) {
5639 		/* In case of an error status with no connection pending,
5640 		 * just unlock as there is nothing to clean up.
5641 		 */
5642 		if (status)
5643 			goto unlock;
5644 
5645 		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5646 		if (IS_ERR(conn)) {
5647 			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
5648 			goto unlock;
5649 		}
5650 
5651 		conn->dst_type = bdaddr_type;
5652 
5653 		/* If we didn't have an hci_conn object previously
5654 		 * but we're in the central role, this must be something
5655 		 * initiated using an accept list. Since accept list based
5656 		 * connections are not "first class citizens" we don't
5657 		 * have full tracking of them. Therefore, we go ahead
5658 		 * with a "best effort" approach of determining the
5659 		 * initiator address based on the HCI_PRIVACY flag.
5660 		 */
5661 		if (conn->out) {
5662 			conn->resp_addr_type = bdaddr_type;
5663 			bacpy(&conn->resp_addr, bdaddr);
5664 			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5665 				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5666 				bacpy(&conn->init_addr, &hdev->rpa);
5667 			} else {
5668 				hci_copy_identity_address(hdev,
5669 							  &conn->init_addr,
5670 							  &conn->init_addr_type);
5671 			}
5672 		}
5673 	} else {
5674 		cancel_delayed_work(&conn->le_conn_timeout);
5675 	}
5676 
5677 	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5678 	 * Processing it more than once per connection can corrupt kernel memory.
5679 	 *
5680 	 * As the connection handle is set here for the first time, it indicates
5681 	 * whether the connection is already set up.
5682 	 */
5683 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5684 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5685 		goto unlock;
5686 	}
5687 
5688 	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5689 
5690 	/* Lookup the identity address from the stored connection
5691 	 * address and address type.
5692 	 *
5693 	 * When establishing connections to an identity address, the
5694 	 * connection procedure will store the resolvable random
5695 	 * address first. Now if it can be converted back into the
5696 	 * identity address, start using the identity address from
5697 	 * now on.
5698 	 */
5699 	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5700 	if (irk) {
5701 		bacpy(&conn->dst, &irk->bdaddr);
5702 		conn->dst_type = irk->addr_type;
5703 	}
5704 
5705 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5706 
5707 	/* All connection failure handling is taken care of by the
5708 	 * hci_conn_failed function which is triggered by the HCI
5709 	 * request completion callbacks used for connecting.
5710 	 */
5711 	if (status || hci_conn_set_handle(conn, handle))
5712 		goto unlock;
5713 
5714 	/* Drop the connection if it has been aborted */
5715 	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5716 		hci_conn_drop(conn);
5717 		goto unlock;
5718 	}
5719 
5720 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5721 		addr_type = BDADDR_LE_PUBLIC;
5722 	else
5723 		addr_type = BDADDR_LE_RANDOM;
5724 
5725 	/* Drop the connection if the device is blocked */
5726 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5727 		hci_conn_drop(conn);
5728 		goto unlock;
5729 	}
5730 
5731 	mgmt_device_connected(hdev, conn, NULL, 0);
5732 
5733 	conn->sec_level = BT_SECURITY_LOW;
5734 	conn->state = BT_CONFIG;
5735 
5736 	/* Store current advertising instance as connection advertising instance
5737 	 * when software rotation is in use so it can be re-enabled when
5738 	 * disconnected.
5739 	 */
5740 	if (!ext_adv_capable(hdev))
5741 		conn->adv_instance = hdev->cur_adv_instance;
5742 
5743 	conn->le_conn_interval = interval;
5744 	conn->le_conn_latency = latency;
5745 	conn->le_supv_timeout = supervision_timeout;
5746 
5747 	hci_debugfs_create_conn(conn);
5748 	hci_conn_add_sysfs(conn);
5749 
5750 	/* The remote features procedure is defined for central
5751 	 * role only, so request the remote features only for a
5752 	 * connection that we initiated.
5753 	 *
5754 	 * If the local controller supports peripheral-initiated features
5755 	 * exchange, then requesting the remote features in peripheral
5756 	 * role is possible. Otherwise just transition into the
5757 	 * connected state without requesting the remote features.
5758 	 */
5759 	if (conn->out ||
5760 	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5761 		struct hci_cp_le_read_remote_features cp;
5762 
5763 		cp.handle = __cpu_to_le16(conn->handle);
5764 
5765 		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5766 			     sizeof(cp), &cp);
5767 
5768 		hci_conn_hold(conn);
5769 	} else {
5770 		conn->state = BT_CONNECTED;
5771 		hci_connect_cfm(conn, status);
5772 	}
5773 
5774 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5775 					   conn->dst_type);
5776 	if (params) {
5777 		hci_pend_le_list_del_init(params);
5778 		if (params->conn) {
5779 			hci_conn_drop(params->conn);
5780 			hci_conn_put(params->conn);
5781 			params->conn = NULL;
5782 		}
5783 	}
5784 
5785 unlock:
5786 	hci_update_passive_scan(hdev);
5787 	hci_dev_unlock(hdev);
5788 }
5789 
5790 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5791 				     struct sk_buff *skb)
5792 {
5793 	struct hci_ev_le_conn_complete *ev = data;
5794 
5795 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5796 
5797 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5798 			     NULL, ev->role, le16_to_cpu(ev->handle),
5799 			     le16_to_cpu(ev->interval),
5800 			     le16_to_cpu(ev->latency),
5801 			     le16_to_cpu(ev->supervision_timeout));
5802 }
5803 
5804 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5805 					 struct sk_buff *skb)
5806 {
5807 	struct hci_ev_le_enh_conn_complete *ev = data;
5808 
5809 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5810 
5811 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5812 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5813 			     le16_to_cpu(ev->interval),
5814 			     le16_to_cpu(ev->latency),
5815 			     le16_to_cpu(ev->supervision_timeout));
5816 }
5817 
5818 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5819 				    struct sk_buff *skb)
5820 {
5821 	struct hci_evt_le_ext_adv_set_term *ev = data;
5822 	struct hci_conn *conn;
5823 	struct adv_info *adv, *n;
5824 
5825 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5826 
5827 	/* The Bluetooth Core 5.3 specification clearly states that this event
5828 	 * shall not be sent when the Host disables the advertising set. So in
5829 	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5830 	 *
5831 	 * When the Host disables an advertising set, all cleanup is done via
5832 	 * its command callback and need not be duplicated here.
5833 	 */
5834 	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5835 		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5836 		return;
5837 	}
5838 
5839 	hci_dev_lock(hdev);
5840 
5841 	adv = hci_find_adv_instance(hdev, ev->handle);
5842 
5843 	if (ev->status) {
5844 		if (!adv)
5845 			goto unlock;
5846 
5847 		/* Remove advertising as it has been terminated */
5848 		hci_remove_adv_instance(hdev, ev->handle);
5849 		mgmt_advertising_removed(NULL, hdev, ev->handle);
5850 
5851 		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5852 			if (adv->enabled)
5853 				goto unlock;
5854 		}
5855 
5856 		/* We are no longer advertising, clear HCI_LE_ADV */
5857 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
5858 		goto unlock;
5859 	}
5860 
5861 	if (adv)
5862 		adv->enabled = false;
5863 
5864 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5865 	if (conn) {
5866 		/* Store handle in the connection so the correct advertising
5867 		 * instance can be re-enabled when disconnected.
5868 		 */
5869 		conn->adv_instance = ev->handle;
5870 
5871 		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5872 		    bacmp(&conn->resp_addr, BDADDR_ANY))
5873 			goto unlock;
5874 
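		/* Handle 0 is the default advertising instance, which uses
		 * the controller-wide random address rather than a per-set
		 * random address.
		 */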
5875 		if (!ev->handle) {
5876 			bacpy(&conn->resp_addr, &hdev->random_addr);
5877 			goto unlock;
5878 		}
5879 
5880 		if (adv)
5881 			bacpy(&conn->resp_addr, &adv->random_addr);
5882 	}
5883 
5884 unlock:
5885 	hci_dev_unlock(hdev);
5886 }
5887 
5888 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5889 					    struct sk_buff *skb)
5890 {
5891 	struct hci_ev_le_conn_update_complete *ev = data;
5892 	struct hci_conn *conn;
5893 
5894 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5895 
5896 	if (ev->status)
5897 		return;
5898 
5899 	hci_dev_lock(hdev);
5900 
5901 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5902 	if (conn) {
5903 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5904 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5905 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5906 	}
5907 
5908 	hci_dev_unlock(hdev);
5909 }
5910 
5911 /* This function requires the caller holds hdev->lock */
5912 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5913 					      bdaddr_t *addr,
5914 					      u8 addr_type, bool addr_resolved,
5915 					      u8 adv_type, u8 phy, u8 sec_phy)
5916 {
5917 	struct hci_conn *conn;
5918 	struct hci_conn_params *params;
5919 
5920 	/* If the event is not connectable don't proceed further */
5921 	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5922 		return NULL;
5923 
5924 	/* Ignore if the device is blocked or hdev is suspended */
5925 	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5926 	    hdev->suspended)
5927 		return NULL;
5928 
5929 	/* Most controllers will fail if we try to create new connections
5930 	 * while we have an existing one in peripheral role.
5931 	 */
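	/* le_states[3] bit 4 (bit 28 of the supported-states mask) is what
	 * this check treats as support for concurrent central and
	 * peripheral connections.
	 */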
5932 	if (hdev->conn_hash.le_num_peripheral > 0 &&
5933 	    (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) ||
5934 	     !(hdev->le_states[3] & 0x10)))
5935 		return NULL;
5936 
5937 	/* If we're not connectable, only connect to devices that we have in
5938 	 * our pend_le_conns list.
5939 	 */
5940 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5941 					   addr_type);
5942 	if (!params)
5943 		return NULL;
5944 
5945 	if (!params->explicit_connect) {
5946 		switch (params->auto_connect) {
5947 		case HCI_AUTO_CONN_DIRECT:
5948 			/* Only devices advertising with ADV_DIRECT_IND
5949 			 * trigger a connection attempt. This allows
5950 			 * incoming connections from peripheral devices.
5951 			 */
5952 			if (adv_type != LE_ADV_DIRECT_IND)
5953 				return NULL;
5954 			break;
5955 		case HCI_AUTO_CONN_ALWAYS:
5956 			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
5957 			 * trigger a connection attempt. This means that
5958 			 * incoming connections from peripheral devices are
5959 			 * accepted and outgoing connections to peripheral
5960 			 * devices are established when found.
5961 			 */
5962 			break;
5963 		default:
5964 			return NULL;
5965 		}
5966 	}
5967 
5968 	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5969 			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5970 			      HCI_ROLE_MASTER, phy, sec_phy);
5971 	if (!IS_ERR(conn)) {
5972 		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5973 		 * by the higher layer that tried to connect; if not, then
5974 		 * store the pointer since we don't really have any
5975 		 * other owner of the object besides the params that
5976 		 * triggered it. This way we can abort the connection if
5977 		 * the parameters get removed and keep the reference
5978 		 * count consistent once the connection is established.
5979 		 */
5980 
5981 		if (!params->explicit_connect)
5982 			params->conn = hci_conn_get(conn);
5983 
5984 		return conn;
5985 	}
5986 
5987 	switch (PTR_ERR(conn)) {
5988 	case -EBUSY:
5989 		/* If hci_connect() returns -EBUSY it means there is already
5990 		 * an LE connection attempt going on. Since controllers don't
5991 		 * support more than one connection attempt at a time, we
5992 		 * don't consider this an error case.
5993 		 */
5994 		break;
5995 	default:
5996 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5997 		return NULL;
5998 	}
5999 
6000 	return NULL;
6001 }
6002 
6003 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6004 			       u8 bdaddr_type, bdaddr_t *direct_addr,
6005 			       u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
6006 			       u8 *data, u8 len, bool ext_adv, bool ctl_time,
6007 			       u64 instant)
6008 {
6009 	struct discovery_state *d = &hdev->discovery;
6010 	struct smp_irk *irk;
6011 	struct hci_conn *conn;
6012 	bool match, bdaddr_resolved;
6013 	u32 flags;
6014 	u8 *ptr;
6015 
6016 	switch (type) {
6017 	case LE_ADV_IND:
6018 	case LE_ADV_DIRECT_IND:
6019 	case LE_ADV_SCAN_IND:
6020 	case LE_ADV_NONCONN_IND:
6021 	case LE_ADV_SCAN_RSP:
6022 		break;
6023 	default:
6024 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6025 				       "type: 0x%02x", type);
6026 		return;
6027 	}
6028 
6029 	if (len > max_adv_len(hdev)) {
6030 		bt_dev_err_ratelimited(hdev,
6031 				       "adv larger than maximum supported");
6032 		return;
6033 	}
6034 
6035 	/* Find the end of the data in case the report contains padded zero
6036 	 * bytes at the end causing an invalid length value.
6037 	 *
6038 	 * When data is NULL, len is 0 so there is no need for extra ptr
6039 	 * check as 'ptr < data + 0' is already false in that case.
6040 	 */
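	/* Each AD structure is length-prefixed ([length][type][payload]),
	 * so a zero length octet terminates the walk below.
	 */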
6041 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6042 		if (ptr + 1 + *ptr > data + len)
6043 			break;
6044 	}
6045 
6046 	/* Adjust for actual length. This handles the case when remote
6047 	 * device is advertising with incorrect data length.
6048 	 */
6049 	len = ptr - data;
6050 
6051 	/* If the direct address is present, then this report is from
6052 	 * an LE Direct Advertising Report event. In that case it is
6053 	 * important to check whether the address matches the local
6054 	 * controller address.
6055 	 *
6056 	 * If local privacy is not enabled, the controller shall not
6057 	 * generate such an event, since according to the specification it
6058 	 * is only valid for filter_policy 0x02 and 0x03. A controller that
6059 	 * generates it anyway is probably broken and may not generate any
6060 	 * other events either, which could break the auto-connect logic.
6061 	 * So when local privacy is not enabled, the direct_addr is ignored
6062 	 * and the report is handled as a regular one.
6063 	 */
6064 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr &&
6065 	    hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6066 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6067 						  &bdaddr_resolved);
6068 
6069 		/* Only resolvable random addresses are valid for these
6070 		 * kinds of reports; others can be ignored.
6071 		 */
6072 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6073 			return;
6074 
6075 		/* If the local IRK of the controller does not match
6076 		 * with the resolvable random address provided, then
6077 		 * this report can be ignored.
6078 		 */
6079 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6080 			return;
6081 	}
6082 
6083 	/* Check if we need to convert to identity address */
6084 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6085 	if (irk) {
6086 		bdaddr = &irk->bdaddr;
6087 		bdaddr_type = irk->addr_type;
6088 	}
6089 
6090 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6091 
6092 	/* Check if we have been requested to connect to this device.
6093 	 *
6094 	 * direct_addr is set only for directed advertising reports (it is NULL
6095 	 * for advertising reports) and is already verified to be RPA above.
6096 	 */
6097 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6098 				     type, phy, sec_phy);
6099 	if (!ext_adv && conn && type == LE_ADV_IND &&
6100 	    len <= max_adv_len(hdev)) {
6101 		/* Store report for later inclusion by
6102 		 * mgmt_device_connected
6103 		 */
6104 		memcpy(conn->le_adv_data, data, len);
6105 		conn->le_adv_data_len = len;
6106 	}
6107 
6108 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6109 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6110 	else
6111 		flags = 0;
6112 
6113 	/* All scan results should be sent up for Mesh systems */
6114 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6115 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6116 				  rssi, flags, data, len, NULL, 0, instant);
6117 		return;
6118 	}
6119 
6120 	/* Passive scanning shouldn't trigger any device found events,
6121 	 * except for devices marked as CONN_REPORT for which we do send
6122 	 * device found events, or advertisement monitoring requested.
6123 	 * device found events, or when advertisement monitoring was requested.
6124 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6125 		if (type == LE_ADV_DIRECT_IND)
6126 			return;
6127 
6128 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6129 					       bdaddr, bdaddr_type) &&
6130 		    idr_is_empty(&hdev->adv_monitors_idr))
6131 			return;
6132 
6133 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6134 				  rssi, flags, data, len, NULL, 0, 0);
6135 		return;
6136 	}
6137 
6138 	/* When receiving a scan response, there is no way to
6139 	 * know if the remote device is connectable or not. However
6140 	 * since scan responses are merged with a previously seen
6141 	 * advertising report, the flags field from that report
6142 	 * will be used.
6143 	 *
6144 	 * In the unlikely case that a controller just sends a scan
6145 	 * response event that doesn't match the pending report, then
6146 	 * it is marked as a standalone SCAN_RSP.
6147 	 */
6148 	if (type == LE_ADV_SCAN_RSP)
6149 		flags = MGMT_DEV_FOUND_SCAN_RSP;
6150 
6151 	/* If there's nothing pending either store the data from this
6152 	 * event or send an immediate device found event if the data
6153 	 * should not be stored for later.
6154 	 */
6155 	if (!has_pending_adv_report(hdev)) {
6156 		/* If the report will trigger a SCAN_REQ store it for
6157 		 * later merging.
6158 		 */
6159 		if (!ext_adv && (type == LE_ADV_IND ||
6160 				 type == LE_ADV_SCAN_IND)) {
6161 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6162 						 rssi, flags, data, len);
6163 			return;
6164 		}
6165 
6166 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6167 				  rssi, flags, data, len, NULL, 0, 0);
6168 		return;
6169 	}
6170 
6171 	/* Check if the pending report is for the same device as the new one */
6172 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6173 		 bdaddr_type == d->last_adv_addr_type);
6174 
6175 	/* If the pending data doesn't match this report or this isn't a
6176 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6177 	 * sending of the pending data.
6178 	 */
6179 	if (type != LE_ADV_SCAN_RSP || !match) {
6180 		/* Send out whatever is in the cache, but skip duplicates */
6181 		if (!match)
6182 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6183 					  d->last_adv_addr_type, NULL,
6184 					  d->last_adv_rssi, d->last_adv_flags,
6185 					  d->last_adv_data,
6186 					  d->last_adv_data_len, NULL, 0, 0);
6187 
6188 		/* If the new report will trigger a SCAN_REQ store it for
6189 		 * later merging.
6190 		 */
6191 		if (!ext_adv && (type == LE_ADV_IND ||
6192 				 type == LE_ADV_SCAN_IND)) {
6193 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6194 						 rssi, flags, data, len);
6195 			return;
6196 		}
6197 
6198 		/* The advertising reports cannot be merged, so clear
6199 		 * the pending report and send out a device found event.
6200 		 */
6201 		clear_pending_adv_report(hdev);
6202 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6203 				  rssi, flags, data, len, NULL, 0, 0);
6204 		return;
6205 	}
6206 
6207 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6208 	 * the new event is a SCAN_RSP. We can therefore proceed with
6209 	 * sending a merged device found event.
6210 	 */
6211 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6212 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6213 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6214 	clear_pending_adv_report(hdev);
6215 }
6216 
6217 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6218 				  struct sk_buff *skb)
6219 {
6220 	struct hci_ev_le_advertising_report *ev = data;
6221 	u64 instant = jiffies;
6222 
6223 	if (!ev->num)
6224 		return;
6225 
6226 	hci_dev_lock(hdev);
6227 
6228 	while (ev->num--) {
6229 		struct hci_ev_le_advertising_info *info;
6230 		s8 rssi;
6231 
6232 		info = hci_le_ev_skb_pull(hdev, skb,
6233 					  HCI_EV_LE_ADVERTISING_REPORT,
6234 					  sizeof(*info));
6235 		if (!info)
6236 			break;
6237 
6238 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6239 					info->length + 1))
6240 			break;
6241 
6242 		if (info->length <= max_adv_len(hdev)) {
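			/* The RSSI octet immediately follows the advertising
			 * data, which is why one extra byte was pulled above.
			 */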
6243 			rssi = info->data[info->length];
6244 			process_adv_report(hdev, info->type, &info->bdaddr,
6245 					   info->bdaddr_type, NULL, 0,
6246 					   HCI_ADV_PHY_1M, 0, rssi,
6247 					   info->data, info->length, false,
6248 					   false, instant);
6249 		} else {
6250 			bt_dev_err(hdev, "Dropping invalid advertising data");
6251 		}
6252 	}
6253 
6254 	hci_dev_unlock(hdev);
6255 }
6256 
6257 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6258 {
6259 	u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;
6260 
6261 	if (!pdu_type)
6262 		return LE_ADV_NONCONN_IND;
6263 
6264 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
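		/* Legacy PDUs carry fixed event type codes, e.g. ADV_IND is
		 * reported as 0x0013 and ADV_SCAN_IND as 0x0012 (see the LE
		 * Extended Advertising Report event in the Core spec).
		 */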
6265 		switch (evt_type) {
6266 		case LE_LEGACY_ADV_IND:
6267 			return LE_ADV_IND;
6268 		case LE_LEGACY_ADV_DIRECT_IND:
6269 			return LE_ADV_DIRECT_IND;
6270 		case LE_LEGACY_ADV_SCAN_IND:
6271 			return LE_ADV_SCAN_IND;
6272 		case LE_LEGACY_NONCONN_IND:
6273 			return LE_ADV_NONCONN_IND;
6274 		case LE_LEGACY_SCAN_RSP_ADV:
6275 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6276 			return LE_ADV_SCAN_RSP;
6277 		}
6278 
6279 		goto invalid;
6280 	}
6281 
6282 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6283 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6284 			return LE_ADV_DIRECT_IND;
6285 
6286 		return LE_ADV_IND;
6287 	}
6288 
6289 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6290 		return LE_ADV_SCAN_RSP;
6291 
6292 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6293 		return LE_ADV_SCAN_IND;
6294 
6295 	if (evt_type & LE_EXT_ADV_DIRECT_IND)
6296 		return LE_ADV_NONCONN_IND;
6297 
6298 invalid:
6299 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6300 			       evt_type);
6301 
6302 	return LE_ADV_INVALID;
6303 }
6304 
6305 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6306 				      struct sk_buff *skb)
6307 {
6308 	struct hci_ev_le_ext_adv_report *ev = data;
6309 	u64 instant = jiffies;
6310 
6311 	if (!ev->num)
6312 		return;
6313 
6314 	hci_dev_lock(hdev);
6315 
6316 	while (ev->num--) {
6317 		struct hci_ev_le_ext_adv_info *info;
6318 		u8 legacy_evt_type;
6319 		u16 evt_type;
6320 
6321 		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6322 					  sizeof(*info));
6323 		if (!info)
6324 			break;
6325 
6326 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6327 					info->length))
6328 			break;
6329 
6330 		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6331 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6332 
6333 		if (hci_test_quirk(hdev,
6334 				   HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) {
6335 			info->primary_phy &= 0x1f;
6336 			info->secondary_phy &= 0x1f;
6337 		}
6338 
6339 		/* Check if PA Sync is pending and, if the hci_conn SID has
6340 		 * not been set, update it.
6341 		 */
6342 		if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
6343 			struct hci_conn *conn;
6344 
6345 			conn = hci_conn_hash_lookup_create_pa_sync(hdev);
6346 			if (conn && conn->sid == HCI_SID_INVALID)
6347 				conn->sid = info->sid;
6348 		}
6349 
6350 		if (legacy_evt_type != LE_ADV_INVALID) {
6351 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6352 					   info->bdaddr_type, NULL, 0,
6353 					   info->primary_phy,
6354 					   info->secondary_phy,
6355 					   info->rssi, info->data, info->length,
6356 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6357 					   false, instant);
6358 		}
6359 	}
6360 
6361 	hci_dev_unlock(hdev);
6362 }
6363 
6364 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6365 {
6366 	struct hci_cp_le_pa_term_sync cp;
6367 
6368 	memset(&cp, 0, sizeof(cp));
6369 	cp.handle = handle;
6370 
6371 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6372 }
6373 
6374 static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
6375 					   struct sk_buff *skb)
6376 {
6377 	struct hci_ev_le_pa_sync_established *ev = data;
6378 	int mask = hdev->link_mode;
6379 	__u8 flags = 0;
6380 	struct hci_conn *pa_sync, *conn;
6381 
6382 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6383 
6384 	hci_dev_lock(hdev);
6385 
6386 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6387 
6388 	conn = hci_conn_hash_lookup_create_pa_sync(hdev);
6389 	if (!conn) {
6390 		bt_dev_err(hdev,
6391 			   "Unable to find connection for dst %pMR sid 0x%2.2x",
6392 			   &ev->bdaddr, ev->sid);
6393 		goto unlock;
6394 	}
6395 
6396 	clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
6397 
6398 	conn->sync_handle = le16_to_cpu(ev->handle);
6399 	conn->sid = HCI_SID_INVALID;
6400 
6401 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
6402 				      &flags);
6403 	if (!(mask & HCI_LM_ACCEPT)) {
6404 		hci_le_pa_term_sync(hdev, ev->handle);
6405 		goto unlock;
6406 	}
6407 
6408 	if (!(flags & HCI_PROTO_DEFER))
6409 		goto unlock;
6410 
6411 	/* Add connection to indicate PA sync event */
6412 	pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
6413 				     HCI_ROLE_SLAVE);
6414 
6415 	if (IS_ERR(pa_sync))
6416 		goto unlock;
6417 
6418 	pa_sync->sync_handle = le16_to_cpu(ev->handle);
6419 
6420 	if (ev->status) {
6421 		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6422 
6423 		/* Notify iso layer */
6424 		hci_connect_cfm(pa_sync, ev->status);
6425 	}
6426 
6427 unlock:
6428 	hci_dev_unlock(hdev);
6429 }
6430 
6431 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6432 				      struct sk_buff *skb)
6433 {
6434 	struct hci_ev_le_per_adv_report *ev = data;
6435 	int mask = hdev->link_mode;
6436 	__u8 flags = 0;
6437 	struct hci_conn *pa_sync;
6438 
6439 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6440 
6441 	hci_dev_lock(hdev);
6442 
6443 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
6444 	if (!(mask & HCI_LM_ACCEPT))
6445 		goto unlock;
6446 
6447 	if (!(flags & HCI_PROTO_DEFER))
6448 		goto unlock;
6449 
6450 	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
6451 						      le16_to_cpu(ev->sync_handle));
6453 
6454 	if (!pa_sync)
6455 		goto unlock;
6456 
6457 	if (ev->data_status == LE_PA_DATA_COMPLETE &&
6458 	    !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
6459 		/* Notify iso layer */
6460 		hci_connect_cfm(pa_sync, 0);
6461 
6462 		/* Notify MGMT layer */
6463 		mgmt_device_connected(hdev, pa_sync, NULL, 0);
6464 	}
6465 
6466 unlock:
6467 	hci_dev_unlock(hdev);
6468 }
6469 
6470 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6471 					    struct sk_buff *skb)
6472 {
6473 	struct hci_ev_le_remote_feat_complete *ev = data;
6474 	struct hci_conn *conn;
6475 
6476 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6477 
6478 	hci_dev_lock(hdev);
6479 
6480 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6481 	if (conn) {
6482 		if (!ev->status)
6483 			memcpy(conn->features[0], ev->features, 8);
6484 
6485 		if (conn->state == BT_CONFIG) {
6486 			__u8 status;
6487 
6488 			/* If the local controller supports peripheral-initiated
6489 			 * features exchange, but the remote controller does
6490 			 * not, then it is possible that the error code 0x1a
6491 			 * for unsupported remote feature is returned.
6492 			 *
6493 			 * In this specific case, allow the connection to
6494 			 * transition into connected state and mark it as
6495 			 * successful.
6496 			 */
6497 			if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6498 			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6499 				status = 0x00;
6500 			else
6501 				status = ev->status;
6502 
6503 			conn->state = BT_CONNECTED;
6504 			hci_connect_cfm(conn, status);
6505 			hci_conn_drop(conn);
6506 		}
6507 	}
6508 
6509 	hci_dev_unlock(hdev);
6510 }
6511 
6512 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6513 				   struct sk_buff *skb)
6514 {
6515 	struct hci_ev_le_ltk_req *ev = data;
6516 	struct hci_cp_le_ltk_reply cp;
6517 	struct hci_cp_le_ltk_neg_reply neg;
6518 	struct hci_conn *conn;
6519 	struct smp_ltk *ltk;
6520 
6521 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6522 
6523 	hci_dev_lock(hdev);
6524 
6525 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6526 	if (!conn)
6527 		goto not_found;
6528 
6529 	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6530 	if (!ltk)
6531 		goto not_found;
6532 
6533 	if (smp_ltk_is_sc(ltk)) {
6534 		/* With SC both EDiv and Rand are set to zero */
6535 		if (ev->ediv || ev->rand)
6536 			goto not_found;
6537 	} else {
6538 		/* For non-SC keys check that EDiv and Rand match */
6539 		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6540 			goto not_found;
6541 	}
6542 
6543 	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6544 	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6545 	cp.handle = cpu_to_le16(conn->handle);
6546 
6547 	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6548 
6549 	conn->enc_key_size = ltk->enc_size;
6550 
6551 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6552 
6553 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6554 	 * temporary key used to encrypt a connection following
6555 	 * pairing. It is used during the Encrypted Session Setup to
6556 	 * distribute the keys. Later, security can be re-established
6557 	 * using a distributed LTK.
6558 	 */
6559 	if (ltk->type == SMP_STK) {
6560 		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6561 		list_del_rcu(&ltk->list);
6562 		kfree_rcu(ltk, rcu);
6563 	} else {
6564 		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6565 	}
6566 
6567 	hci_dev_unlock(hdev);
6568 
6569 	return;
6570 
6571 not_found:
6572 	neg.handle = ev->handle;
6573 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6574 	hci_dev_unlock(hdev);
6575 }
6576 
6577 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6578 				      u8 reason)
6579 {
6580 	struct hci_cp_le_conn_param_req_neg_reply cp;
6581 
6582 	cp.handle = cpu_to_le16(handle);
6583 	cp.reason = reason;
6584 
6585 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6586 		     &cp);
6587 }
6588 
6589 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6590 					     struct sk_buff *skb)
6591 {
6592 	struct hci_ev_le_remote_conn_param_req *ev = data;
6593 	struct hci_cp_le_conn_param_req_reply cp;
6594 	struct hci_conn *hcon;
6595 	u16 handle, min, max, latency, timeout;
6596 
6597 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6598 
6599 	handle = le16_to_cpu(ev->handle);
6600 	min = le16_to_cpu(ev->interval_min);
6601 	max = le16_to_cpu(ev->interval_max);
6602 	latency = le16_to_cpu(ev->latency);
6603 	timeout = le16_to_cpu(ev->timeout);
6604 
6605 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6606 	if (!hcon || hcon->state != BT_CONNECTED)
6607 		return send_conn_param_neg_reply(hdev, handle,
6608 						 HCI_ERROR_UNKNOWN_CONN_ID);
6609 
6610 	if (max > hcon->le_conn_max_interval)
6611 		return send_conn_param_neg_reply(hdev, handle,
6612 						 HCI_ERROR_INVALID_LL_PARAMS);
6613 
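	/* hci_check_conn_params() enforces the spec ranges: interval
	 * 0x0006-0x0C80, latency up to 0x01F3 and timeout 0x000A-0x0C80,
	 * with the timeout long enough to cover the requested latency.
	 */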
6614 	if (hci_check_conn_params(min, max, latency, timeout))
6615 		return send_conn_param_neg_reply(hdev, handle,
6616 						 HCI_ERROR_INVALID_LL_PARAMS);
6617 
6618 	if (hcon->role == HCI_ROLE_MASTER) {
6619 		struct hci_conn_params *params;
6620 		u8 store_hint;
6621 
6622 		hci_dev_lock(hdev);
6623 
6624 		params = hci_conn_params_lookup(hdev, &hcon->dst,
6625 						hcon->dst_type);
6626 		if (params) {
6627 			params->conn_min_interval = min;
6628 			params->conn_max_interval = max;
6629 			params->conn_latency = latency;
6630 			params->supervision_timeout = timeout;
6631 			store_hint = 0x01;
6632 		} else {
6633 			store_hint = 0x00;
6634 		}
6635 
6636 		hci_dev_unlock(hdev);
6637 
6638 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6639 				    store_hint, min, max, latency, timeout);
6640 	}
6641 
6642 	cp.handle = ev->handle;
6643 	cp.interval_min = ev->interval_min;
6644 	cp.interval_max = ev->interval_max;
6645 	cp.latency = ev->latency;
6646 	cp.timeout = ev->timeout;
6647 	cp.min_ce_len = 0;
6648 	cp.max_ce_len = 0;
6649 
6650 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6651 }
6652 
6653 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6654 					 struct sk_buff *skb)
6655 {
6656 	struct hci_ev_le_direct_adv_report *ev = data;
6657 	u64 instant = jiffies;
6658 	int i;
6659 
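	/* Ensure the skb really contains ev->num fixed-size report entries
	 * before iterating over them.
	 */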
6660 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6661 				flex_array_size(ev, info, ev->num)))
6662 		return;
6663 
6664 	if (!ev->num)
6665 		return;
6666 
6667 	hci_dev_lock(hdev);
6668 
6669 	for (i = 0; i < ev->num; i++) {
6670 		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6671 
6672 		process_adv_report(hdev, info->type, &info->bdaddr,
6673 				   info->bdaddr_type, &info->direct_addr,
6674 				   info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6675 				   info->rssi, NULL, 0, false, false, instant);
6676 	}
6677 
6678 	hci_dev_unlock(hdev);
6679 }
6680 
6681 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6682 				  struct sk_buff *skb)
6683 {
6684 	struct hci_ev_le_phy_update_complete *ev = data;
6685 	struct hci_conn *conn;
6686 
6687 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6688 
6689 	if (ev->status)
6690 		return;
6691 
6692 	hci_dev_lock(hdev);
6693 
6694 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6695 	if (!conn)
6696 		goto unlock;
6697 
6698 	conn->le_tx_phy = ev->tx_phy;
6699 	conn->le_rx_phy = ev->rx_phy;
6700 
6701 unlock:
6702 	hci_dev_unlock(hdev);
6703 }
6704 
6705 static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
6706 				       struct sk_buff *skb)
6707 {
6708 	struct hci_evt_le_cis_established *ev = data;
6709 	struct hci_conn *conn;
6710 	struct bt_iso_qos *qos;
6711 	bool pending = false;
6712 	u16 handle = __le16_to_cpu(ev->handle);
6713 	u32 c_sdu_interval, p_sdu_interval;
6714 
6715 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6716 
6717 	hci_dev_lock(hdev);
6718 
6719 	conn = hci_conn_hash_lookup_handle(hdev, handle);
6720 	if (!conn) {
6721 		bt_dev_err(hdev,
6722 			   "Unable to find connection with handle 0x%4.4x",
6723 			   handle);
6724 		goto unlock;
6725 	}
6726 
6727 	if (conn->type != CIS_LINK) {
6728 		bt_dev_err(hdev,
6729 			   "Invalid connection link type handle 0x%4.4x",
6730 			   handle);
6731 		goto unlock;
6732 	}
6733 
6734 	qos = &conn->iso_qos;
6735 
6736 	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6737 
6738 	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
6739 	 * page 3075:
6740 	 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
6741 	 * ISO_Interval + SDU_Interval_C_To_P
6742 	 * ...
6743 	 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
6744 	 *					Transport_Latency
6745 	 */
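	/* Worked example (illustrative values only): with a CIG_Sync_Delay
	 * of 5000 us, an FT_C_To_P of 2, an ISO_Interval of 8 units
	 * (8 * 1250 = 10000 us) and a reported Transport_Latency_C_To_P of
	 * 20000 us, the computation below yields a SDU interval of
	 * 5000 + 2 * 10000 - 20000 = 5000 us.
	 */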
6746 	c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6747 			 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
6748 			get_unaligned_le24(ev->c_latency);
6749 	p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6750 			 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
6751 			get_unaligned_le24(ev->p_latency);
6752 
6753 	switch (conn->role) {
6754 	case HCI_ROLE_SLAVE:
6755 		qos->ucast.in.interval = c_sdu_interval;
6756 		qos->ucast.out.interval = p_sdu_interval;
6757 		/* Convert Transport Latency (us) to Latency (msec) */
6758 		qos->ucast.in.latency =
6759 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6760 					  1000);
6761 		qos->ucast.out.latency =
6762 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6763 					  1000);
6764 		qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
6765 		qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
6766 		qos->ucast.in.phy = ev->c_phy;
6767 		qos->ucast.out.phy = ev->p_phy;
6768 		break;
6769 	case HCI_ROLE_MASTER:
6770 		qos->ucast.in.interval = p_sdu_interval;
6771 		qos->ucast.out.interval = c_sdu_interval;
6772 		/* Convert Transport Latency (us) to Latency (msec) */
6773 		qos->ucast.out.latency =
6774 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6775 					  1000);
6776 		qos->ucast.in.latency =
6777 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6778 					  1000);
6779 		qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
6780 		qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
6781 		qos->ucast.out.phy = ev->c_phy;
6782 		qos->ucast.in.phy = ev->p_phy;
6783 		break;
6784 	}
6785 
6786 	if (!ev->status) {
6787 		conn->state = BT_CONNECTED;
6788 		hci_debugfs_create_conn(conn);
6789 		hci_conn_add_sysfs(conn);
6790 		hci_iso_setup_path(conn);
6791 		goto unlock;
6792 	}
6793 
6794 	conn->state = BT_CLOSED;
6795 	hci_connect_cfm(conn, ev->status);
6796 	hci_conn_del(conn);
6797 
6798 unlock:
6799 	if (pending)
6800 		hci_le_create_cis_pending(hdev);
6801 
6802 	hci_dev_unlock(hdev);
6803 }
6804 
6805 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6806 {
6807 	struct hci_cp_le_reject_cis cp;
6808 
6809 	memset(&cp, 0, sizeof(cp));
6810 	cp.handle = handle;
6811 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6812 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6813 }
6814 
6815 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6816 {
6817 	struct hci_cp_le_accept_cis cp;
6818 
6819 	memset(&cp, 0, sizeof(cp));
6820 	cp.handle = handle;
6821 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6822 }
6823 
6824 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6825 			       struct sk_buff *skb)
6826 {
6827 	struct hci_evt_le_cis_req *ev = data;
6828 	u16 acl_handle, cis_handle;
6829 	struct hci_conn *acl, *cis;
6830 	int mask;
6831 	__u8 flags = 0;
6832 
6833 	acl_handle = __le16_to_cpu(ev->acl_handle);
6834 	cis_handle = __le16_to_cpu(ev->cis_handle);
6835 
6836 	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6837 		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6838 
6839 	hci_dev_lock(hdev);
6840 
6841 	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6842 	if (!acl)
6843 		goto unlock;
6844 
6845 	mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags);
6846 	if (!(mask & HCI_LM_ACCEPT)) {
6847 		hci_le_reject_cis(hdev, ev->cis_handle);
6848 		goto unlock;
6849 	}
6850 
6851 	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6852 	if (!cis) {
6853 		cis = hci_conn_add(hdev, CIS_LINK, &acl->dst,
6854 				   HCI_ROLE_SLAVE, cis_handle);
6855 		if (IS_ERR(cis)) {
6856 			hci_le_reject_cis(hdev, ev->cis_handle);
6857 			goto unlock;
6858 		}
6859 	}
6860 
6861 	cis->iso_qos.ucast.cig = ev->cig_id;
6862 	cis->iso_qos.ucast.cis = ev->cis_id;
6863 
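	/* Without HCI_PROTO_DEFER the CIS is accepted immediately; otherwise
	 * the decision is deferred to the ISO layer and the connection is
	 * parked in BT_CONNECT2 until user space accepts or rejects it.
	 */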
6864 	if (!(flags & HCI_PROTO_DEFER)) {
6865 		hci_le_accept_cis(hdev, ev->cis_handle);
6866 	} else {
6867 		cis->state = BT_CONNECT2;
6868 		hci_connect_cfm(cis, 0);
6869 	}
6870 
6871 unlock:
6872 	hci_dev_unlock(hdev);
6873 }
6874 
6875 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6876 {
6877 	u8 handle = PTR_UINT(data);
6878 
6879 	return hci_le_terminate_big_sync(hdev, handle,
6880 					 HCI_ERROR_LOCAL_HOST_TERM);
6881 }
6882 
6883 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6884 					   struct sk_buff *skb)
6885 {
6886 	struct hci_evt_le_create_big_complete *ev = data;
6887 	struct hci_conn *conn;
6888 	__u8 i = 0;
6889 
6890 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6891 
6892 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6893 				flex_array_size(ev, bis_handle, ev->num_bis)))
6894 		return;
6895 
6896 	hci_dev_lock(hdev);
6897 
6898 	/* Connect all BISes that are bound to the BIG */
6899 	while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
6900 						      BT_BOUND,
6901 						      HCI_ROLE_MASTER))) {
6902 		if (ev->status) {
6903 			hci_connect_cfm(conn, ev->status);
6904 			hci_conn_del(conn);
6905 			continue;
6906 		}
6907 
6908 		if (hci_conn_set_handle(conn,
6909 					__le16_to_cpu(ev->bis_handle[i++])))
6910 			continue;
6911 
6912 		conn->state = BT_CONNECTED;
6913 		set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6914 		hci_debugfs_create_conn(conn);
6915 		hci_conn_add_sysfs(conn);
6916 		hci_iso_setup_path(conn);
6917 	}
6918 
6919 	if (!ev->status && !i)
6920 		/* If no BISes have been connected for the BIG,
6921 		 * terminate. This is in case all bound connections
6922 		 * have been closed before the BIG creation
6923 		 * has completed.
6924 		 */
6925 		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6926 				   UINT_PTR(ev->handle), NULL);
6927 
6928 	hci_dev_unlock(hdev);
6929 }
6930 
6931 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6932 					    struct sk_buff *skb)
6933 {
6934 	struct hci_evt_le_big_sync_established *ev = data;
6935 	struct hci_conn *bis, *conn;
6936 	int i;
6937 
6938 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6939 
6940 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
6941 				flex_array_size(ev, bis, ev->num_bis)))
6942 		return;
6943 
6944 	hci_dev_lock(hdev);
6945 
6946 	conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
6947 						  ev->num_bis);
6948 	if (!conn) {
6949 		bt_dev_err(hdev,
6950 			   "Unable to find connection for big 0x%2.2x",
6951 			   ev->handle);
6952 		goto unlock;
6953 	}
6954 
6955 	clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
6956 
6957 	conn->num_bis = 0;
6958 	memset(conn->bis, 0, sizeof(conn->bis));
6959 
6960 	for (i = 0; i < ev->num_bis; i++) {
6961 		u16 handle = le16_to_cpu(ev->bis[i]);
6962 		__le32 interval;
6963 
6964 		bis = hci_conn_hash_lookup_handle(hdev, handle);
6965 		if (!bis) {
6966 			if (handle > HCI_CONN_HANDLE_MAX) {
6967 				bt_dev_dbg(hdev, "ignore too large handle %u", handle);
6968 				continue;
6969 			}
6970 			bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY,
6971 					   HCI_ROLE_SLAVE, handle);
6972 			if (IS_ERR(bis))
6973 				continue;
6974 		}
6975 
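		/* Status 0x42 is "Unknown Advertising Identifier" in the
		 * Core spec error code list.
		 */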
6976 		if (ev->status != 0x42) {
6977 			/* Mark PA sync as established */
6978 			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
6979 			/* Reset cleanup callback of PA Sync so it doesn't
6980 			 * terminate the sync when deleting the connection.
6981 			 */
6982 			conn->cleanup = NULL;
6983 		}
6984 
6985 		bis->sync_handle = conn->sync_handle;
6986 		bis->iso_qos.bcast.big = ev->handle;
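		/* ev->latency is a 24-bit little-endian value (3 octets);
		 * widen it into a zero-initialized __le32 before converting.
		 */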
6987 		memset(&interval, 0, sizeof(interval));
6988 		memcpy(&interval, ev->latency, sizeof(ev->latency));
6989 		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
6990 		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
6991 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
6992 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
6993 
6994 		if (!ev->status) {
6995 			bis->state = BT_CONNECTED;
6996 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
6997 			hci_debugfs_create_conn(bis);
6998 			hci_conn_add_sysfs(bis);
6999 			hci_iso_setup_path(bis);
7000 		}
7001 	}
7002 
7003 	/* In case BIG sync failed, notify each failed connection to
7004 	 * the user after all hci connections have been added
7005 	 */
7006 	if (ev->status)
7007 		for (i = 0; i < ev->num_bis; i++) {
7008 			u16 handle = le16_to_cpu(ev->bis[i]);
7009 
7010 			bis = hci_conn_hash_lookup_handle(hdev, handle);
7011 			if (!bis)
7012 				continue;
7013 
7014 			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7015 			hci_connect_cfm(bis, ev->status);
7016 		}
7017 
7018 unlock:
7019 	hci_dev_unlock(hdev);
7020 }
7021 
7022 static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
7023 				     struct sk_buff *skb)
7024 {
7025 	struct hci_evt_le_big_sync_lost *ev = data;
7026 	struct hci_conn *bis, *conn;
7027 	bool mgmt_conn;
7028 
7029 	bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);
7030 
7031 	hci_dev_lock(hdev);
7032 
7033 	/* Delete the pa sync connection */
7034 	bis = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
7035 	if (bis) {
7036 		conn = hci_conn_hash_lookup_pa_sync_handle(hdev,
7037 							   bis->sync_handle);
7038 		if (conn)
7039 			hci_conn_del(conn);
7040 	}
7041 
7042 	/* Delete each bis connection */
7043 	while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
7044 						     BT_CONNECTED,
7045 						     HCI_ROLE_SLAVE))) {
7046 		mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &bis->flags);
7047 		mgmt_device_disconnected(hdev, &bis->dst, bis->type, bis->dst_type,
7048 					 ev->reason, mgmt_conn);
7049 
7050 		clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7051 		hci_disconn_cfm(bis, ev->reason);
7052 		hci_conn_del(bis);
7053 	}
7054 
7055 	hci_dev_unlock(hdev);
7056 }
7057 
7058 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7059 					   struct sk_buff *skb)
7060 {
7061 	struct hci_evt_le_big_info_adv_report *ev = data;
7062 	int mask = hdev->link_mode;
7063 	__u8 flags = 0;
7064 	struct hci_conn *pa_sync;
7065 
7066 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7067 
7068 	hci_dev_lock(hdev);
7069 
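	/* Only deliver the report if the ISO layer is willing to accept it
	 * and a listening socket has deferred setup enabled; otherwise
	 * nobody is waiting for this BIG Info.
	 */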
7070 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
7071 	if (!(mask & HCI_LM_ACCEPT))
7072 		goto unlock;
7073 
7074 	if (!(flags & HCI_PROTO_DEFER))
7075 		goto unlock;
7076 
7077 	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
7078 						      le16_to_cpu(ev->sync_handle));
7080 
7081 	if (!pa_sync)
7082 		goto unlock;
7083 
7084 	pa_sync->iso_qos.bcast.encryption = ev->encryption;
7085 
7086 	/* Notify iso layer */
7087 	hci_connect_cfm(pa_sync, 0);
7088 
7089 unlock:
7090 	hci_dev_unlock(hdev);
7091 }
7092 
7093 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7094 [_op] = { \
7095 	.func = _func, \
7096 	.min_len = _min_len, \
7097 	.max_len = _max_len, \
7098 }
7099 
7100 #define HCI_LE_EV(_op, _func, _len) \
7101 	HCI_LE_EV_VL(_op, _func, _len, _len)
7102 
7103 #define HCI_LE_EV_STATUS(_op, _func) \
7104 	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7105 
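/* For example, HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, len)
 * expands to the designated initializer
 * [HCI_EV_LE_LTK_REQ] = { .func = hci_le_ltk_request_evt,
 *			   .min_len = len, .max_len = len },
 * placing the handler at the index of the subevent code it serves.
 */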
7106 /* Entries in this table shall be placed at the index of the subevent
7107  * opcode they handle. Use of the macros above is recommended, since they
7108  * initialize each entry at its proper index using designated initializers;
7109  * that way, events without a callback function can simply be omitted.
7110  */
7111 static const struct hci_le_ev {
7112 	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7113 	u16  min_len;
7114 	u16  max_len;
7115 } hci_le_ev_table[U8_MAX + 1] = {
7116 	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7117 	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7118 		  sizeof(struct hci_ev_le_conn_complete)),
7119 	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7120 	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7121 		     sizeof(struct hci_ev_le_advertising_report),
7122 		     HCI_MAX_EVENT_SIZE),
7123 	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7124 	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7125 		  hci_le_conn_update_complete_evt,
7126 		  sizeof(struct hci_ev_le_conn_update_complete)),
7127 	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7128 	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7129 		  hci_le_remote_feat_complete_evt,
7130 		  sizeof(struct hci_ev_le_remote_feat_complete)),
7131 	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7132 	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7133 		  sizeof(struct hci_ev_le_ltk_req)),
7134 	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7135 	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7136 		  hci_le_remote_conn_param_req_evt,
7137 		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7138 	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7139 	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7140 		  hci_le_enh_conn_complete_evt,
7141 		  sizeof(struct hci_ev_le_enh_conn_complete)),
7142 	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7143 	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7144 		     sizeof(struct hci_ev_le_direct_adv_report),
7145 		     HCI_MAX_EVENT_SIZE),
7146 	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7147 	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7148 		  sizeof(struct hci_ev_le_phy_update_complete)),
7149 	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7150 	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7151 		     sizeof(struct hci_ev_le_ext_adv_report),
7152 		     HCI_MAX_EVENT_SIZE),
7153 	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7154 	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7155 		  hci_le_pa_sync_established_evt,
7156 		  sizeof(struct hci_ev_le_pa_sync_established)),
7157 	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7158 	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7159 		     hci_le_per_adv_report_evt,
7160 		     sizeof(struct hci_ev_le_per_adv_report),
7161 		     HCI_MAX_EVENT_SIZE),
7162 	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7163 	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7164 		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7165 	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7166 	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
7167 		  sizeof(struct hci_evt_le_cis_established)),
7168 	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7169 	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7170 		  sizeof(struct hci_evt_le_cis_req)),
7171 	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7172 	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7173 		     hci_le_create_big_complete_evt,
7174 		     sizeof(struct hci_evt_le_create_big_complete),
7175 		     HCI_MAX_EVENT_SIZE),
7176 	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABLISHED] */
7177 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
7178 		     hci_le_big_sync_established_evt,
7179 		     sizeof(struct hci_evt_le_big_sync_established),
7180 		     HCI_MAX_EVENT_SIZE),
7181 	/* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
7182 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST,
7183 		     hci_le_big_sync_lost_evt,
7184 		     sizeof(struct hci_evt_le_big_sync_lost),
7185 		     HCI_MAX_EVENT_SIZE),
7186 	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7187 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7188 		     hci_le_big_info_adv_report_evt,
7189 		     sizeof(struct hci_evt_le_big_info_adv_report),
7190 		     HCI_MAX_EVENT_SIZE),
7191 };
7192 
7193 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7194 			    struct sk_buff *skb, u16 *opcode, u8 *status,
7195 			    hci_req_complete_t *req_complete,
7196 			    hci_req_complete_skb_t *req_complete_skb)
7197 {
7198 	struct hci_ev_le_meta *ev = data;
7199 	const struct hci_le_ev *subev;
7200 
7201 	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7202 
7203 	/* Only match event if command OGF is for LE (0x08) or the pending command is a NOP */
7204 	if (hdev->req_skb &&
7205 	   (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
7206 	    hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
7207 	    hci_skb_event(hdev->req_skb) == ev->subevent) {
7208 		*opcode = hci_skb_opcode(hdev->req_skb);
7209 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7210 				     req_complete_skb);
7211 	}
7212 
7213 	subev = &hci_le_ev_table[ev->subevent];
7214 	if (!subev->func)
7215 		return;
7216 
7217 	if (skb->len < subev->min_len) {
7218 		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7219 			   ev->subevent, skb->len, subev->min_len);
7220 		return;
7221 	}
7222 
7223 	/* Just warn if the length is over max_len; it may still be
7224 	 * possible to partially parse the event, so leave it to the
7225 	 * callback to decide if that is acceptable.
7226 	 */
7227 	if (skb->len > subev->max_len)
7228 		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7229 			    ev->subevent, skb->len, subev->max_len);
7230 	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7231 	if (!data)
7232 		return;
7233 
7234 	subev->func(hdev, data, skb);
7235 }
7236 
7237 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7238 				 u8 event, struct sk_buff *skb)
7239 {
7240 	struct hci_ev_cmd_complete *ev;
7241 	struct hci_event_hdr *hdr;
7242 
7243 	if (!skb)
7244 		return false;
7245 
7246 	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7247 	if (!hdr)
7248 		return false;
7249 
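	/* If the request expected a specific completion event, matching the
	 * event code is all that is needed; the payload is left in the skb
	 * for the caller.
	 */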
7250 	if (event) {
7251 		if (hdr->evt != event)
7252 			return false;
7253 		return true;
7254 	}
7255 
7256 	/* Check if request ended in Command Status - no way to retrieve
7257 	 * any extra parameters in this case.
7258 	 */
7259 	if (hdr->evt == HCI_EV_CMD_STATUS)
7260 		return false;
7261 
7262 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7263 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7264 			   hdr->evt);
7265 		return false;
7266 	}
7267 
7268 	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7269 	if (!ev)
7270 		return false;
7271 
7272 	if (opcode != __le16_to_cpu(ev->opcode)) {
7273 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7274 		       __le16_to_cpu(ev->opcode));
7275 		return false;
7276 	}
7277 
7278 	return true;
7279 }
7280 
7281 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7282 				  struct sk_buff *skb)
7283 {
7284 	struct hci_ev_le_advertising_info *adv;
7285 	struct hci_ev_le_direct_adv_info *direct_adv;
7286 	struct hci_ev_le_ext_adv_info *ext_adv;
7287 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7288 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7289 
7290 	hci_dev_lock(hdev);
7291 
7292 	/* If we are currently suspended and this is the first BT event seen,
7293 	 * save the wake reason associated with the event.
7294 	 */
7295 	if (!hdev->suspended || hdev->wake_reason)
7296 		goto unlock;
7297 
7298 	/* Default to remote wake. Values for wake_reason are documented in the
7299 	 * BlueZ mgmt API docs.
7300 	 */
7301 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7302 
7303 	/* Once configured for remote wakeup, we should only wake up for
7304 	 * reconnections. It's useful to see which device is waking us up so
7305 	 * keep track of the bdaddr of the connection event that woke us up.
7306 	 */
7307 	if (event == HCI_EV_CONN_REQUEST) {
7308 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7309 		hdev->wake_addr_type = BDADDR_BREDR;
7310 	} else if (event == HCI_EV_CONN_COMPLETE) {
7311 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7312 		hdev->wake_addr_type = BDADDR_BREDR;
7313 	} else if (event == HCI_EV_LE_META) {
7314 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7315 		u8 subevent = le_ev->subevent;
7316 		u8 *ptr = &skb->data[sizeof(*le_ev)];
7317 		u8 num_reports = *ptr;
7318 
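		/* Each advertising report subevent carries a num_reports
		 * byte followed by the first report; the three report
		 * layouts differ, hence the per-subevent casts below.
		 */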
7319 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7320 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7321 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7322 		    num_reports) {
7323 			adv = (void *)(ptr + 1);
7324 			direct_adv = (void *)(ptr + 1);
7325 			ext_adv = (void *)(ptr + 1);
7326 
7327 			switch (subevent) {
7328 			case HCI_EV_LE_ADVERTISING_REPORT:
7329 				bacpy(&hdev->wake_addr, &adv->bdaddr);
7330 				hdev->wake_addr_type = adv->bdaddr_type;
7331 				break;
7332 			case HCI_EV_LE_DIRECT_ADV_REPORT:
7333 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7334 				hdev->wake_addr_type = direct_adv->bdaddr_type;
7335 				break;
7336 			case HCI_EV_LE_EXT_ADV_REPORT:
7337 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7338 				hdev->wake_addr_type = ext_adv->bdaddr_type;
7339 				break;
7340 			}
7341 		}
7342 	} else {
7343 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7344 	}
7345 
7346 unlock:
7347 	hci_dev_unlock(hdev);
7348 }
7349 
7350 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7351 [_op] = { \
7352 	.req = false, \
7353 	.func = _func, \
7354 	.min_len = _min_len, \
7355 	.max_len = _max_len, \
7356 }
7357 
7358 #define HCI_EV(_op, _func, _len) \
7359 	HCI_EV_VL(_op, _func, _len, _len)
7360 
7361 #define HCI_EV_STATUS(_op, _func) \
7362 	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7363 
7364 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7365 [_op] = { \
7366 	.req = true, \
7367 	.func_req = _func, \
7368 	.min_len = _min_len, \
7369 	.max_len = _max_len, \
7370 }
7371 
7372 #define HCI_EV_REQ(_op, _func, _len) \
7373 	HCI_EV_REQ_VL(_op, _func, _len, _len)
7374 
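/* The REQ variants set .req = true, which makes hci_event_func() dispatch
 * through func_req so the handler can complete a pending request; plain
 * entries dispatch through func and never touch the request state.
 */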
7375 /* Entries in this table shall be placed at the index of the event opcode
7376  * they handle. Use of the macros above is recommended, since they initialize
7377  * each entry at its proper index using designated initializers; that way,
7378  * events without a callback function don't need an entry at all.
7379  */
7380 static const struct hci_ev {
7381 	bool req;
7382 	union {
7383 		void (*func)(struct hci_dev *hdev, void *data,
7384 			     struct sk_buff *skb);
7385 		void (*func_req)(struct hci_dev *hdev, void *data,
7386 				 struct sk_buff *skb, u16 *opcode, u8 *status,
7387 				 hci_req_complete_t *req_complete,
7388 				 hci_req_complete_skb_t *req_complete_skb);
7389 	};
7390 	u16  min_len;
7391 	u16  max_len;
7392 } hci_ev_table[U8_MAX + 1] = {
7393 	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7394 	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7395 	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7396 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7397 		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7398 	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7399 	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7400 	       sizeof(struct hci_ev_conn_complete)),
7401 	/* [0x04 = HCI_EV_CONN_REQUEST] */
7402 	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7403 	       sizeof(struct hci_ev_conn_request)),
7404 	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7405 	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7406 	       sizeof(struct hci_ev_disconn_complete)),
7407 	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7408 	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7409 	       sizeof(struct hci_ev_auth_complete)),
7410 	/* [0x07 = HCI_EV_REMOTE_NAME] */
7411 	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7412 	       sizeof(struct hci_ev_remote_name)),
7413 	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7414 	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7415 	       sizeof(struct hci_ev_encrypt_change)),
7416 	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7417 	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7418 	       hci_change_link_key_complete_evt,
7419 	       sizeof(struct hci_ev_change_link_key_complete)),
7420 	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7421 	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7422 	       sizeof(struct hci_ev_remote_features)),
7423 	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7424 	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7425 		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7426 	/* [0x0f = HCI_EV_CMD_STATUS] */
7427 	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7428 		   sizeof(struct hci_ev_cmd_status)),
7429 	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7430 	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7431 	       sizeof(struct hci_ev_hardware_error)),
7432 	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7433 	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7434 	       sizeof(struct hci_ev_role_change)),
7435 	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7436 	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7437 		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7438 	/* [0x14 = HCI_EV_MODE_CHANGE] */
7439 	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7440 	       sizeof(struct hci_ev_mode_change)),
7441 	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7442 	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7443 	       sizeof(struct hci_ev_pin_code_req)),
7444 	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7445 	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7446 	       sizeof(struct hci_ev_link_key_req)),
7447 	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7448 	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7449 	       sizeof(struct hci_ev_link_key_notify)),
7450 	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7451 	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7452 	       sizeof(struct hci_ev_clock_offset)),
7453 	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7454 	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7455 	       sizeof(struct hci_ev_pkt_type_change)),
7456 	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7457 	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7458 	       sizeof(struct hci_ev_pscan_rep_mode)),
7459 	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7460 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7461 		  hci_inquiry_result_with_rssi_evt,
7462 		  sizeof(struct hci_ev_inquiry_result_rssi),
7463 		  HCI_MAX_EVENT_SIZE),
7464 	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7465 	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7466 	       sizeof(struct hci_ev_remote_ext_features)),
7467 	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7468 	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7469 	       sizeof(struct hci_ev_sync_conn_complete)),
7470 	/* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7471 	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7472 		  hci_extended_inquiry_result_evt,
7473 		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7474 	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7475 	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7476 	       sizeof(struct hci_ev_key_refresh_complete)),
7477 	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7478 	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7479 	       sizeof(struct hci_ev_io_capa_request)),
7480 	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7481 	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7482 	       sizeof(struct hci_ev_io_capa_reply)),
7483 	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7484 	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7485 	       sizeof(struct hci_ev_user_confirm_req)),
7486 	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7487 	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7488 	       sizeof(struct hci_ev_user_passkey_req)),
7489 	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7490 	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7491 	       sizeof(struct hci_ev_remote_oob_data_request)),
7492 	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7493 	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7494 	       sizeof(struct hci_ev_simple_pair_complete)),
7495 	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7496 	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7497 	       sizeof(struct hci_ev_user_passkey_notify)),
7498 	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7499 	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7500 	       sizeof(struct hci_ev_keypress_notify)),
7501 	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7502 	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7503 	       sizeof(struct hci_ev_remote_host_features)),
7504 	/* [0x3e = HCI_EV_LE_META] */
7505 	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7506 		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7507 	/* [0xff = HCI_EV_VENDOR] */
7508 	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7509 };
7510 
7511 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7512 			   u16 *opcode, u8 *status,
7513 			   hci_req_complete_t *req_complete,
7514 			   hci_req_complete_skb_t *req_complete_skb)
7515 {
7516 	const struct hci_ev *ev = &hci_ev_table[event];
7517 	void *data;
7518 
7519 	if (!ev->func)
7520 		return;
7521 
7522 	if (skb->len < ev->min_len) {
7523 		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7524 			   event, skb->len, ev->min_len);
7525 		return;
7526 	}
7527 
7528 	/* Just warn if the length is over max_len; it may still be
7529 	 * possible to partially parse the event, so leave it to the
7530 	 * callback to decide if that is acceptable.
7531 	 */
7532 	if (skb->len > ev->max_len)
7533 		bt_dev_warn_ratelimited(hdev,
7534 					"unexpected event 0x%2.2x length: %u > %u",
7535 					event, skb->len, ev->max_len);
7536 
7537 	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7538 	if (!data)
7539 		return;
7540 
7541 	if (ev->req)
7542 		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7543 			     req_complete_skb);
7544 	else
7545 		ev->func(hdev, data, skb);
7546 }
7547 
7548 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7549 {
7550 	struct hci_event_hdr *hdr = (void *) skb->data;
7551 	hci_req_complete_t req_complete = NULL;
7552 	hci_req_complete_skb_t req_complete_skb = NULL;
7553 	struct sk_buff *orig_skb = NULL;
7554 	u8 status = 0, event, req_evt = 0;
7555 	u16 opcode = HCI_OP_NOP;
7556 
7557 	if (skb->len < sizeof(*hdr)) {
7558 		bt_dev_err(hdev, "Malformed HCI Event");
7559 		goto done;
7560 	}
7561 
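
	/* Keep a clone of the most recently received event so drivers can
	 * inspect it later via hci_recv_event_data().
	 */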
7562 	hci_dev_lock(hdev);
7563 	kfree_skb(hdev->recv_event);
7564 	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7565 	hci_dev_unlock(hdev);
7566 
7567 	event = hdr->evt;
7568 	if (!event) {
7569 		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7570 			    event);
7571 		goto done;
7572 	}
7573 
7574 	/* Only match event if command OGF is not for LE */
7575 	if (hdev->req_skb &&
7576 	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
7577 	    hci_skb_event(hdev->req_skb) == event) {
7578 		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
7579 				     status, &req_complete, &req_complete_skb);
7580 		req_evt = event;
7581 	}
7582 
7583 	/* If it looks like we might end up having to call
7584 	 * req_complete_skb, store a pristine copy of the skb since the
7585 	 * various handlers may modify the original one through
7586 	 * skb_pull() calls, etc.
7587 	 */
7588 	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7589 	    event == HCI_EV_CMD_COMPLETE)
7590 		orig_skb = skb_clone(skb, GFP_KERNEL);
7591 
7592 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
7593 
7594 	/* Store wake reason if we're suspended */
7595 	hci_store_wake_reason(hdev, event, skb);
7596 
7597 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
7598 
7599 	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7600 		       &req_complete_skb);
7601 
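	/* Signal completion of any matched request last: either through the
	 * plain callback, or with the pristine clone when the caller asked
	 * for the event payload.
	 */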
7602 	if (req_complete) {
7603 		req_complete(hdev, status, opcode);
7604 	} else if (req_complete_skb) {
7605 		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7606 			kfree_skb(orig_skb);
7607 			orig_skb = NULL;
7608 		}
7609 		req_complete_skb(hdev, status, opcode, orig_skb);
7610 	}
7611 
7612 done:
7613 	kfree_skb(orig_skb);
7614 	kfree_skb(skb);
7615 	hdev->stat.evt_rx++;
7616 }
7617