1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 Copyright 2023-2024 NXP
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI event handling. */
27
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_debugfs.h"
37 #include "hci_codec.h"
38 #include "smp.h"
39 #include "msft.h"
40 #include "eir.h"
41
42 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
43 "\x00\x00\x00\x00\x00\x00\x00\x00"
44
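/* Local helper: convert a timeout given in seconds to jiffies via
 * msecs_to_jiffies().
 */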
45 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
46
47 /* Handle HCI Event packets */
48
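/* Length-checked wrappers around skb_pull_data(): each returns a pointer to
 * the pulled payload, or NULL (after logging the malformed packet) when the
 * skb is shorter than the expected event, command complete or LE event data.
 */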
49 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
50 u8 ev, size_t len)
51 {
52 void *data;
53
54 data = skb_pull_data(skb, len);
55 if (!data)
56 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
57
58 return data;
59 }
60
61 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
62 u16 op, size_t len)
63 {
64 void *data;
65
66 data = skb_pull_data(skb, len);
67 if (!data)
68 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
69
70 return data;
71 }
72
73 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
74 u8 ev, size_t len)
75 {
76 void *data;
77
78 data = skb_pull_data(skb, len);
79 if (!data)
80 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
81
82 return data;
83 }
84
85 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
86 struct sk_buff *skb)
87 {
88 struct hci_ev_status *rp = data;
89
90 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
91
92 /* It is possible that we receive Inquiry Complete event right
93 * before we receive Inquiry Cancel Command Complete event, in
94 * which case the latter event should have status of Command
95 Disallowed. This should not be treated as an error, since
96 * we actually achieve what Inquiry Cancel wants to achieve,
97 * which is to end the last Inquiry session.
98 */
99 if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
100 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
101 rp->status = 0x00;
102 }
103
104 if (rp->status)
105 return rp->status;
106
107 clear_bit(HCI_INQUIRY, &hdev->flags);
108 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
109 wake_up_bit(&hdev->flags, HCI_INQUIRY);
110
111 hci_dev_lock(hdev);
112 /* Set discovery state to stopped if we're not doing LE active
113 * scanning.
114 */
115 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
116 hdev->le_scan_type != LE_SCAN_ACTIVE)
117 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
118 hci_dev_unlock(hdev);
119
120 return rp->status;
121 }
122
123 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
124 struct sk_buff *skb)
125 {
126 struct hci_ev_status *rp = data;
127
128 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
129
130 if (rp->status)
131 return rp->status;
132
133 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
134
135 return rp->status;
136 }
137
138 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
139 struct sk_buff *skb)
140 {
141 struct hci_ev_status *rp = data;
142
143 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
144
145 if (rp->status)
146 return rp->status;
147
148 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
149
150 return rp->status;
151 }
152
153 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
154 struct sk_buff *skb)
155 {
156 struct hci_ev_status *rp = data;
157
158 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
159
160 return rp->status;
161 }
162
163 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
164 struct sk_buff *skb)
165 {
166 struct hci_rp_role_discovery *rp = data;
167 struct hci_conn *conn;
168
169 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
170
171 if (rp->status)
172 return rp->status;
173
174 hci_dev_lock(hdev);
175
176 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
177 if (conn)
178 conn->role = rp->role;
179
180 hci_dev_unlock(hdev);
181
182 return rp->status;
183 }
184
185 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
186 struct sk_buff *skb)
187 {
188 struct hci_rp_read_link_policy *rp = data;
189 struct hci_conn *conn;
190
191 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
192
193 if (rp->status)
194 return rp->status;
195
196 hci_dev_lock(hdev);
197
198 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
199 if (conn)
200 conn->link_policy = __le16_to_cpu(rp->policy);
201
202 hci_dev_unlock(hdev);
203
204 return rp->status;
205 }
206
207 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
208 struct sk_buff *skb)
209 {
210 struct hci_rp_write_link_policy *rp = data;
211 struct hci_conn *conn;
212 void *sent;
213
214 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
215
216 if (rp->status)
217 return rp->status;
218
219 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
220 if (!sent)
221 return rp->status;
222
223 hci_dev_lock(hdev);
224
225 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
226 if (conn)
227 conn->link_policy = get_unaligned_le16(sent + 2);
228
229 hci_dev_unlock(hdev);
230
231 return rp->status;
232 }
233
234 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
235 struct sk_buff *skb)
236 {
237 struct hci_rp_read_def_link_policy *rp = data;
238
239 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
240
241 if (rp->status)
242 return rp->status;
243
244 hdev->link_policy = __le16_to_cpu(rp->policy);
245
246 return rp->status;
247 }
248
249 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
250 struct sk_buff *skb)
251 {
252 struct hci_ev_status *rp = data;
253 void *sent;
254
255 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
256
257 if (rp->status)
258 return rp->status;
259
260 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
261 if (!sent)
262 return rp->status;
263
264 hdev->link_policy = get_unaligned_le16(sent);
265
266 return rp->status;
267 }
268
269 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
270 {
271 struct hci_ev_status *rp = data;
272
273 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
274
275 clear_bit(HCI_RESET, &hdev->flags);
276
277 if (rp->status)
278 return rp->status;
279
280 /* Reset all non-persistent flags */
281 hci_dev_clear_volatile_flags(hdev);
282
283 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
284
285 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
286 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
287
288 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
289 hdev->adv_data_len = 0;
290
291 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
292 hdev->scan_rsp_data_len = 0;
293
294 hdev->le_scan_type = LE_SCAN_PASSIVE;
295
296 hdev->ssp_debug_mode = 0;
297
298 hci_bdaddr_list_clear(&hdev->le_accept_list);
299 hci_bdaddr_list_clear(&hdev->le_resolv_list);
300
301 return rp->status;
302 }
303
304 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
305 struct sk_buff *skb)
306 {
307 struct hci_rp_read_stored_link_key *rp = data;
308 struct hci_cp_read_stored_link_key *sent;
309
310 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
311
312 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
313 if (!sent)
314 return rp->status;
315
316 if (!rp->status && sent->read_all == 0x01) {
317 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
318 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
319 }
320
321 return rp->status;
322 }
323
324 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
325 struct sk_buff *skb)
326 {
327 struct hci_rp_delete_stored_link_key *rp = data;
328 u16 num_keys;
329
330 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
331
332 if (rp->status)
333 return rp->status;
334
335 num_keys = le16_to_cpu(rp->num_keys);
336
337 if (num_keys <= hdev->stored_num_keys)
338 hdev->stored_num_keys -= num_keys;
339 else
340 hdev->stored_num_keys = 0;
341
342 return rp->status;
343 }
344
345 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
346 struct sk_buff *skb)
347 {
348 struct hci_ev_status *rp = data;
349 void *sent;
350
351 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
352
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
354 if (!sent)
355 return rp->status;
356
357 hci_dev_lock(hdev);
358
359 if (hci_dev_test_flag(hdev, HCI_MGMT))
360 mgmt_set_local_name_complete(hdev, sent, rp->status);
361 else if (!rp->status)
362 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
363
364 hci_dev_unlock(hdev);
365
366 return rp->status;
367 }
368
369 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
370 struct sk_buff *skb)
371 {
372 struct hci_rp_read_local_name *rp = data;
373
374 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
375
376 if (rp->status)
377 return rp->status;
378
379 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
380 hci_dev_test_flag(hdev, HCI_CONFIG))
381 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
382
383 return rp->status;
384 }
385
386 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
387 struct sk_buff *skb)
388 {
389 struct hci_ev_status *rp = data;
390 void *sent;
391
392 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
393
394 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
395 if (!sent)
396 return rp->status;
397
398 hci_dev_lock(hdev);
399
400 if (!rp->status) {
401 __u8 param = *((__u8 *) sent);
402
403 if (param == AUTH_ENABLED)
404 set_bit(HCI_AUTH, &hdev->flags);
405 else
406 clear_bit(HCI_AUTH, &hdev->flags);
407 }
408
409 if (hci_dev_test_flag(hdev, HCI_MGMT))
410 mgmt_auth_enable_complete(hdev, rp->status);
411
412 hci_dev_unlock(hdev);
413
414 return rp->status;
415 }
416
417 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
418 struct sk_buff *skb)
419 {
420 struct hci_ev_status *rp = data;
421 __u8 param;
422 void *sent;
423
424 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
425
426 if (rp->status)
427 return rp->status;
428
429 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
430 if (!sent)
431 return rp->status;
432
433 param = *((__u8 *) sent);
434
435 if (param)
436 set_bit(HCI_ENCRYPT, &hdev->flags);
437 else
438 clear_bit(HCI_ENCRYPT, &hdev->flags);
439
440 return rp->status;
441 }
442
443 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
444 struct sk_buff *skb)
445 {
446 struct hci_ev_status *rp = data;
447 __u8 param;
448 void *sent;
449
450 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
451
452 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
453 if (!sent)
454 return rp->status;
455
456 param = *((__u8 *) sent);
457
458 hci_dev_lock(hdev);
459
460 if (rp->status) {
461 hdev->discov_timeout = 0;
462 goto done;
463 }
464
465 if (param & SCAN_INQUIRY)
466 set_bit(HCI_ISCAN, &hdev->flags);
467 else
468 clear_bit(HCI_ISCAN, &hdev->flags);
469
470 if (param & SCAN_PAGE)
471 set_bit(HCI_PSCAN, &hdev->flags);
472 else
473 clear_bit(HCI_PSCAN, &hdev->flags);
474
475 done:
476 hci_dev_unlock(hdev);
477
478 return rp->status;
479 }
480
481 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
482 struct sk_buff *skb)
483 {
484 struct hci_ev_status *rp = data;
485 struct hci_cp_set_event_filter *cp;
486 void *sent;
487
488 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
489
490 if (rp->status)
491 return rp->status;
492
493 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
494 if (!sent)
495 return rp->status;
496
497 cp = (struct hci_cp_set_event_filter *)sent;
498
499 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
500 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
501 else
502 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
503
504 return rp->status;
505 }
506
507 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
508 struct sk_buff *skb)
509 {
510 struct hci_rp_read_class_of_dev *rp = data;
511
512 if (WARN_ON(!hdev))
513 return HCI_ERROR_UNSPECIFIED;
514
515 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
516
517 if (rp->status)
518 return rp->status;
519
520 memcpy(hdev->dev_class, rp->dev_class, 3);
521
522 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
523 hdev->dev_class[1], hdev->dev_class[0]);
524
525 return rp->status;
526 }
527
528 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
529 struct sk_buff *skb)
530 {
531 struct hci_ev_status *rp = data;
532 void *sent;
533
534 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
535
536 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
537 if (!sent)
538 return rp->status;
539
540 hci_dev_lock(hdev);
541
542 if (!rp->status)
543 memcpy(hdev->dev_class, sent, 3);
544
545 if (hci_dev_test_flag(hdev, HCI_MGMT))
546 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
547
548 hci_dev_unlock(hdev);
549
550 return rp->status;
551 }
552
553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
554 struct sk_buff *skb)
555 {
556 struct hci_rp_read_voice_setting *rp = data;
557 __u16 setting;
558
559 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
560
561 if (rp->status)
562 return rp->status;
563
564 setting = __le16_to_cpu(rp->voice_setting);
565
566 if (hdev->voice_setting == setting)
567 return rp->status;
568
569 hdev->voice_setting = setting;
570
571 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
572
573 if (hdev->notify)
574 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
575
576 return rp->status;
577 }
578
579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
580 struct sk_buff *skb)
581 {
582 struct hci_ev_status *rp = data;
583 __u16 setting;
584 void *sent;
585
586 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
587
588 if (rp->status)
589 return rp->status;
590
591 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
592 if (!sent)
593 return rp->status;
594
595 setting = get_unaligned_le16(sent);
596
597 if (hdev->voice_setting == setting)
598 return rp->status;
599
600 hdev->voice_setting = setting;
601
602 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
603
604 if (hdev->notify)
605 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
606
607 return rp->status;
608 }
609
610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
611 struct sk_buff *skb)
612 {
613 struct hci_rp_read_num_supported_iac *rp = data;
614
615 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
616
617 if (rp->status)
618 return rp->status;
619
620 hdev->num_iac = rp->num_iac;
621
622 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
623
624 return rp->status;
625 }
626
627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
628 struct sk_buff *skb)
629 {
630 struct hci_ev_status *rp = data;
631 struct hci_cp_write_ssp_mode *sent;
632
633 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
634
635 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
636 if (!sent)
637 return rp->status;
638
639 hci_dev_lock(hdev);
640
641 if (!rp->status) {
642 if (sent->mode)
643 hdev->features[1][0] |= LMP_HOST_SSP;
644 else
645 hdev->features[1][0] &= ~LMP_HOST_SSP;
646 }
647
648 if (!rp->status) {
649 if (sent->mode)
650 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
651 else
652 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
653 }
654
655 hci_dev_unlock(hdev);
656
657 return rp->status;
658 }
659
660 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
661 struct sk_buff *skb)
662 {
663 struct hci_ev_status *rp = data;
664 struct hci_cp_write_sc_support *sent;
665
666 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
667
668 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
669 if (!sent)
670 return rp->status;
671
672 hci_dev_lock(hdev);
673
674 if (!rp->status) {
675 if (sent->support)
676 hdev->features[1][0] |= LMP_HOST_SC;
677 else
678 hdev->features[1][0] &= ~LMP_HOST_SC;
679 }
680
681 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
682 if (sent->support)
683 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
684 else
685 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
686 }
687
688 hci_dev_unlock(hdev);
689
690 return rp->status;
691 }
692
693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
694 struct sk_buff *skb)
695 {
696 struct hci_rp_read_local_version *rp = data;
697
698 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
699
700 if (rp->status)
701 return rp->status;
702
703 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
704 hci_dev_test_flag(hdev, HCI_CONFIG)) {
705 hdev->hci_ver = rp->hci_ver;
706 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
707 hdev->lmp_ver = rp->lmp_ver;
708 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
709 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
710 }
711
712 return rp->status;
713 }
714
715 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
716 struct sk_buff *skb)
717 {
718 struct hci_rp_read_enc_key_size *rp = data;
719 struct hci_conn *conn;
720 u16 handle;
721 u8 status = rp->status;
722
723 bt_dev_dbg(hdev, "status 0x%2.2x", status);
724
725 handle = le16_to_cpu(rp->handle);
726
727 hci_dev_lock(hdev);
728
729 conn = hci_conn_hash_lookup_handle(hdev, handle);
730 if (!conn) {
731 status = 0xFF;
732 goto done;
733 }
734
735 /* While unexpected, the read_enc_key_size command may fail. The most
736 * secure approach is to then assume the key size is 0 to force a
737 * disconnection.
738 */
739 if (status) {
740 bt_dev_err(hdev, "failed to read key size for handle %u",
741 handle);
742 conn->enc_key_size = 0;
743 } else {
744 conn->enc_key_size = rp->key_size;
745 status = 0;
746
747 if (conn->enc_key_size < hdev->min_enc_key_size) {
748 /* In the slave role, conn->state has already been set to
749 * BT_CONNECTED and the l2cap conn req might not have been
750 * received yet, so at this moment the l2cap layer does
751 * almost nothing with the non-zero status.
752 * Therefore we also clear the encrypt related bits, and the
753 * handler of the l2cap conn req will then get the right secure
754 * state at a later time.
755 */
756 status = HCI_ERROR_AUTH_FAILURE;
757 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
758 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
759 }
760 }
761
762 hci_encrypt_cfm(conn, status);
763
764 done:
765 hci_dev_unlock(hdev);
766
767 return status;
768 }
769
770 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
771 struct sk_buff *skb)
772 {
773 struct hci_rp_read_local_commands *rp = data;
774
775 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
776
777 if (rp->status)
778 return rp->status;
779
780 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
781 hci_dev_test_flag(hdev, HCI_CONFIG))
782 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
783
784 return rp->status;
785 }
786
787 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
788 struct sk_buff *skb)
789 {
790 struct hci_rp_read_auth_payload_to *rp = data;
791 struct hci_conn *conn;
792
793 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
794
795 if (rp->status)
796 return rp->status;
797
798 hci_dev_lock(hdev);
799
800 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
801 if (conn)
802 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
803
804 hci_dev_unlock(hdev);
805
806 return rp->status;
807 }
808
809 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
810 struct sk_buff *skb)
811 {
812 struct hci_rp_write_auth_payload_to *rp = data;
813 struct hci_conn *conn;
814 void *sent;
815
816 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
817
818 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
819 if (!sent)
820 return rp->status;
821
822 hci_dev_lock(hdev);
823
824 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
825 if (!conn) {
826 rp->status = 0xff;
827 goto unlock;
828 }
829
830 if (!rp->status)
831 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
832
833 unlock:
834 hci_dev_unlock(hdev);
835
836 return rp->status;
837 }
838
839 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
840 struct sk_buff *skb)
841 {
842 struct hci_rp_read_local_features *rp = data;
843
844 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
845
846 if (rp->status)
847 return rp->status;
848
849 memcpy(hdev->features, rp->features, 8);
850
851 /* Adjust default settings according to features
852 * supported by device. */
853
854 if (hdev->features[0][0] & LMP_3SLOT)
855 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
856
857 if (hdev->features[0][0] & LMP_5SLOT)
858 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
859
860 if (hdev->features[0][1] & LMP_HV2) {
861 hdev->pkt_type |= (HCI_HV2);
862 hdev->esco_type |= (ESCO_HV2);
863 }
864
865 if (hdev->features[0][1] & LMP_HV3) {
866 hdev->pkt_type |= (HCI_HV3);
867 hdev->esco_type |= (ESCO_HV3);
868 }
869
870 if (lmp_esco_capable(hdev))
871 hdev->esco_type |= (ESCO_EV3);
872
873 if (hdev->features[0][4] & LMP_EV4)
874 hdev->esco_type |= (ESCO_EV4);
875
876 if (hdev->features[0][4] & LMP_EV5)
877 hdev->esco_type |= (ESCO_EV5);
878
879 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
880 hdev->esco_type |= (ESCO_2EV3);
881
882 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
883 hdev->esco_type |= (ESCO_3EV3);
884
885 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
886 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
887
888 return rp->status;
889 }
890
891 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
892 struct sk_buff *skb)
893 {
894 struct hci_rp_read_local_ext_features *rp = data;
895
896 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
897
898 if (rp->status)
899 return rp->status;
900
901 if (hdev->max_page < rp->max_page) {
902 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
903 &hdev->quirks))
904 bt_dev_warn(hdev, "broken local ext features page 2");
905 else
906 hdev->max_page = rp->max_page;
907 }
908
909 if (rp->page < HCI_MAX_PAGES)
910 memcpy(hdev->features[rp->page], rp->features, 8);
911
912 return rp->status;
913 }
914
915 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
916 struct sk_buff *skb)
917 {
918 struct hci_rp_read_buffer_size *rp = data;
919
920 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
921
922 if (rp->status)
923 return rp->status;
924
925 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
926 hdev->sco_mtu = rp->sco_mtu;
927 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
928 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
929
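/* Controllers with the buffer-size fixup quirk need their reported SCO MTU
 * and packet count overridden with known-good defaults.
 */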
930 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
931 hdev->sco_mtu = 64;
932 hdev->sco_pkts = 8;
933 }
934
935 hdev->acl_cnt = hdev->acl_pkts;
936 hdev->sco_cnt = hdev->sco_pkts;
937
938 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
939 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
940
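/* A zero ACL MTU or packet count would leave ACL flow control without any
 * credits, so treat such a report as invalid.
 */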
941 if (!hdev->acl_mtu || !hdev->acl_pkts)
942 return HCI_ERROR_INVALID_PARAMETERS;
943
944 return rp->status;
945 }
946
947 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
948 struct sk_buff *skb)
949 {
950 struct hci_rp_read_bd_addr *rp = data;
951
952 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
953
954 if (rp->status)
955 return rp->status;
956
957 if (test_bit(HCI_INIT, &hdev->flags))
958 bacpy(&hdev->bdaddr, &rp->bdaddr);
959
960 if (hci_dev_test_flag(hdev, HCI_SETUP))
961 bacpy(&hdev->setup_addr, &rp->bdaddr);
962
963 return rp->status;
964 }
965
966 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
967 struct sk_buff *skb)
968 {
969 struct hci_rp_read_local_pairing_opts *rp = data;
970
971 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
972
973 if (rp->status)
974 return rp->status;
975
976 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
977 hci_dev_test_flag(hdev, HCI_CONFIG)) {
978 hdev->pairing_opts = rp->pairing_opts;
979 hdev->max_enc_key_size = rp->max_key_size;
980 }
981
982 return rp->status;
983 }
984
985 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
986 struct sk_buff *skb)
987 {
988 struct hci_rp_read_page_scan_activity *rp = data;
989
990 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
991
992 if (rp->status)
993 return rp->status;
994
995 if (test_bit(HCI_INIT, &hdev->flags)) {
996 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
997 hdev->page_scan_window = __le16_to_cpu(rp->window);
998 }
999
1000 return rp->status;
1001 }
1002
1003 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1004 struct sk_buff *skb)
1005 {
1006 struct hci_ev_status *rp = data;
1007 struct hci_cp_write_page_scan_activity *sent;
1008
1009 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1010
1011 if (rp->status)
1012 return rp->status;
1013
1014 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1015 if (!sent)
1016 return rp->status;
1017
1018 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1019 hdev->page_scan_window = __le16_to_cpu(sent->window);
1020
1021 return rp->status;
1022 }
1023
1024 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_read_page_scan_type *rp = data;
1028
1029 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1030
1031 if (rp->status)
1032 return rp->status;
1033
1034 if (test_bit(HCI_INIT, &hdev->flags))
1035 hdev->page_scan_type = rp->type;
1036
1037 return rp->status;
1038 }
1039
1040 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1041 struct sk_buff *skb)
1042 {
1043 struct hci_ev_status *rp = data;
1044 u8 *type;
1045
1046 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1047
1048 if (rp->status)
1049 return rp->status;
1050
1051 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1052 if (type)
1053 hdev->page_scan_type = *type;
1054
1055 return rp->status;
1056 }
1057
1058 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1059 struct sk_buff *skb)
1060 {
1061 struct hci_rp_read_clock *rp = data;
1062 struct hci_cp_read_clock *cp;
1063 struct hci_conn *conn;
1064
1065 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1066
1067 if (rp->status)
1068 return rp->status;
1069
1070 hci_dev_lock(hdev);
1071
1072 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1073 if (!cp)
1074 goto unlock;
1075
1076 if (cp->which == 0x00) {
1077 hdev->clock = le32_to_cpu(rp->clock);
1078 goto unlock;
1079 }
1080
1081 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1082 if (conn) {
1083 conn->clock = le32_to_cpu(rp->clock);
1084 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1085 }
1086
1087 unlock:
1088 hci_dev_unlock(hdev);
1089 return rp->status;
1090 }
1091
1092 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1093 struct sk_buff *skb)
1094 {
1095 struct hci_rp_read_inq_rsp_tx_power *rp = data;
1096
1097 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1098
1099 if (rp->status)
1100 return rp->status;
1101
1102 hdev->inq_tx_power = rp->tx_power;
1103
1104 return rp->status;
1105 }
1106
1107 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1108 struct sk_buff *skb)
1109 {
1110 struct hci_rp_read_def_err_data_reporting *rp = data;
1111
1112 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1113
1114 if (rp->status)
1115 return rp->status;
1116
1117 hdev->err_data_reporting = rp->err_data_reporting;
1118
1119 return rp->status;
1120 }
1121
1122 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1123 struct sk_buff *skb)
1124 {
1125 struct hci_ev_status *rp = data;
1126 struct hci_cp_write_def_err_data_reporting *cp;
1127
1128 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1129
1130 if (rp->status)
1131 return rp->status;
1132
1133 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1134 if (!cp)
1135 return rp->status;
1136
1137 hdev->err_data_reporting = cp->err_data_reporting;
1138
1139 return rp->status;
1140 }
1141
1142 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1143 struct sk_buff *skb)
1144 {
1145 struct hci_rp_pin_code_reply *rp = data;
1146 struct hci_cp_pin_code_reply *cp;
1147 struct hci_conn *conn;
1148
1149 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1150
1151 hci_dev_lock(hdev);
1152
1153 if (hci_dev_test_flag(hdev, HCI_MGMT))
1154 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1155
1156 if (rp->status)
1157 goto unlock;
1158
1159 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1160 if (!cp)
1161 goto unlock;
1162
1163 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1164 if (conn)
1165 conn->pin_length = cp->pin_len;
1166
1167 unlock:
1168 hci_dev_unlock(hdev);
1169 return rp->status;
1170 }
1171
1172 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1173 struct sk_buff *skb)
1174 {
1175 struct hci_rp_pin_code_neg_reply *rp = data;
1176
1177 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1178
1179 hci_dev_lock(hdev);
1180
1181 if (hci_dev_test_flag(hdev, HCI_MGMT))
1182 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1183 rp->status);
1184
1185 hci_dev_unlock(hdev);
1186
1187 return rp->status;
1188 }
1189
1190 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1191 struct sk_buff *skb)
1192 {
1193 struct hci_rp_le_read_buffer_size *rp = data;
1194
1195 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1196
1197 if (rp->status)
1198 return rp->status;
1199
1200 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1201 hdev->le_pkts = rp->le_max_pkt;
1202
1203 hdev->le_cnt = hdev->le_pkts;
1204
1205 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1206
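/* An LE ACL MTU below the spec minimum indicates a broken response. */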
1207 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
1208 return HCI_ERROR_INVALID_PARAMETERS;
1209
1210 return rp->status;
1211 }
1212
1213 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1214 struct sk_buff *skb)
1215 {
1216 struct hci_rp_le_read_local_features *rp = data;
1217
1218 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1219
1220 if (rp->status)
1221 return rp->status;
1222
1223 memcpy(hdev->le_features, rp->features, 8);
1224
1225 return rp->status;
1226 }
1227
1228 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1229 struct sk_buff *skb)
1230 {
1231 struct hci_rp_le_read_adv_tx_power *rp = data;
1232
1233 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1234
1235 if (rp->status)
1236 return rp->status;
1237
1238 hdev->adv_tx_power = rp->tx_power;
1239
1240 return rp->status;
1241 }
1242
1243 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1244 struct sk_buff *skb)
1245 {
1246 struct hci_rp_user_confirm_reply *rp = data;
1247
1248 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1249
1250 hci_dev_lock(hdev);
1251
1252 if (hci_dev_test_flag(hdev, HCI_MGMT))
1253 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1254 rp->status);
1255
1256 hci_dev_unlock(hdev);
1257
1258 return rp->status;
1259 }
1260
1261 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1262 struct sk_buff *skb)
1263 {
1264 struct hci_rp_user_confirm_reply *rp = data;
1265
1266 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1267
1268 hci_dev_lock(hdev);
1269
1270 if (hci_dev_test_flag(hdev, HCI_MGMT))
1271 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1272 ACL_LINK, 0, rp->status);
1273
1274 hci_dev_unlock(hdev);
1275
1276 return rp->status;
1277 }
1278
1279 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1280 struct sk_buff *skb)
1281 {
1282 struct hci_rp_user_confirm_reply *rp = data;
1283
1284 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1285
1286 hci_dev_lock(hdev);
1287
1288 if (hci_dev_test_flag(hdev, HCI_MGMT))
1289 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1290 0, rp->status);
1291
1292 hci_dev_unlock(hdev);
1293
1294 return rp->status;
1295 }
1296
1297 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1298 struct sk_buff *skb)
1299 {
1300 struct hci_rp_user_confirm_reply *rp = data;
1301
1302 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1303
1304 hci_dev_lock(hdev);
1305
1306 if (hci_dev_test_flag(hdev, HCI_MGMT))
1307 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1308 ACL_LINK, 0, rp->status);
1309
1310 hci_dev_unlock(hdev);
1311
1312 return rp->status;
1313 }
1314
1315 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1316 struct sk_buff *skb)
1317 {
1318 struct hci_rp_read_local_oob_data *rp = data;
1319
1320 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1321
1322 return rp->status;
1323 }
1324
1325 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1326 struct sk_buff *skb)
1327 {
1328 struct hci_rp_read_local_oob_ext_data *rp = data;
1329
1330 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1331
1332 return rp->status;
1333 }
1334
1335 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1336 struct sk_buff *skb)
1337 {
1338 struct hci_ev_status *rp = data;
1339 bdaddr_t *sent;
1340
1341 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1342
1343 if (rp->status)
1344 return rp->status;
1345
1346 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1347 if (!sent)
1348 return rp->status;
1349
1350 hci_dev_lock(hdev);
1351
1352 bacpy(&hdev->random_addr, sent);
1353
1354 if (!bacmp(&hdev->rpa, sent)) {
1355 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1356 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1357 secs_to_jiffies(hdev->rpa_timeout));
1358 }
1359
1360 hci_dev_unlock(hdev);
1361
1362 return rp->status;
1363 }
1364
1365 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1366 struct sk_buff *skb)
1367 {
1368 struct hci_ev_status *rp = data;
1369 struct hci_cp_le_set_default_phy *cp;
1370
1371 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1372
1373 if (rp->status)
1374 return rp->status;
1375
1376 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1377 if (!cp)
1378 return rp->status;
1379
1380 hci_dev_lock(hdev);
1381
1382 hdev->le_tx_def_phys = cp->tx_phys;
1383 hdev->le_rx_def_phys = cp->rx_phys;
1384
1385 hci_dev_unlock(hdev);
1386
1387 return rp->status;
1388 }
1389
1390 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1391 struct sk_buff *skb)
1392 {
1393 struct hci_ev_status *rp = data;
1394 struct hci_cp_le_set_adv_set_rand_addr *cp;
1395 struct adv_info *adv;
1396
1397 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1398
1399 if (rp->status)
1400 return rp->status;
1401
1402 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1403 /* Update only the matching adv instance, since handle 0x00 shall be using
1404 * HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
1405 * non-extended advertising.
1406 */
1407 if (!cp || !cp->handle)
1408 return rp->status;
1409
1410 hci_dev_lock(hdev);
1411
1412 adv = hci_find_adv_instance(hdev, cp->handle);
1413 if (adv) {
1414 bacpy(&adv->random_addr, &cp->bdaddr);
1415 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1416 adv->rpa_expired = false;
1417 queue_delayed_work(hdev->workqueue,
1418 &adv->rpa_expired_cb,
1419 secs_to_jiffies(hdev->rpa_timeout));
1420 }
1421 }
1422
1423 hci_dev_unlock(hdev);
1424
1425 return rp->status;
1426 }
1427
1428 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1429 struct sk_buff *skb)
1430 {
1431 struct hci_ev_status *rp = data;
1432 u8 *instance;
1433 int err;
1434
1435 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1436
1437 if (rp->status)
1438 return rp->status;
1439
1440 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1441 if (!instance)
1442 return rp->status;
1443
1444 hci_dev_lock(hdev);
1445
1446 err = hci_remove_adv_instance(hdev, *instance);
1447 if (!err)
1448 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1449 *instance);
1450
1451 hci_dev_unlock(hdev);
1452
1453 return rp->status;
1454 }
1455
1456 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1457 struct sk_buff *skb)
1458 {
1459 struct hci_ev_status *rp = data;
1460 struct adv_info *adv, *n;
1461 int err;
1462
1463 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1464
1465 if (rp->status)
1466 return rp->status;
1467
1468 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1469 return rp->status;
1470
1471 hci_dev_lock(hdev);
1472
1473 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1474 u8 instance = adv->instance;
1475
1476 err = hci_remove_adv_instance(hdev, instance);
1477 if (!err)
1478 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1479 hdev, instance);
1480 }
1481
1482 hci_dev_unlock(hdev);
1483
1484 return rp->status;
1485 }
1486
1487 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1488 struct sk_buff *skb)
1489 {
1490 struct hci_rp_le_read_transmit_power *rp = data;
1491
1492 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1493
1494 if (rp->status)
1495 return rp->status;
1496
1497 hdev->min_le_tx_power = rp->min_le_tx_power;
1498 hdev->max_le_tx_power = rp->max_le_tx_power;
1499
1500 return rp->status;
1501 }
1502
1503 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1504 struct sk_buff *skb)
1505 {
1506 struct hci_ev_status *rp = data;
1507 struct hci_cp_le_set_privacy_mode *cp;
1508 struct hci_conn_params *params;
1509
1510 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1511
1512 if (rp->status)
1513 return rp->status;
1514
1515 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1516 if (!cp)
1517 return rp->status;
1518
1519 hci_dev_lock(hdev);
1520
1521 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1522 if (params)
1523 WRITE_ONCE(params->privacy_mode, cp->mode);
1524
1525 hci_dev_unlock(hdev);
1526
1527 return rp->status;
1528 }
1529
1530 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1531 struct sk_buff *skb)
1532 {
1533 struct hci_ev_status *rp = data;
1534 __u8 *sent;
1535
1536 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1537
1538 if (rp->status)
1539 return rp->status;
1540
1541 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1542 if (!sent)
1543 return rp->status;
1544
1545 hci_dev_lock(hdev);
1546
1547 /* If we're doing connection initiation as peripheral, set a
1548 * timeout in case something goes wrong.
1549 */
1550 if (*sent) {
1551 struct hci_conn *conn;
1552
1553 hci_dev_set_flag(hdev, HCI_LE_ADV);
1554
1555 conn = hci_lookup_le_connect(hdev);
1556 if (conn)
1557 queue_delayed_work(hdev->workqueue,
1558 &conn->le_conn_timeout,
1559 conn->conn_timeout);
1560 } else {
1561 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1562 }
1563
1564 hci_dev_unlock(hdev);
1565
1566 return rp->status;
1567 }
1568
1569 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1570 struct sk_buff *skb)
1571 {
1572 struct hci_cp_le_set_ext_adv_enable *cp;
1573 struct hci_cp_ext_adv_set *set;
1574 struct adv_info *adv = NULL, *n;
1575 struct hci_ev_status *rp = data;
1576
1577 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1578
1579 if (rp->status)
1580 return rp->status;
1581
1582 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1583 if (!cp)
1584 return rp->status;
1585
1586 set = (void *)cp->data;
1587
1588 hci_dev_lock(hdev);
1589
1590 if (cp->num_of_sets)
1591 adv = hci_find_adv_instance(hdev, set->handle);
1592
1593 if (cp->enable) {
1594 struct hci_conn *conn;
1595
1596 hci_dev_set_flag(hdev, HCI_LE_ADV);
1597
1598 if (adv && !adv->periodic)
1599 adv->enabled = true;
1600
1601 conn = hci_lookup_le_connect(hdev);
1602 if (conn)
1603 queue_delayed_work(hdev->workqueue,
1604 &conn->le_conn_timeout,
1605 conn->conn_timeout);
1606 } else {
1607 if (cp->num_of_sets) {
1608 if (adv)
1609 adv->enabled = false;
1610
1611 /* If just one instance was disabled, check if there are
1612 * any other instances enabled before clearing HCI_LE_ADV
1613 */
1614 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1615 list) {
1616 if (adv->enabled)
1617 goto unlock;
1618 }
1619 } else {
1620 /* All instances shall be considered disabled */
1621 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1622 list)
1623 adv->enabled = false;
1624 }
1625
1626 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1627 }
1628
1629 unlock:
1630 hci_dev_unlock(hdev);
1631 return rp->status;
1632 }
1633
1634 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1635 struct sk_buff *skb)
1636 {
1637 struct hci_cp_le_set_scan_param *cp;
1638 struct hci_ev_status *rp = data;
1639
1640 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1641
1642 if (rp->status)
1643 return rp->status;
1644
1645 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1646 if (!cp)
1647 return rp->status;
1648
1649 hci_dev_lock(hdev);
1650
1651 hdev->le_scan_type = cp->type;
1652
1653 hci_dev_unlock(hdev);
1654
1655 return rp->status;
1656 }
1657
1658 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1659 struct sk_buff *skb)
1660 {
1661 struct hci_cp_le_set_ext_scan_params *cp;
1662 struct hci_ev_status *rp = data;
1663 struct hci_cp_le_scan_phy_params *phy_param;
1664
1665 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1666
1667 if (rp->status)
1668 return rp->status;
1669
1670 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1671 if (!cp)
1672 return rp->status;
1673
1674 phy_param = (void *)cp->data;
1675
1676 hci_dev_lock(hdev);
1677
1678 hdev->le_scan_type = phy_param->type;
1679
1680 hci_dev_unlock(hdev);
1681
1682 return rp->status;
1683 }
1684
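/* A pending report exists when the last seen advertising address is set. */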
1685 static bool has_pending_adv_report(struct hci_dev *hdev)
1686 {
1687 struct discovery_state *d = &hdev->discovery;
1688
1689 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1690 }
1691
1692 static void clear_pending_adv_report(struct hci_dev *hdev)
1693 {
1694 struct discovery_state *d = &hdev->discovery;
1695
1696 bacpy(&d->last_adv_addr, BDADDR_ANY);
1697 d->last_adv_data_len = 0;
1698 }
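/* Cache the most recent advertising report so it can later be merged with a
 * matching scan response or flushed when scanning is disabled.
 */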
1699
1700 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1701 u8 bdaddr_type, s8 rssi, u32 flags,
1702 u8 *data, u8 len)
1703 {
1704 struct discovery_state *d = &hdev->discovery;
1705
1706 if (len > max_adv_len(hdev))
1707 return;
1708
1709 bacpy(&d->last_adv_addr, bdaddr);
1710 d->last_adv_addr_type = bdaddr_type;
1711 d->last_adv_rssi = rssi;
1712 d->last_adv_flags = flags;
1713 memcpy(d->last_adv_data, data, len);
1714 d->last_adv_data_len = len;
1715 }
1716
1717 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1718 {
1719 hci_dev_lock(hdev);
1720
1721 switch (enable) {
1722 case LE_SCAN_ENABLE:
1723 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1724 if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
1725 clear_pending_adv_report(hdev);
1726 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1727 }
1728 break;
1729
1730 case LE_SCAN_DISABLE:
1731 /* We do this here instead of when setting DISCOVERY_STOPPED
1732 * since the latter would potentially require waiting for
1733 * inquiry to stop too.
1734 */
1735 if (has_pending_adv_report(hdev)) {
1736 struct discovery_state *d = &hdev->discovery;
1737
1738 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1739 d->last_adv_addr_type, NULL,
1740 d->last_adv_rssi, d->last_adv_flags,
1741 d->last_adv_data,
1742 d->last_adv_data_len, NULL, 0, 0);
1743 }
1744
1745 /* Cancel this timer so that we don't try to disable scanning
1746 * when it's already disabled.
1747 */
1748 cancel_delayed_work(&hdev->le_scan_disable);
1749
1750 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1751
1752 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1753 * interrupted scanning due to a connect request. Therefore
1754 * mark discovery as stopped.
1755 */
1756 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1757 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1758 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1759 hdev->discovery.state == DISCOVERY_FINDING)
1760 queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1761
1762 break;
1763
1764 default:
1765 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1766 enable);
1767 break;
1768 }
1769
1770 hci_dev_unlock(hdev);
1771 }
1772
1773 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1774 struct sk_buff *skb)
1775 {
1776 struct hci_cp_le_set_scan_enable *cp;
1777 struct hci_ev_status *rp = data;
1778
1779 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1780
1781 if (rp->status)
1782 return rp->status;
1783
1784 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1785 if (!cp)
1786 return rp->status;
1787
1788 le_set_scan_enable_complete(hdev, cp->enable);
1789
1790 return rp->status;
1791 }
1792
1793 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1794 struct sk_buff *skb)
1795 {
1796 struct hci_cp_le_set_ext_scan_enable *cp;
1797 struct hci_ev_status *rp = data;
1798
1799 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1800
1801 if (rp->status)
1802 return rp->status;
1803
1804 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1805 if (!cp)
1806 return rp->status;
1807
1808 le_set_scan_enable_complete(hdev, cp->enable);
1809
1810 return rp->status;
1811 }
1812
1813 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1814 struct sk_buff *skb)
1815 {
1816 struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1817
1818 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1819 rp->num_of_sets);
1820
1821 if (rp->status)
1822 return rp->status;
1823
1824 hdev->le_num_of_adv_sets = rp->num_of_sets;
1825
1826 return rp->status;
1827 }
1828
1829 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1830 struct sk_buff *skb)
1831 {
1832 struct hci_rp_le_read_accept_list_size *rp = data;
1833
1834 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1835
1836 if (rp->status)
1837 return rp->status;
1838
1839 hdev->le_accept_list_size = rp->size;
1840
1841 return rp->status;
1842 }
1843
1844 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1845 struct sk_buff *skb)
1846 {
1847 struct hci_ev_status *rp = data;
1848
1849 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1850
1851 if (rp->status)
1852 return rp->status;
1853
1854 hci_dev_lock(hdev);
1855 hci_bdaddr_list_clear(&hdev->le_accept_list);
1856 hci_dev_unlock(hdev);
1857
1858 return rp->status;
1859 }
1860
1861 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1862 struct sk_buff *skb)
1863 {
1864 struct hci_cp_le_add_to_accept_list *sent;
1865 struct hci_ev_status *rp = data;
1866
1867 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1868
1869 if (rp->status)
1870 return rp->status;
1871
1872 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1873 if (!sent)
1874 return rp->status;
1875
1876 hci_dev_lock(hdev);
1877 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1878 sent->bdaddr_type);
1879 hci_dev_unlock(hdev);
1880
1881 return rp->status;
1882 }
1883
1884 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1885 struct sk_buff *skb)
1886 {
1887 struct hci_cp_le_del_from_accept_list *sent;
1888 struct hci_ev_status *rp = data;
1889
1890 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1891
1892 if (rp->status)
1893 return rp->status;
1894
1895 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1896 if (!sent)
1897 return rp->status;
1898
1899 hci_dev_lock(hdev);
1900 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1901 sent->bdaddr_type);
1902 hci_dev_unlock(hdev);
1903
1904 return rp->status;
1905 }
1906
1907 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1908 struct sk_buff *skb)
1909 {
1910 struct hci_rp_le_read_supported_states *rp = data;
1911
1912 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1913
1914 if (rp->status)
1915 return rp->status;
1916
1917 memcpy(hdev->le_states, rp->le_states, 8);
1918
1919 return rp->status;
1920 }
1921
1922 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1923 struct sk_buff *skb)
1924 {
1925 struct hci_rp_le_read_def_data_len *rp = data;
1926
1927 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1928
1929 if (rp->status)
1930 return rp->status;
1931
1932 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1933 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1934
1935 return rp->status;
1936 }
1937
1938 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1939 struct sk_buff *skb)
1940 {
1941 struct hci_cp_le_write_def_data_len *sent;
1942 struct hci_ev_status *rp = data;
1943
1944 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1945
1946 if (rp->status)
1947 return rp->status;
1948
1949 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1950 if (!sent)
1951 return rp->status;
1952
1953 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1954 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1955
1956 return rp->status;
1957 }
1958
1959 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1960 struct sk_buff *skb)
1961 {
1962 struct hci_cp_le_add_to_resolv_list *sent;
1963 struct hci_ev_status *rp = data;
1964
1965 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1966
1967 if (rp->status)
1968 return rp->status;
1969
1970 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1971 if (!sent)
1972 return rp->status;
1973
1974 hci_dev_lock(hdev);
1975 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1976 sent->bdaddr_type, sent->peer_irk,
1977 sent->local_irk);
1978 hci_dev_unlock(hdev);
1979
1980 return rp->status;
1981 }
1982
1983 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
1984 struct sk_buff *skb)
1985 {
1986 struct hci_cp_le_del_from_resolv_list *sent;
1987 struct hci_ev_status *rp = data;
1988
1989 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1990
1991 if (rp->status)
1992 return rp->status;
1993
1994 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1995 if (!sent)
1996 return rp->status;
1997
1998 hci_dev_lock(hdev);
1999 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2000 sent->bdaddr_type);
2001 hci_dev_unlock(hdev);
2002
2003 return rp->status;
2004 }
2005
2006 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2007 struct sk_buff *skb)
2008 {
2009 struct hci_ev_status *rp = data;
2010
2011 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2012
2013 if (rp->status)
2014 return rp->status;
2015
2016 hci_dev_lock(hdev);
2017 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2018 hci_dev_unlock(hdev);
2019
2020 return rp->status;
2021 }
2022
2023 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2024 struct sk_buff *skb)
2025 {
2026 struct hci_rp_le_read_resolv_list_size *rp = data;
2027
2028 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2029
2030 if (rp->status)
2031 return rp->status;
2032
2033 hdev->le_resolv_list_size = rp->size;
2034
2035 return rp->status;
2036 }
2037
2038 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2039 struct sk_buff *skb)
2040 {
2041 struct hci_ev_status *rp = data;
2042 __u8 *sent;
2043
2044 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2045
2046 if (rp->status)
2047 return rp->status;
2048
2049 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2050 if (!sent)
2051 return rp->status;
2052
2053 hci_dev_lock(hdev);
2054
2055 if (*sent)
2056 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2057 else
2058 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2059
2060 hci_dev_unlock(hdev);
2061
2062 return rp->status;
2063 }
2064
2065 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2066 struct sk_buff *skb)
2067 {
2068 struct hci_rp_le_read_max_data_len *rp = data;
2069
2070 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2071
2072 if (rp->status)
2073 return rp->status;
2074
2075 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2076 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2077 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2078 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2079
2080 return rp->status;
2081 }
2082
2083 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2084 struct sk_buff *skb)
2085 {
2086 struct hci_cp_write_le_host_supported *sent;
2087 struct hci_ev_status *rp = data;
2088
2089 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2090
2091 if (rp->status)
2092 return rp->status;
2093
2094 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2095 if (!sent)
2096 return rp->status;
2097
2098 hci_dev_lock(hdev);
2099
2100 if (sent->le) {
2101 hdev->features[1][0] |= LMP_HOST_LE;
2102 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2103 } else {
2104 hdev->features[1][0] &= ~LMP_HOST_LE;
2105 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2106 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2107 }
2108
2109 if (sent->simul)
2110 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2111 else
2112 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2113
2114 hci_dev_unlock(hdev);
2115
2116 return rp->status;
2117 }
2118
2119 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2120 struct sk_buff *skb)
2121 {
2122 struct hci_cp_le_set_adv_param *cp;
2123 struct hci_ev_status *rp = data;
2124
2125 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2126
2127 if (rp->status)
2128 return rp->status;
2129
2130 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2131 if (!cp)
2132 return rp->status;
2133
2134 hci_dev_lock(hdev);
2135 hdev->adv_addr_type = cp->own_address_type;
2136 hci_dev_unlock(hdev);
2137
2138 return rp->status;
2139 }
2140
2141 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2142 struct sk_buff *skb)
2143 {
2144 struct hci_rp_le_set_ext_adv_params *rp = data;
2145 struct hci_cp_le_set_ext_adv_params *cp;
2146 struct adv_info *adv_instance;
2147
2148 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2149
2150 if (rp->status)
2151 return rp->status;
2152
2153 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2154 if (!cp)
2155 return rp->status;
2156
2157 hci_dev_lock(hdev);
2158 hdev->adv_addr_type = cp->own_addr_type;
2159 if (!cp->handle) {
2160 /* Store in hdev for instance 0 */
2161 hdev->adv_tx_power = rp->tx_power;
2162 } else {
2163 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2164 if (adv_instance)
2165 adv_instance->tx_power = rp->tx_power;
2166 }
2167 /* Update adv data as tx power is known now */
2168 hci_update_adv_data(hdev, cp->handle);
2169
2170 hci_dev_unlock(hdev);
2171
2172 return rp->status;
2173 }
2174
2175 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2176 struct sk_buff *skb)
2177 {
2178 struct hci_rp_read_rssi *rp = data;
2179 struct hci_conn *conn;
2180
2181 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2182
2183 if (rp->status)
2184 return rp->status;
2185
2186 hci_dev_lock(hdev);
2187
2188 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2189 if (conn)
2190 conn->rssi = rp->rssi;
2191
2192 hci_dev_unlock(hdev);
2193
2194 return rp->status;
2195 }
2196
2197 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2198 struct sk_buff *skb)
2199 {
2200 struct hci_cp_read_tx_power *sent;
2201 struct hci_rp_read_tx_power *rp = data;
2202 struct hci_conn *conn;
2203
2204 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2205
2206 if (rp->status)
2207 return rp->status;
2208
2209 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2210 if (!sent)
2211 return rp->status;
2212
2213 hci_dev_lock(hdev);
2214
2215 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2216 if (!conn)
2217 goto unlock;
2218
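	/* The Type parameter of HCI_Read_Transmit_Power_Level selects which
	 * value the controller reported: 0x00 is the current transmit power
	 * level and 0x01 the maximum, so mirror it into the matching field.
	 */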
2219 switch (sent->type) {
2220 case 0x00:
2221 conn->tx_power = rp->tx_power;
2222 break;
2223 case 0x01:
2224 conn->max_tx_power = rp->tx_power;
2225 break;
2226 }
2227
2228 unlock:
2229 hci_dev_unlock(hdev);
2230 return rp->status;
2231 }
2232
2233 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2234 struct sk_buff *skb)
2235 {
2236 struct hci_ev_status *rp = data;
2237 u8 *mode;
2238
2239 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2240
2241 if (rp->status)
2242 return rp->status;
2243
2244 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2245 if (mode)
2246 hdev->ssp_debug_mode = *mode;
2247
2248 return rp->status;
2249 }
2250
2251 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2252 {
2253 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2254
2255 if (status)
2256 return;
2257
2258 if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2259 set_bit(HCI_INQUIRY, &hdev->flags);
2260 }
2261
2262 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2263 {
2264 struct hci_cp_create_conn *cp;
2265 struct hci_conn *conn;
2266
2267 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2268
2269 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2270 if (!cp)
2271 return;
2272
2273 hci_dev_lock(hdev);
2274
2275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2276
2277 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2278
2279 if (status) {
2280 if (conn && conn->state == BT_CONNECT) {
2281 conn->state = BT_CLOSED;
2282 hci_connect_cfm(conn, status);
2283 hci_conn_del(conn);
2284 }
2285 } else {
2286 if (!conn) {
2287 conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2288 HCI_ROLE_MASTER);
2289 if (IS_ERR(conn))
2290 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
2291 }
2292 }
2293
2294 hci_dev_unlock(hdev);
2295 }
2296
2297 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2298 {
2299 struct hci_cp_add_sco *cp;
2300 struct hci_conn *acl;
2301 struct hci_link *link;
2302 __u16 handle;
2303
2304 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2305
2306 if (!status)
2307 return;
2308
2309 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2310 if (!cp)
2311 return;
2312
2313 handle = __le16_to_cpu(cp->handle);
2314
2315 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2316
2317 hci_dev_lock(hdev);
2318
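	/* The (e)SCO channel hangs off its parent ACL connection; on a failed
	 * Add SCO command, locate the pending SCO link on that ACL and tear
	 * it down.
	 */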
2319 acl = hci_conn_hash_lookup_handle(hdev, handle);
2320 if (acl) {
2321 link = list_first_entry_or_null(&acl->link_list,
2322 struct hci_link, list);
2323 if (link && link->conn) {
2324 link->conn->state = BT_CLOSED;
2325
2326 hci_connect_cfm(link->conn, status);
2327 hci_conn_del(link->conn);
2328 }
2329 }
2330
2331 hci_dev_unlock(hdev);
2332 }
2333
2334 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2335 {
2336 struct hci_cp_auth_requested *cp;
2337 struct hci_conn *conn;
2338
2339 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2340
2341 if (!status)
2342 return;
2343
2344 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2345 if (!cp)
2346 return;
2347
2348 hci_dev_lock(hdev);
2349
2350 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2351 if (conn) {
2352 if (conn->state == BT_CONFIG) {
2353 hci_connect_cfm(conn, status);
2354 hci_conn_drop(conn);
2355 }
2356 }
2357
2358 hci_dev_unlock(hdev);
2359 }
2360
2361 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2362 {
2363 struct hci_cp_set_conn_encrypt *cp;
2364 struct hci_conn *conn;
2365
2366 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2367
2368 if (!status)
2369 return;
2370
2371 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2372 if (!cp)
2373 return;
2374
2375 hci_dev_lock(hdev);
2376
2377 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2378 if (conn) {
2379 if (conn->state == BT_CONFIG) {
2380 hci_connect_cfm(conn, status);
2381 hci_conn_drop(conn);
2382 }
2383 }
2384
2385 hci_dev_unlock(hdev);
2386 }
2387
2388 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2389 struct hci_conn *conn)
2390 {
2391 if (conn->state != BT_CONFIG || !conn->out)
2392 return 0;
2393
2394 if (conn->pending_sec_level == BT_SECURITY_SDP)
2395 return 0;
2396
2397 /* Only request authentication for SSP connections or non-SSP
2398 * devices with sec_level MEDIUM or HIGH or if MITM protection
2399 * is requested.
2400 */
2401 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2402 conn->pending_sec_level != BT_SECURITY_FIPS &&
2403 conn->pending_sec_level != BT_SECURITY_HIGH &&
2404 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2405 return 0;
2406
2407 return 1;
2408 }
2409
2410 static int hci_resolve_name(struct hci_dev *hdev,
2411 struct inquiry_entry *e)
2412 {
2413 struct hci_cp_remote_name_req cp;
2414
2415 memset(&cp, 0, sizeof(cp));
2416
2417 bacpy(&cp.bdaddr, &e->data.bdaddr);
2418 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2419 cp.pscan_mode = e->data.pscan_mode;
2420 cp.clock_offset = e->data.clock_offset;
2421
2422 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2423 }
2424
2425 static bool hci_resolve_next_name(struct hci_dev *hdev)
2426 {
2427 struct discovery_state *discov = &hdev->discovery;
2428 struct inquiry_entry *e;
2429
2430 if (list_empty(&discov->resolve))
2431 return false;
2432
2433 /* We should stop if we already spent too much time resolving names. */
2434 if (time_after(jiffies, discov->name_resolve_timeout)) {
2435 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2436 return false;
2437 }
2438
2439 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2440 if (!e)
2441 return false;
2442
2443 if (hci_resolve_name(hdev, e) == 0) {
2444 e->name_state = NAME_PENDING;
2445 return true;
2446 }
2447
2448 return false;
2449 }
2450
2451 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2452 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2453 {
2454 struct discovery_state *discov = &hdev->discovery;
2455 struct inquiry_entry *e;
2456
2457 /* Update the mgmt connected state if necessary. Be careful with
2458 * conn objects that exist but are not (yet) connected however.
2459 * Only those in BT_CONFIG or BT_CONNECTED states can be
2460 * considered connected.
2461 */
2462 if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2463 mgmt_device_connected(hdev, conn, name, name_len);
2464
2465 if (discov->state == DISCOVERY_STOPPED)
2466 return;
2467
2468 if (discov->state == DISCOVERY_STOPPING)
2469 goto discov_complete;
2470
2471 if (discov->state != DISCOVERY_RESOLVING)
2472 return;
2473
2474 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2475 	/* If the device was not found in the list of devices whose names are
2476 	 * pending resolution, there is no need to resolve the next name here;
2477 	 * that will be done upon receiving another Remote Name Request
2478 	 * Complete event */
2479 if (!e)
2480 return;
2481
2482 list_del(&e->list);
2483
2484 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2485 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2486 name, name_len);
2487
2488 if (hci_resolve_next_name(hdev))
2489 return;
2490
2491 discov_complete:
2492 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2493 }
2494
2495 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2496 {
2497 struct hci_cp_remote_name_req *cp;
2498 struct hci_conn *conn;
2499
2500 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2501
2502 	/* If successful, wait for the Remote Name Request Complete event
2503 	 * before checking whether authentication is needed */
2504 if (!status)
2505 return;
2506
2507 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2508 if (!cp)
2509 return;
2510
2511 hci_dev_lock(hdev);
2512
2513 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2514
2515 if (hci_dev_test_flag(hdev, HCI_MGMT))
2516 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2517
2518 if (!conn)
2519 goto unlock;
2520
2521 if (!hci_outgoing_auth_needed(hdev, conn))
2522 goto unlock;
2523
2524 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2525 struct hci_cp_auth_requested auth_cp;
2526
2527 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2528
2529 auth_cp.handle = __cpu_to_le16(conn->handle);
2530 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2531 sizeof(auth_cp), &auth_cp);
2532 }
2533
2534 unlock:
2535 hci_dev_unlock(hdev);
2536 }
2537
2538 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2539 {
2540 struct hci_cp_read_remote_features *cp;
2541 struct hci_conn *conn;
2542
2543 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2544
2545 if (!status)
2546 return;
2547
2548 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2549 if (!cp)
2550 return;
2551
2552 hci_dev_lock(hdev);
2553
2554 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2555 if (conn) {
2556 if (conn->state == BT_CONFIG) {
2557 hci_connect_cfm(conn, status);
2558 hci_conn_drop(conn);
2559 }
2560 }
2561
2562 hci_dev_unlock(hdev);
2563 }
2564
2565 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2566 {
2567 struct hci_cp_read_remote_ext_features *cp;
2568 struct hci_conn *conn;
2569
2570 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2571
2572 if (!status)
2573 return;
2574
2575 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2576 if (!cp)
2577 return;
2578
2579 hci_dev_lock(hdev);
2580
2581 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2582 if (conn) {
2583 if (conn->state == BT_CONFIG) {
2584 hci_connect_cfm(conn, status);
2585 hci_conn_drop(conn);
2586 }
2587 }
2588
2589 hci_dev_unlock(hdev);
2590 }
2591
2592 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2593 __u8 status)
2594 {
2595 struct hci_conn *acl;
2596 struct hci_link *link;
2597
2598 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2599
2600 hci_dev_lock(hdev);
2601
2602 acl = hci_conn_hash_lookup_handle(hdev, handle);
2603 if (acl) {
2604 link = list_first_entry_or_null(&acl->link_list,
2605 struct hci_link, list);
2606 if (link && link->conn) {
2607 link->conn->state = BT_CLOSED;
2608
2609 hci_connect_cfm(link->conn, status);
2610 hci_conn_del(link->conn);
2611 }
2612 }
2613
2614 hci_dev_unlock(hdev);
2615 }
2616
2617 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2618 {
2619 struct hci_cp_setup_sync_conn *cp;
2620
2621 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2622
2623 if (!status)
2624 return;
2625
2626 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2627 if (!cp)
2628 return;
2629
2630 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2631 }
2632
2633 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2634 {
2635 struct hci_cp_enhanced_setup_sync_conn *cp;
2636
2637 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2638
2639 if (!status)
2640 return;
2641
2642 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2643 if (!cp)
2644 return;
2645
2646 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2647 }
2648
2649 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2650 {
2651 struct hci_cp_sniff_mode *cp;
2652 struct hci_conn *conn;
2653
2654 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2655
2656 if (!status)
2657 return;
2658
2659 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2660 if (!cp)
2661 return;
2662
2663 hci_dev_lock(hdev);
2664
2665 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2666 if (conn) {
2667 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2668
2669 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2670 hci_sco_setup(conn, status);
2671 }
2672
2673 hci_dev_unlock(hdev);
2674 }
2675
2676 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2677 {
2678 struct hci_cp_exit_sniff_mode *cp;
2679 struct hci_conn *conn;
2680
2681 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2682
2683 if (!status)
2684 return;
2685
2686 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2687 if (!cp)
2688 return;
2689
2690 hci_dev_lock(hdev);
2691
2692 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2693 if (conn) {
2694 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2695
2696 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2697 hci_sco_setup(conn, status);
2698 }
2699
2700 hci_dev_unlock(hdev);
2701 }
2702
2703 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2704 {
2705 struct hci_cp_disconnect *cp;
2706 struct hci_conn_params *params;
2707 struct hci_conn *conn;
2708 bool mgmt_conn;
2709
2710 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2711
2712 	/* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and the device
2713 	 * is not suspended; otherwise clean up the connection immediately.
2714 */
2715 if (!status && !hdev->suspended)
2716 return;
2717
2718 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2719 if (!cp)
2720 return;
2721
2722 hci_dev_lock(hdev);
2723
2724 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2725 if (!conn)
2726 goto unlock;
2727
2728 if (status) {
2729 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2730 conn->dst_type, status);
2731
2732 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2733 hdev->cur_adv_instance = conn->adv_instance;
2734 hci_enable_advertising(hdev);
2735 }
2736
2737 /* Inform sockets conn is gone before we delete it */
2738 hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2739
2740 goto done;
2741 }
2742
2743 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2744
2745 if (conn->type == ACL_LINK) {
2746 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2747 hci_remove_link_key(hdev, &conn->dst);
2748 }
2749
2750 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2751 if (params) {
2752 switch (params->auto_connect) {
2753 case HCI_AUTO_CONN_LINK_LOSS:
2754 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2755 break;
2756 fallthrough;
2757
2758 case HCI_AUTO_CONN_DIRECT:
2759 case HCI_AUTO_CONN_ALWAYS:
2760 hci_pend_le_list_del_init(params);
2761 hci_pend_le_list_add(params, &hdev->pend_le_conns);
2762 break;
2763
2764 default:
2765 break;
2766 }
2767 }
2768
2769 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2770 cp->reason, mgmt_conn);
2771
2772 hci_disconn_cfm(conn, cp->reason);
2773
2774 done:
2775 /* If the disconnection failed for any reason, the upper layer
2776 * does not retry to disconnect in current implementation.
2777 * Hence, we need to do some basic cleanup here and re-enable
2778 * advertising if necessary.
2779 */
2780 hci_conn_del(conn);
2781 unlock:
2782 hci_dev_unlock(hdev);
2783 }
2784
2785 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2786 {
2787 	/* When controller-based address resolution is in use, the new
2788 	 * address types 0x02 and 0x03 are reported. These need to be
2789 	 * converted back into either the public or the random address type.
2790 */
2791 switch (type) {
2792 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2793 if (resolved)
2794 *resolved = true;
2795 return ADDR_LE_DEV_PUBLIC;
2796 case ADDR_LE_DEV_RANDOM_RESOLVED:
2797 if (resolved)
2798 *resolved = true;
2799 return ADDR_LE_DEV_RANDOM;
2800 }
2801
2802 if (resolved)
2803 *resolved = false;
2804 return type;
2805 }
2806
2807 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2808 u8 peer_addr_type, u8 own_address_type,
2809 u8 filter_policy)
2810 {
2811 struct hci_conn *conn;
2812
2813 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2814 peer_addr_type);
2815 if (!conn)
2816 return;
2817
2818 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2819
2820 /* Store the initiator and responder address information which
2821 * is needed for SMP. These values will not change during the
2822 * lifetime of the connection.
2823 */
2824 conn->init_addr_type = own_address_type;
2825 if (own_address_type == ADDR_LE_DEV_RANDOM)
2826 bacpy(&conn->init_addr, &hdev->random_addr);
2827 else
2828 bacpy(&conn->init_addr, &hdev->bdaddr);
2829
2830 conn->resp_addr_type = peer_addr_type;
2831 bacpy(&conn->resp_addr, peer_addr);
2832 }
2833
2834 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2835 {
2836 struct hci_cp_le_create_conn *cp;
2837
2838 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2839
2840 /* All connection failure handling is taken care of by the
2841 * hci_conn_failed function which is triggered by the HCI
2842 * request completion callbacks used for connecting.
2843 */
2844 if (status)
2845 return;
2846
2847 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2848 if (!cp)
2849 return;
2850
2851 hci_dev_lock(hdev);
2852
2853 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2854 cp->own_address_type, cp->filter_policy);
2855
2856 hci_dev_unlock(hdev);
2857 }
2858
2859 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2860 {
2861 struct hci_cp_le_ext_create_conn *cp;
2862
2863 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2864
2865 /* All connection failure handling is taken care of by the
2866 * hci_conn_failed function which is triggered by the HCI
2867 * request completion callbacks used for connecting.
2868 */
2869 if (status)
2870 return;
2871
2872 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2873 if (!cp)
2874 return;
2875
2876 hci_dev_lock(hdev);
2877
2878 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2879 cp->own_addr_type, cp->filter_policy);
2880
2881 hci_dev_unlock(hdev);
2882 }
2883
2884 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2885 {
2886 struct hci_cp_le_read_remote_features *cp;
2887 struct hci_conn *conn;
2888
2889 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2890
2891 if (!status)
2892 return;
2893
2894 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2895 if (!cp)
2896 return;
2897
2898 hci_dev_lock(hdev);
2899
2900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2901 if (conn) {
2902 if (conn->state == BT_CONFIG) {
2903 hci_connect_cfm(conn, status);
2904 hci_conn_drop(conn);
2905 }
2906 }
2907
2908 hci_dev_unlock(hdev);
2909 }
2910
2911 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2912 {
2913 struct hci_cp_le_start_enc *cp;
2914 struct hci_conn *conn;
2915
2916 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2917
2918 if (!status)
2919 return;
2920
2921 hci_dev_lock(hdev);
2922
2923 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2924 if (!cp)
2925 goto unlock;
2926
2927 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2928 if (!conn)
2929 goto unlock;
2930
2931 if (conn->state != BT_CONNECTED)
2932 goto unlock;
2933
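	/* Encryption could not be started on an established connection, so
	 * drop the link and report an authentication failure to the upper
	 * layers.
	 */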
2934 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2935 hci_conn_drop(conn);
2936
2937 unlock:
2938 hci_dev_unlock(hdev);
2939 }
2940
2941 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2942 {
2943 struct hci_cp_switch_role *cp;
2944 struct hci_conn *conn;
2945
2946 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2947
2948 if (!status)
2949 return;
2950
2951 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2952 if (!cp)
2953 return;
2954
2955 hci_dev_lock(hdev);
2956
2957 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2958 if (conn)
2959 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2960
2961 hci_dev_unlock(hdev);
2962 }
2963
2964 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2965 struct sk_buff *skb)
2966 {
2967 struct hci_ev_status *ev = data;
2968 struct discovery_state *discov = &hdev->discovery;
2969 struct inquiry_entry *e;
2970
2971 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2972
2973 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2974 return;
2975
2976 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2977 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2978
2979 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2980 return;
2981
2982 hci_dev_lock(hdev);
2983
2984 if (discov->state != DISCOVERY_FINDING)
2985 goto unlock;
2986
2987 if (list_empty(&discov->resolve)) {
2988 /* When BR/EDR inquiry is active and no LE scanning is in
2989 * progress, then change discovery state to indicate completion.
2990 *
2991 * When running LE scanning and BR/EDR inquiry simultaneously
2992 * and the LE scan already finished, then change the discovery
2993 * state to indicate completion.
2994 */
2995 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2996 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2997 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2998 goto unlock;
2999 }
3000
3001 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3002 if (e && hci_resolve_name(hdev, e) == 0) {
3003 e->name_state = NAME_PENDING;
3004 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3005 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3006 } else {
3007 /* When BR/EDR inquiry is active and no LE scanning is in
3008 * progress, then change discovery state to indicate completion.
3009 *
3010 * When running LE scanning and BR/EDR inquiry simultaneously
3011 * and the LE scan already finished, then change the discovery
3012 * state to indicate completion.
3013 */
3014 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3015 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3016 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3017 }
3018
3019 unlock:
3020 hci_dev_unlock(hdev);
3021 }
3022
3023 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3024 struct sk_buff *skb)
3025 {
3026 struct hci_ev_inquiry_result *ev = edata;
3027 struct inquiry_data data;
3028 int i;
3029
3030 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3031 flex_array_size(ev, info, ev->num)))
3032 return;
3033
3034 bt_dev_dbg(hdev, "num %d", ev->num);
3035
3036 if (!ev->num)
3037 return;
3038
3039 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3040 return;
3041
3042 hci_dev_lock(hdev);
3043
3044 for (i = 0; i < ev->num; i++) {
3045 struct inquiry_info *info = &ev->info[i];
3046 u32 flags;
3047
3048 bacpy(&data.bdaddr, &info->bdaddr);
3049 data.pscan_rep_mode = info->pscan_rep_mode;
3050 data.pscan_period_mode = info->pscan_period_mode;
3051 data.pscan_mode = info->pscan_mode;
3052 memcpy(data.dev_class, info->dev_class, 3);
3053 data.clock_offset = info->clock_offset;
3054 data.rssi = HCI_RSSI_INVALID;
3055 data.ssp_mode = 0x00;
3056
3057 flags = hci_inquiry_cache_update(hdev, &data, false);
3058
3059 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3060 info->dev_class, HCI_RSSI_INVALID,
3061 flags, NULL, 0, NULL, 0, 0);
3062 }
3063
3064 hci_dev_unlock(hdev);
3065 }
3066
3067 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3068 struct sk_buff *skb)
3069 {
3070 struct hci_ev_conn_complete *ev = data;
3071 struct hci_conn *conn;
3072 u8 status = ev->status;
3073
3074 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3075
3076 hci_dev_lock(hdev);
3077
3078 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3079 if (!conn) {
3080 /* In case of error status and there is no connection pending
3081 * just unlock as there is nothing to cleanup.
3082 */
3083 if (ev->status)
3084 goto unlock;
3085
3086 		/* The connection may not exist if it was auto-connected. Check
3087 		 * the BR/EDR allowlist to see if this device is allowed to auto
3088 		 * connect. If the link is an ACL type, create the connection
3089 		 * automatically.
3090 *
3091 * Auto-connect will only occur if the event filter is
3092 * programmed with a given address. Right now, event filter is
3093 * only used during suspend.
3094 */
3095 if (ev->link_type == ACL_LINK &&
3096 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3097 &ev->bdaddr,
3098 BDADDR_BREDR)) {
3099 conn = hci_conn_add_unset(hdev, ev->link_type,
3100 &ev->bdaddr, HCI_ROLE_SLAVE);
3101 if (IS_ERR(conn)) {
3102 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3103 goto unlock;
3104 }
3105 } else {
3106 if (ev->link_type != SCO_LINK)
3107 goto unlock;
3108
3109 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3110 &ev->bdaddr);
3111 if (!conn)
3112 goto unlock;
3113
3114 conn->type = SCO_LINK;
3115 }
3116 }
3117
3118 /* The HCI_Connection_Complete event is only sent once per connection.
3119 * Processing it more than once per connection can corrupt kernel memory.
3120 *
3121 * As the connection handle is set here for the first time, it indicates
3122 * whether the connection is already set up.
3123 */
3124 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3125 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3126 goto unlock;
3127 }
3128
3129 if (!status) {
3130 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3131 if (status)
3132 goto done;
3133
3134 if (conn->type == ACL_LINK) {
3135 conn->state = BT_CONFIG;
3136 hci_conn_hold(conn);
3137
3138 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3139 !hci_find_link_key(hdev, &ev->bdaddr))
3140 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3141 else
3142 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3143 } else
3144 conn->state = BT_CONNECTED;
3145
3146 hci_debugfs_create_conn(conn);
3147 hci_conn_add_sysfs(conn);
3148
3149 if (test_bit(HCI_AUTH, &hdev->flags))
3150 set_bit(HCI_CONN_AUTH, &conn->flags);
3151
3152 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3153 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3154
3155 /* "Link key request" completed ahead of "connect request" completes */
3156 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3157 ev->link_type == ACL_LINK) {
3158 struct link_key *key;
3159 struct hci_cp_read_enc_key_size cp;
3160
3161 key = hci_find_link_key(hdev, &ev->bdaddr);
3162 if (key) {
3163 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3164
3165 if (!read_key_size_capable(hdev)) {
3166 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3167 } else {
3168 cp.handle = cpu_to_le16(conn->handle);
3169 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3170 sizeof(cp), &cp)) {
3171 bt_dev_err(hdev, "sending read key size failed");
3172 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3173 }
3174 }
3175
3176 hci_encrypt_cfm(conn, ev->status);
3177 }
3178 }
3179
3180 /* Get remote features */
3181 if (conn->type == ACL_LINK) {
3182 struct hci_cp_read_remote_features cp;
3183 cp.handle = ev->handle;
3184 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3185 sizeof(cp), &cp);
3186
3187 hci_update_scan(hdev);
3188 }
3189
3190 /* Set packet type for incoming connection */
3191 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3192 struct hci_cp_change_conn_ptype cp;
3193 cp.handle = ev->handle;
3194 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3195 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3196 &cp);
3197 }
3198 }
3199
3200 if (conn->type == ACL_LINK)
3201 hci_sco_setup(conn, ev->status);
3202
3203 done:
3204 if (status) {
3205 hci_conn_failed(conn, status);
3206 } else if (ev->link_type == SCO_LINK) {
3207 switch (conn->setting & SCO_AIRMODE_MASK) {
3208 case SCO_AIRMODE_CVSD:
3209 if (hdev->notify)
3210 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3211 break;
3212 }
3213
3214 hci_connect_cfm(conn, status);
3215 }
3216
3217 unlock:
3218 hci_dev_unlock(hdev);
3219 }
3220
3221 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3222 {
3223 struct hci_cp_reject_conn_req cp;
3224
3225 bacpy(&cp.bdaddr, bdaddr);
3226 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3227 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3228 }
3229
3230 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3231 struct sk_buff *skb)
3232 {
3233 struct hci_ev_conn_request *ev = data;
3234 int mask = hdev->link_mode;
3235 struct inquiry_entry *ie;
3236 struct hci_conn *conn;
3237 __u8 flags = 0;
3238
3239 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3240
3241 	/* Reject an incoming connection from a device with the same BD_ADDR
3242 	 * to mitigate CVE-2020-26555
3243 */
3244 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3245 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
3246 &ev->bdaddr);
3247 hci_reject_conn(hdev, &ev->bdaddr);
3248 return;
3249 }
3250
3251 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3252 &flags);
3253
3254 if (!(mask & HCI_LM_ACCEPT)) {
3255 hci_reject_conn(hdev, &ev->bdaddr);
3256 return;
3257 }
3258
3259 hci_dev_lock(hdev);
3260
3261 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3262 BDADDR_BREDR)) {
3263 hci_reject_conn(hdev, &ev->bdaddr);
3264 goto unlock;
3265 }
3266
3267 /* Require HCI_CONNECTABLE or an accept list entry to accept the
3268 * connection. These features are only touched through mgmt so
3269 * only do the checks if HCI_MGMT is set.
3270 */
3271 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3272 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3273 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3274 BDADDR_BREDR)) {
3275 hci_reject_conn(hdev, &ev->bdaddr);
3276 goto unlock;
3277 }
3278
3279 /* Connection accepted */
3280
3281 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3282 if (ie)
3283 memcpy(ie->data.dev_class, ev->dev_class, 3);
3284
3285 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3286 &ev->bdaddr);
3287 if (!conn) {
3288 conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3289 HCI_ROLE_SLAVE);
3290 if (IS_ERR(conn)) {
3291 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3292 goto unlock;
3293 }
3294 }
3295
3296 memcpy(conn->dev_class, ev->dev_class, 3);
3297
3298 hci_dev_unlock(hdev);
3299
3300 if (ev->link_type == ACL_LINK ||
3301 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3302 struct hci_cp_accept_conn_req cp;
3303 conn->state = BT_CONNECT;
3304
3305 bacpy(&cp.bdaddr, &ev->bdaddr);
3306
3307 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3308 cp.role = 0x00; /* Become central */
3309 else
3310 cp.role = 0x01; /* Remain peripheral */
3311
3312 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3313 } else if (!(flags & HCI_PROTO_DEFER)) {
3314 struct hci_cp_accept_sync_conn_req cp;
3315 conn->state = BT_CONNECT;
3316
3317 bacpy(&cp.bdaddr, &ev->bdaddr);
3318 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3319
3320 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
3321 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
3322 cp.max_latency = cpu_to_le16(0xffff);
3323 cp.content_format = cpu_to_le16(hdev->voice_setting);
3324 cp.retrans_effort = 0xff;
3325
3326 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3327 &cp);
3328 } else {
3329 conn->state = BT_CONNECT2;
3330 hci_connect_cfm(conn, 0);
3331 }
3332
3333 return;
3334 unlock:
3335 hci_dev_unlock(hdev);
3336 }
3337
3338 static u8 hci_to_mgmt_reason(u8 err)
3339 {
3340 switch (err) {
3341 case HCI_ERROR_CONNECTION_TIMEOUT:
3342 return MGMT_DEV_DISCONN_TIMEOUT;
3343 case HCI_ERROR_REMOTE_USER_TERM:
3344 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3345 case HCI_ERROR_REMOTE_POWER_OFF:
3346 return MGMT_DEV_DISCONN_REMOTE;
3347 case HCI_ERROR_LOCAL_HOST_TERM:
3348 return MGMT_DEV_DISCONN_LOCAL_HOST;
3349 default:
3350 return MGMT_DEV_DISCONN_UNKNOWN;
3351 }
3352 }
3353
3354 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3355 struct sk_buff *skb)
3356 {
3357 struct hci_ev_disconn_complete *ev = data;
3358 u8 reason;
3359 struct hci_conn_params *params;
3360 struct hci_conn *conn;
3361 bool mgmt_connected;
3362
3363 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3364
3365 hci_dev_lock(hdev);
3366
3367 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3368 if (!conn)
3369 goto unlock;
3370
3371 if (ev->status) {
3372 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3373 conn->dst_type, ev->status);
3374 goto unlock;
3375 }
3376
3377 conn->state = BT_CLOSED;
3378
3379 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3380
3381 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3382 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3383 else
3384 reason = hci_to_mgmt_reason(ev->reason);
3385
3386 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3387 reason, mgmt_connected);
3388
3389 if (conn->type == ACL_LINK) {
3390 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3391 hci_remove_link_key(hdev, &conn->dst);
3392
3393 hci_update_scan(hdev);
3394 }
3395
3396 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3397 if (params) {
3398 switch (params->auto_connect) {
3399 case HCI_AUTO_CONN_LINK_LOSS:
3400 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3401 break;
3402 fallthrough;
3403
3404 case HCI_AUTO_CONN_DIRECT:
3405 case HCI_AUTO_CONN_ALWAYS:
3406 hci_pend_le_list_del_init(params);
3407 hci_pend_le_list_add(params, &hdev->pend_le_conns);
3408 hci_update_passive_scan(hdev);
3409 break;
3410
3411 default:
3412 break;
3413 }
3414 }
3415
3416 hci_disconn_cfm(conn, ev->reason);
3417
3418 /* Re-enable advertising if necessary, since it might
3419 * have been disabled by the connection. From the
3420 * HCI_LE_Set_Advertise_Enable command description in
3421 * the core specification (v4.0):
3422 * "The Controller shall continue advertising until the Host
3423 * issues an LE_Set_Advertise_Enable command with
3424 * Advertising_Enable set to 0x00 (Advertising is disabled)
3425 * or until a connection is created or until the Advertising
3426 * is timed out due to Directed Advertising."
3427 */
3428 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3429 hdev->cur_adv_instance = conn->adv_instance;
3430 hci_enable_advertising(hdev);
3431 }
3432
3433 hci_conn_del(conn);
3434
3435 unlock:
3436 hci_dev_unlock(hdev);
3437 }
3438
3439 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3440 struct sk_buff *skb)
3441 {
3442 struct hci_ev_auth_complete *ev = data;
3443 struct hci_conn *conn;
3444
3445 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3446
3447 hci_dev_lock(hdev);
3448
3449 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3450 if (!conn)
3451 goto unlock;
3452
3453 if (!ev->status) {
3454 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3455 set_bit(HCI_CONN_AUTH, &conn->flags);
3456 conn->sec_level = conn->pending_sec_level;
3457 } else {
3458 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3459 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3460
3461 mgmt_auth_failed(conn, ev->status);
3462 }
3463
3464 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3465
3466 if (conn->state == BT_CONFIG) {
3467 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3468 struct hci_cp_set_conn_encrypt cp;
3469 cp.handle = ev->handle;
3470 cp.encrypt = 0x01;
3471 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3472 &cp);
3473 } else {
3474 conn->state = BT_CONNECTED;
3475 hci_connect_cfm(conn, ev->status);
3476 hci_conn_drop(conn);
3477 }
3478 } else {
3479 hci_auth_cfm(conn, ev->status);
3480
3481 hci_conn_hold(conn);
3482 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3483 hci_conn_drop(conn);
3484 }
3485
3486 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3487 if (!ev->status) {
3488 struct hci_cp_set_conn_encrypt cp;
3489 cp.handle = ev->handle;
3490 cp.encrypt = 0x01;
3491 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3492 &cp);
3493 } else {
3494 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3495 hci_encrypt_cfm(conn, ev->status);
3496 }
3497 }
3498
3499 unlock:
3500 hci_dev_unlock(hdev);
3501 }
3502
3503 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3504 struct sk_buff *skb)
3505 {
3506 struct hci_ev_remote_name *ev = data;
3507 struct hci_conn *conn;
3508
3509 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3510
3511 hci_dev_lock(hdev);
3512
3513 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3514
3515 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3516 goto check_auth;
3517
3518 if (ev->status == 0)
3519 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3520 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3521 else
3522 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3523
3524 check_auth:
3525 if (!conn)
3526 goto unlock;
3527
3528 if (!hci_outgoing_auth_needed(hdev, conn))
3529 goto unlock;
3530
3531 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3532 struct hci_cp_auth_requested cp;
3533
3534 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3535
3536 cp.handle = __cpu_to_le16(conn->handle);
3537 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3538 }
3539
3540 unlock:
3541 hci_dev_unlock(hdev);
3542 }
3543
3544 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3545 struct sk_buff *skb)
3546 {
3547 struct hci_ev_encrypt_change *ev = data;
3548 struct hci_conn *conn;
3549
3550 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3551
3552 hci_dev_lock(hdev);
3553
3554 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3555 if (!conn)
3556 goto unlock;
3557
3558 if (!ev->status) {
3559 if (ev->encrypt) {
3560 /* Encryption implies authentication */
3561 set_bit(HCI_CONN_AUTH, &conn->flags);
3562 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3563 conn->sec_level = conn->pending_sec_level;
3564
3565 /* P-256 authentication key implies FIPS */
3566 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3567 set_bit(HCI_CONN_FIPS, &conn->flags);
3568
3569 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3570 conn->type == LE_LINK)
3571 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3572 } else {
3573 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3574 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3575 }
3576 }
3577
3578 /* We should disregard the current RPA and generate a new one
3579 * whenever the encryption procedure fails.
3580 */
3581 if (ev->status && conn->type == LE_LINK) {
3582 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3583 hci_adv_instances_set_rpa_expired(hdev, true);
3584 }
3585
3586 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3587
3588 /* Check link security requirements are met */
3589 if (!hci_conn_check_link_mode(conn))
3590 ev->status = HCI_ERROR_AUTH_FAILURE;
3591
3592 if (ev->status && conn->state == BT_CONNECTED) {
3593 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3594 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3595
3596 /* Notify upper layers so they can cleanup before
3597 * disconnecting.
3598 */
3599 hci_encrypt_cfm(conn, ev->status);
3600 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3601 hci_conn_drop(conn);
3602 goto unlock;
3603 }
3604
3605 /* Try reading the encryption key size for encrypted ACL links */
3606 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3607 struct hci_cp_read_enc_key_size cp;
3608
3609 /* Only send HCI_Read_Encryption_Key_Size if the
3610 * controller really supports it. If it doesn't, assume
3611 * the default size (16).
3612 */
3613 if (!read_key_size_capable(hdev)) {
3614 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3615 goto notify;
3616 }
3617
3618 cp.handle = cpu_to_le16(conn->handle);
3619 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3620 sizeof(cp), &cp)) {
3621 bt_dev_err(hdev, "sending read key size failed");
3622 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3623 goto notify;
3624 }
3625
3626 goto unlock;
3627 }
3628
3629 	/* Set the default Authenticated Payload Timeout after
3630 	 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B,
3631 	 * Section 3.3, the HCI_Write_Authenticated_Payload_Timeout command
3632 	 * should be sent when the link is active and encryption is enabled.
3633 	 * The connection type can be either LE or ACL, the controller must
3634 	 * support LMP Ping, and AES-CCM encryption must be in use.
3635 */
3636 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3637 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3638 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3639 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3640 struct hci_cp_write_auth_payload_to cp;
3641
3642 cp.handle = cpu_to_le16(conn->handle);
3643 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3644 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3645 sizeof(cp), &cp))
3646 bt_dev_err(hdev, "write auth payload timeout failed");
3647 }
3648
3649 notify:
3650 hci_encrypt_cfm(conn, ev->status);
3651
3652 unlock:
3653 hci_dev_unlock(hdev);
3654 }
3655
3656 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3657 struct sk_buff *skb)
3658 {
3659 struct hci_ev_change_link_key_complete *ev = data;
3660 struct hci_conn *conn;
3661
3662 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3663
3664 hci_dev_lock(hdev);
3665
3666 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3667 if (conn) {
3668 if (!ev->status)
3669 set_bit(HCI_CONN_SECURE, &conn->flags);
3670
3671 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3672
3673 hci_key_change_cfm(conn, ev->status);
3674 }
3675
3676 hci_dev_unlock(hdev);
3677 }
3678
3679 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3680 struct sk_buff *skb)
3681 {
3682 struct hci_ev_remote_features *ev = data;
3683 struct hci_conn *conn;
3684
3685 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3686
3687 hci_dev_lock(hdev);
3688
3689 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3690 if (!conn)
3691 goto unlock;
3692
3693 if (!ev->status)
3694 memcpy(conn->features[0], ev->features, 8);
3695
3696 if (conn->state != BT_CONFIG)
3697 goto unlock;
3698
3699 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3700 lmp_ext_feat_capable(conn)) {
3701 struct hci_cp_read_remote_ext_features cp;
3702 cp.handle = ev->handle;
3703 cp.page = 0x01;
3704 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3705 sizeof(cp), &cp);
3706 goto unlock;
3707 }
3708
3709 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3710 struct hci_cp_remote_name_req cp;
3711 memset(&cp, 0, sizeof(cp));
3712 bacpy(&cp.bdaddr, &conn->dst);
3713 cp.pscan_rep_mode = 0x02;
3714 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3715 } else {
3716 mgmt_device_connected(hdev, conn, NULL, 0);
3717 }
3718
3719 if (!hci_outgoing_auth_needed(hdev, conn)) {
3720 conn->state = BT_CONNECTED;
3721 hci_connect_cfm(conn, ev->status);
3722 hci_conn_drop(conn);
3723 }
3724
3725 unlock:
3726 hci_dev_unlock(hdev);
3727 }
3728
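/* Every Command Complete/Command Status event reports how many HCI command
 * packets the controller can accept. A non-zero count lets the next queued
 * command go out and stops the ncmd watchdog; a zero count arms the watchdog
 * so a controller that stops accepting commands is eventually detected.
 */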
3729 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3730 {
3731 cancel_delayed_work(&hdev->cmd_timer);
3732
3733 rcu_read_lock();
3734 if (!test_bit(HCI_RESET, &hdev->flags)) {
3735 if (ncmd) {
3736 cancel_delayed_work(&hdev->ncmd_timer);
3737 atomic_set(&hdev->cmd_cnt, 1);
3738 } else {
3739 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3740 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3741 HCI_NCMD_TIMEOUT);
3742 }
3743 }
3744 rcu_read_unlock();
3745 }
3746
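/* LE Read Buffer Size v2 reports separate buffer pools for LE ACL and ISO
 * data; both MTUs and packet counts are cached so credits for each pool can
 * be tracked independently.
 */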
3747 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3748 struct sk_buff *skb)
3749 {
3750 struct hci_rp_le_read_buffer_size_v2 *rp = data;
3751
3752 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3753
3754 if (rp->status)
3755 return rp->status;
3756
3757 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
3758 hdev->le_pkts = rp->acl_max_pkt;
3759 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
3760 hdev->iso_pkts = rp->iso_max_pkt;
3761
3762 hdev->le_cnt = hdev->le_pkts;
3763 hdev->iso_cnt = hdev->iso_pkts;
3764
3765 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3766 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3767
3768 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3769 return HCI_ERROR_INVALID_PARAMETERS;
3770
3771 return rp->status;
3772 }
3773
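/* Fail every CIS in the given CIG that is still waiting for a connection
 * handle; broadcast entries and CISes that already have a handle are left
 * untouched so an existing configuration survives a failed update.
 */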
3774 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3775 {
3776 struct hci_conn *conn, *tmp;
3777
3778 lockdep_assert_held(&hdev->lock);
3779
3780 list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3781 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3782 conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3783 continue;
3784
3785 if (HCI_CONN_HANDLE_UNSET(conn->handle))
3786 hci_conn_failed(conn, status);
3787 }
3788 }
3789
3790 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3791 struct sk_buff *skb)
3792 {
3793 struct hci_rp_le_set_cig_params *rp = data;
3794 struct hci_cp_le_set_cig_params *cp;
3795 struct hci_conn *conn;
3796 u8 status = rp->status;
3797 bool pending = false;
3798 int i;
3799
3800 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3801
3802 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3803 if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3804 rp->cig_id != cp->cig_id)) {
3805 bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3806 status = HCI_ERROR_UNSPECIFIED;
3807 }
3808
3809 hci_dev_lock(hdev);
3810
3811 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3812 *
3813 * If the Status return parameter is non-zero, then the state of the CIG
3814 * and its CIS configurations shall not be changed by the command. If
3815 * the CIG did not already exist, it shall not be created.
3816 */
3817 if (status) {
3818 /* Keep current configuration, fail only the unbound CIS */
3819 hci_unbound_cis_failed(hdev, rp->cig_id, status);
3820 goto unlock;
3821 }
3822
3823 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3824 *
3825 * If the Status return parameter is zero, then the Controller shall
3826 * set the Connection_Handle arrayed return parameter to the connection
3827 * handle(s) corresponding to the CIS configurations specified in
3828 * the CIS_IDs command parameter, in the same order.
3829 */
3830 for (i = 0; i < rp->num_handles; ++i) {
3831 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3832 cp->cis[i].cis_id);
3833 if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3834 continue;
3835
3836 if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3837 continue;
3838
3839 if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3840 continue;
3841
3842 if (conn->state == BT_CONNECT)
3843 pending = true;
3844 }
3845
3846 unlock:
3847 if (pending)
3848 hci_le_create_cis_pending(hdev);
3849
3850 hci_dev_unlock(hdev);
3851
3852 return rp->status;
3853 }
3854
3855 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3856 struct sk_buff *skb)
3857 {
3858 struct hci_rp_le_setup_iso_path *rp = data;
3859 struct hci_cp_le_setup_iso_path *cp;
3860 struct hci_conn *conn;
3861
3862 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3863
3864 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3865 if (!cp)
3866 return rp->status;
3867
3868 hci_dev_lock(hdev);
3869
3870 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3871 if (!conn)
3872 goto unlock;
3873
3874 if (rp->status) {
3875 hci_connect_cfm(conn, rp->status);
3876 hci_conn_del(conn);
3877 goto unlock;
3878 }
3879
3880 switch (cp->direction) {
3881 /* Input (Host to Controller) */
3882 case 0x00:
3883 /* Only confirm connection if output only */
3884 if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3885 hci_connect_cfm(conn, rp->status);
3886 break;
3887 /* Output (Controller to Host) */
3888 case 0x01:
3889 /* Confirm connection since conn->iso_qos is always configured
3890 * last.
3891 */
3892 hci_connect_cfm(conn, rp->status);
3893
3894 /* Notify device connected in case it is a BIG Sync */
3895 if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3896 mgmt_device_connected(hdev, conn, NULL, 0);
3897
3898 break;
3899 }
3900
3901 unlock:
3902 hci_dev_unlock(hdev);
3903 return rp->status;
3904 }
3905
3906 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3907 {
3908 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3909 }
3910
3911 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3912 struct sk_buff *skb)
3913 {
3914 struct hci_ev_status *rp = data;
3915 struct hci_cp_le_set_per_adv_params *cp;
3916
3917 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3918
3919 if (rp->status)
3920 return rp->status;
3921
3922 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3923 if (!cp)
3924 return rp->status;
3925
3926 /* TODO: set the conn state */
3927 return rp->status;
3928 }
3929
3930 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3931 struct sk_buff *skb)
3932 {
3933 struct hci_ev_status *rp = data;
3934 struct hci_cp_le_set_per_adv_enable *cp;
3935 struct adv_info *adv = NULL, *n;
3936 u8 per_adv_cnt = 0;
3937
3938 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3939
3940 if (rp->status)
3941 return rp->status;
3942
3943 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3944 if (!cp)
3945 return rp->status;
3946
3947 hci_dev_lock(hdev);
3948
3949 adv = hci_find_adv_instance(hdev, cp->handle);
3950
3951 if (cp->enable) {
3952 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3953
3954 if (adv)
3955 adv->enabled = true;
3956 } else {
3957 /* If just one instance was disabled, check if there are
3958 * any other instances enabled before clearing HCI_LE_PER_ADV.
3959 * The current periodic adv instance will be marked as
3960 * disabled once extended advertising is also disabled.
3961 */
3962 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3963 list) {
3964 if (adv->periodic && adv->enabled)
3965 per_adv_cnt++;
3966 }
3967
3968 if (per_adv_cnt > 1)
3969 goto unlock;
3970
3971 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3972 }
3973
3974 unlock:
3975 hci_dev_unlock(hdev);
3976
3977 return rp->status;
3978 }
3979
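/* Command Complete dispatch table helpers: HCI_CC_VL() defines an entry
 * with distinct minimum and maximum response lengths, HCI_CC() a
 * fixed-length entry and HCI_CC_STATUS() an entry whose response is just
 * a status byte.  As an illustration, HCI_CC_STATUS(HCI_OP_RESET,
 * hci_cc_reset) expands to:
 *
 *	{ .op = HCI_OP_RESET, .func = hci_cc_reset,
 *	  .min_len = sizeof(struct hci_ev_status),
 *	  .max_len = sizeof(struct hci_ev_status) }
 */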
3980 #define HCI_CC_VL(_op, _func, _min, _max) \
3981 { \
3982 .op = _op, \
3983 .func = _func, \
3984 .min_len = _min, \
3985 .max_len = _max, \
3986 }
3987
3988 #define HCI_CC(_op, _func, _len) \
3989 HCI_CC_VL(_op, _func, _len, _len)
3990
3991 #define HCI_CC_STATUS(_op, _func) \
3992 HCI_CC(_op, _func, sizeof(struct hci_ev_status))
3993
3994 static const struct hci_cc {
3995 u16 op;
3996 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
3997 u16 min_len;
3998 u16 max_len;
3999 } hci_cc_table[] = {
4000 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4001 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4002 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4003 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4004 hci_cc_remote_name_req_cancel),
4005 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4006 sizeof(struct hci_rp_role_discovery)),
4007 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4008 sizeof(struct hci_rp_read_link_policy)),
4009 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4010 sizeof(struct hci_rp_write_link_policy)),
4011 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4012 sizeof(struct hci_rp_read_def_link_policy)),
4013 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4014 hci_cc_write_def_link_policy),
4015 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4016 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4017 sizeof(struct hci_rp_read_stored_link_key)),
4018 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4019 sizeof(struct hci_rp_delete_stored_link_key)),
4020 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4021 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4022 sizeof(struct hci_rp_read_local_name)),
4023 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4024 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4025 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4026 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4027 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4028 sizeof(struct hci_rp_read_class_of_dev)),
4029 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4030 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4031 sizeof(struct hci_rp_read_voice_setting)),
4032 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4033 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4034 sizeof(struct hci_rp_read_num_supported_iac)),
4035 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4036 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4037 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4038 sizeof(struct hci_rp_read_auth_payload_to)),
4039 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4040 sizeof(struct hci_rp_write_auth_payload_to)),
4041 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4042 sizeof(struct hci_rp_read_local_version)),
4043 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4044 sizeof(struct hci_rp_read_local_commands)),
4045 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4046 sizeof(struct hci_rp_read_local_features)),
4047 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4048 sizeof(struct hci_rp_read_local_ext_features)),
4049 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4050 sizeof(struct hci_rp_read_buffer_size)),
4051 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4052 sizeof(struct hci_rp_read_bd_addr)),
4053 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4054 sizeof(struct hci_rp_read_local_pairing_opts)),
4055 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4056 sizeof(struct hci_rp_read_page_scan_activity)),
4057 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4058 hci_cc_write_page_scan_activity),
4059 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4060 sizeof(struct hci_rp_read_page_scan_type)),
4061 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4062 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4063 sizeof(struct hci_rp_read_clock)),
4064 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4065 sizeof(struct hci_rp_read_enc_key_size)),
4066 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4067 sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4068 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4069 hci_cc_read_def_err_data_reporting,
4070 sizeof(struct hci_rp_read_def_err_data_reporting)),
4071 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4072 hci_cc_write_def_err_data_reporting),
4073 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4074 sizeof(struct hci_rp_pin_code_reply)),
4075 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4076 sizeof(struct hci_rp_pin_code_neg_reply)),
4077 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4078 sizeof(struct hci_rp_read_local_oob_data)),
4079 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4080 sizeof(struct hci_rp_read_local_oob_ext_data)),
4081 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4082 sizeof(struct hci_rp_le_read_buffer_size)),
4083 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4084 sizeof(struct hci_rp_le_read_local_features)),
4085 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4086 sizeof(struct hci_rp_le_read_adv_tx_power)),
4087 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4088 sizeof(struct hci_rp_user_confirm_reply)),
4089 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4090 sizeof(struct hci_rp_user_confirm_reply)),
4091 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4092 sizeof(struct hci_rp_user_confirm_reply)),
4093 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4094 sizeof(struct hci_rp_user_confirm_reply)),
4095 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4096 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4097 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4098 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4099 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4100 hci_cc_le_read_accept_list_size,
4101 sizeof(struct hci_rp_le_read_accept_list_size)),
4102 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4103 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4104 hci_cc_le_add_to_accept_list),
4105 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4106 hci_cc_le_del_from_accept_list),
4107 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4108 sizeof(struct hci_rp_le_read_supported_states)),
4109 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4110 sizeof(struct hci_rp_le_read_def_data_len)),
4111 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4112 hci_cc_le_write_def_data_len),
4113 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4114 hci_cc_le_add_to_resolv_list),
4115 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4116 hci_cc_le_del_from_resolv_list),
4117 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4118 hci_cc_le_clear_resolv_list),
4119 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4120 sizeof(struct hci_rp_le_read_resolv_list_size)),
4121 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4122 hci_cc_le_set_addr_resolution_enable),
4123 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4124 sizeof(struct hci_rp_le_read_max_data_len)),
4125 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4126 hci_cc_write_le_host_supported),
4127 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4128 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4129 sizeof(struct hci_rp_read_rssi)),
4130 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4131 sizeof(struct hci_rp_read_tx_power)),
4132 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4133 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4134 hci_cc_le_set_ext_scan_param),
4135 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4136 hci_cc_le_set_ext_scan_enable),
4137 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4138 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4139 hci_cc_le_read_num_adv_sets,
4140 sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4141 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4142 sizeof(struct hci_rp_le_set_ext_adv_params)),
4143 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4144 hci_cc_le_set_ext_adv_enable),
4145 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4146 hci_cc_le_set_adv_set_random_addr),
4147 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4148 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4149 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4150 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4151 hci_cc_le_set_per_adv_enable),
4152 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4153 sizeof(struct hci_rp_le_read_transmit_power)),
4154 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4155 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4156 sizeof(struct hci_rp_le_read_buffer_size_v2)),
4157 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4158 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4159 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4160 sizeof(struct hci_rp_le_setup_iso_path)),
4161 };
4162
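/* Dispatch a single Command Complete through its table entry: reject the
 * event if it is shorter than min_len, warn (but continue) if it is longer
 * than max_len, then pull min_len bytes and hand the data to the handler.
 */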
4163 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4164 struct sk_buff *skb)
4165 {
4166 void *data;
4167
4168 if (skb->len < cc->min_len) {
4169 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4170 cc->op, skb->len, cc->min_len);
4171 return HCI_ERROR_UNSPECIFIED;
4172 }
4173
4174 /* Just warn if the length is over max_len; it may still be possible to
4175 * partially parse the cc, so leave it to the callback to decide whether
4176 * that is acceptable.
4177 */
4178 if (skb->len > cc->max_len)
4179 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4180 cc->op, skb->len, cc->max_len);
4181
4182 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4183 if (!data)
4184 return HCI_ERROR_UNSPECIFIED;
4185
4186 return cc->func(hdev, data, skb);
4187 }
4188
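/* Command Complete event: look the opcode up in hci_cc_table and let the
 * matching handler derive the status.  Unknown (e.g. vendor) opcodes fall
 * back to treating byte 0 of the payload as the status.  Afterwards the
 * command counter/timer is updated, any pending request is completed and
 * the command queue is kicked again.
 */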
4189 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4190 struct sk_buff *skb, u16 *opcode, u8 *status,
4191 hci_req_complete_t *req_complete,
4192 hci_req_complete_skb_t *req_complete_skb)
4193 {
4194 struct hci_ev_cmd_complete *ev = data;
4195 int i;
4196
4197 *opcode = __le16_to_cpu(ev->opcode);
4198
4199 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4200
4201 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4202 if (hci_cc_table[i].op == *opcode) {
4203 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4204 break;
4205 }
4206 }
4207
4208 if (i == ARRAY_SIZE(hci_cc_table)) {
4209 /* Unknown opcode, assume byte 0 contains the status, so
4210 * that e.g. __hci_cmd_sync() properly returns errors
4211 * for vendor specific commands sent by HCI drivers.
4212 * If a vendor doesn't actually follow this convention we may
4213 * need to introduce a vendor CC table in order to properly set
4214 * the status.
4215 */
4216 *status = skb->data[0];
4217 }
4218
4219 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4220
4221 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4222 req_complete_skb);
4223
4224 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4225 bt_dev_err(hdev,
4226 "unexpected event for opcode 0x%4.4x", *opcode);
4227 return;
4228 }
4229
4230 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4231 queue_work(hdev->workqueue, &hdev->cmd_work);
4232 }
4233
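/* Command Status for HCI_OP_LE_CREATE_CIS: nothing to do on success, but
 * on failure every CIS connection referenced by the sent command is torn
 * down (hci_connect_cfm() followed by hci_conn_del()) and the next pending
 * LE Create CIS, if any, is scheduled.
 */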
4234 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4235 {
4236 struct hci_cp_le_create_cis *cp;
4237 bool pending = false;
4238 int i;
4239
4240 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4241
4242 if (!status)
4243 return;
4244
4245 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4246 if (!cp)
4247 return;
4248
4249 hci_dev_lock(hdev);
4250
4251 /* Remove connection if command failed */
4252 for (i = 0; i < cp->num_cis; i++) {
4253 struct hci_conn *conn;
4254 u16 handle;
4255
4256 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4257
4258 conn = hci_conn_hash_lookup_handle(hdev, handle);
4259 if (conn) {
4260 if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4261 &conn->flags))
4262 pending = true;
4263 conn->state = BT_CLOSED;
4264 hci_connect_cfm(conn, status);
4265 hci_conn_del(conn);
4266 }
4267 }
4268 cp->num_cis = 0;
4269
4270 if (pending)
4271 hci_le_create_cis_pending(hdev);
4272
4273 hci_dev_unlock(hdev);
4274 }
4275
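/* Command Status dispatch table: unlike the Command Complete handlers
 * above, these callbacks only receive the status byte carried by the
 * HCI_EV_CMD_STATUS event.
 */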
4276 #define HCI_CS(_op, _func) \
4277 { \
4278 .op = _op, \
4279 .func = _func, \
4280 }
4281
4282 static const struct hci_cs {
4283 u16 op;
4284 void (*func)(struct hci_dev *hdev, __u8 status);
4285 } hci_cs_table[] = {
4286 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4287 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4288 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4289 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4290 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4291 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4292 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4293 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4294 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4295 hci_cs_read_remote_ext_features),
4296 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4297 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4298 hci_cs_enhanced_setup_sync_conn),
4299 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4300 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4301 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4302 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4303 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4304 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4305 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4306 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4307 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4308 };
4309
4310 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4311 struct sk_buff *skb, u16 *opcode, u8 *status,
4312 hci_req_complete_t *req_complete,
4313 hci_req_complete_skb_t *req_complete_skb)
4314 {
4315 struct hci_ev_cmd_status *ev = data;
4316 int i;
4317
4318 *opcode = __le16_to_cpu(ev->opcode);
4319 *status = ev->status;
4320
4321 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4322
4323 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4324 if (hci_cs_table[i].op == *opcode) {
4325 hci_cs_table[i].func(hdev, ev->status);
4326 break;
4327 }
4328 }
4329
4330 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4331
4332 /* Indicate request completion if the command failed. Also, if
4333 * we're not waiting for a special event and we get a success
4334 * command status we should try to flag the request as completed
4335 * (since for this kind of command there will not be a command
4336 * complete event).
4337 */
4338 if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4339 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4340 req_complete_skb);
4341 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4342 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4343 *opcode);
4344 return;
4345 }
4346 }
4347
4348 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4349 queue_work(hdev->workqueue, &hdev->cmd_work);
4350 }
4351
4352 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4353 struct sk_buff *skb)
4354 {
4355 struct hci_ev_hardware_error *ev = data;
4356
4357 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4358
4359 hdev->hw_error_code = ev->code;
4360
4361 queue_work(hdev->req_workqueue, &hdev->error_reset);
4362 }
4363
4364 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4365 struct sk_buff *skb)
4366 {
4367 struct hci_ev_role_change *ev = data;
4368 struct hci_conn *conn;
4369
4370 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4371
4372 hci_dev_lock(hdev);
4373
4374 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4375 if (conn) {
4376 if (!ev->status)
4377 conn->role = ev->role;
4378
4379 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4380
4381 hci_role_switch_cfm(conn, ev->status, ev->role);
4382 }
4383
4384 hci_dev_unlock(hdev);
4385 }
4386
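/* Number Of Completed Packets event: flow-control credits are returned
 * per handle to the matching ACL/LE/SCO/ISO quota (ISO and LE fall back
 * to the ACL quota when the controller reports no dedicated buffers),
 * counts are clamped to the controller-reported maximums, conn->sent is
 * decremented and tx_work is queued so pending frames can be sent.
 */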
4387 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4388 struct sk_buff *skb)
4389 {
4390 struct hci_ev_num_comp_pkts *ev = data;
4391 int i;
4392
4393 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4394 flex_array_size(ev, handles, ev->num)))
4395 return;
4396
4397 bt_dev_dbg(hdev, "num %d", ev->num);
4398
4399 for (i = 0; i < ev->num; i++) {
4400 struct hci_comp_pkts_info *info = &ev->handles[i];
4401 struct hci_conn *conn;
4402 __u16 handle, count;
4403
4404 handle = __le16_to_cpu(info->handle);
4405 count = __le16_to_cpu(info->count);
4406
4407 conn = hci_conn_hash_lookup_handle(hdev, handle);
4408 if (!conn)
4409 continue;
4410
4411 conn->sent -= count;
4412
4413 switch (conn->type) {
4414 case ACL_LINK:
4415 hdev->acl_cnt += count;
4416 if (hdev->acl_cnt > hdev->acl_pkts)
4417 hdev->acl_cnt = hdev->acl_pkts;
4418 break;
4419
4420 case LE_LINK:
4421 if (hdev->le_pkts) {
4422 hdev->le_cnt += count;
4423 if (hdev->le_cnt > hdev->le_pkts)
4424 hdev->le_cnt = hdev->le_pkts;
4425 } else {
4426 hdev->acl_cnt += count;
4427 if (hdev->acl_cnt > hdev->acl_pkts)
4428 hdev->acl_cnt = hdev->acl_pkts;
4429 }
4430 break;
4431
4432 case SCO_LINK:
4433 hdev->sco_cnt += count;
4434 if (hdev->sco_cnt > hdev->sco_pkts)
4435 hdev->sco_cnt = hdev->sco_pkts;
4436 break;
4437
4438 case ISO_LINK:
4439 if (hdev->iso_pkts) {
4440 hdev->iso_cnt += count;
4441 if (hdev->iso_cnt > hdev->iso_pkts)
4442 hdev->iso_cnt = hdev->iso_pkts;
4443 } else if (hdev->le_pkts) {
4444 hdev->le_cnt += count;
4445 if (hdev->le_cnt > hdev->le_pkts)
4446 hdev->le_cnt = hdev->le_pkts;
4447 } else {
4448 hdev->acl_cnt += count;
4449 if (hdev->acl_cnt > hdev->acl_pkts)
4450 hdev->acl_cnt = hdev->acl_pkts;
4451 }
4452 break;
4453
4454 default:
4455 bt_dev_err(hdev, "unknown type %d conn %p",
4456 conn->type, conn);
4457 break;
4458 }
4459 }
4460
4461 queue_work(hdev->workqueue, &hdev->tx_work);
4462 }
4463
4464 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4465 struct sk_buff *skb)
4466 {
4467 struct hci_ev_mode_change *ev = data;
4468 struct hci_conn *conn;
4469
4470 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4471
4472 hci_dev_lock(hdev);
4473
4474 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4475 if (conn) {
4476 conn->mode = ev->mode;
4477
4478 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4479 &conn->flags)) {
4480 if (conn->mode == HCI_CM_ACTIVE)
4481 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4482 else
4483 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4484 }
4485
4486 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4487 hci_sco_setup(conn, ev->status);
4488 }
4489
4490 hci_dev_unlock(hdev);
4491 }
4492
4493 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4494 struct sk_buff *skb)
4495 {
4496 struct hci_ev_pin_code_req *ev = data;
4497 struct hci_conn *conn;
4498
4499 bt_dev_dbg(hdev, "");
4500
4501 hci_dev_lock(hdev);
4502
4503 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4504 if (!conn)
4505 goto unlock;
4506
4507 if (conn->state == BT_CONNECTED) {
4508 hci_conn_hold(conn);
4509 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4510 hci_conn_drop(conn);
4511 }
4512
4513 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4514 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4515 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4516 sizeof(ev->bdaddr), &ev->bdaddr);
4517 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4518 u8 secure;
4519
4520 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4521 secure = 1;
4522 else
4523 secure = 0;
4524
4525 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4526 }
4527
4528 unlock:
4529 hci_dev_unlock(hdev);
4530 }
4531
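/* Record the link key type and PIN length on the connection and derive
 * the pending security level from the key type: combination keys map to
 * medium or high depending on PIN length, unauthenticated keys to medium,
 * authenticated P-192 keys to high and authenticated P-256 keys to FIPS.
 */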
4532 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4533 {
4534 if (key_type == HCI_LK_CHANGED_COMBINATION)
4535 return;
4536
4537 conn->pin_length = pin_len;
4538 conn->key_type = key_type;
4539
4540 switch (key_type) {
4541 case HCI_LK_LOCAL_UNIT:
4542 case HCI_LK_REMOTE_UNIT:
4543 case HCI_LK_DEBUG_COMBINATION:
4544 return;
4545 case HCI_LK_COMBINATION:
4546 if (pin_len == 16)
4547 conn->pending_sec_level = BT_SECURITY_HIGH;
4548 else
4549 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4550 break;
4551 case HCI_LK_UNAUTH_COMBINATION_P192:
4552 case HCI_LK_UNAUTH_COMBINATION_P256:
4553 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4554 break;
4555 case HCI_LK_AUTH_COMBINATION_P192:
4556 conn->pending_sec_level = BT_SECURITY_HIGH;
4557 break;
4558 case HCI_LK_AUTH_COMBINATION_P256:
4559 conn->pending_sec_level = BT_SECURITY_FIPS;
4560 break;
4561 }
4562 }
4563
4564 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4565 struct sk_buff *skb)
4566 {
4567 struct hci_ev_link_key_req *ev = data;
4568 struct hci_cp_link_key_reply cp;
4569 struct hci_conn *conn;
4570 struct link_key *key;
4571
4572 bt_dev_dbg(hdev, "");
4573
4574 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4575 return;
4576
4577 hci_dev_lock(hdev);
4578
4579 key = hci_find_link_key(hdev, &ev->bdaddr);
4580 if (!key) {
4581 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4582 goto not_found;
4583 }
4584
4585 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4586
4587 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4588 if (conn) {
4589 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4590
4591 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4592 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4593 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4594 bt_dev_dbg(hdev, "ignoring unauthenticated key");
4595 goto not_found;
4596 }
4597
4598 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4599 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4600 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4601 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4602 goto not_found;
4603 }
4604
4605 conn_set_key(conn, key->type, key->pin_len);
4606 }
4607
4608 bacpy(&cp.bdaddr, &ev->bdaddr);
4609 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4610
4611 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4612
4613 hci_dev_unlock(hdev);
4614
4615 return;
4616
4617 not_found:
4618 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4619 hci_dev_unlock(hdev);
4620 }
4621
4622 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4623 struct sk_buff *skb)
4624 {
4625 struct hci_ev_link_key_notify *ev = data;
4626 struct hci_conn *conn;
4627 struct link_key *key;
4628 bool persistent;
4629 u8 pin_len = 0;
4630
4631 bt_dev_dbg(hdev, "");
4632
4633 hci_dev_lock(hdev);
4634
4635 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4636 if (!conn)
4637 goto unlock;
4638
4639 /* Ignore an all-zero (NULL) link key to protect against CVE-2020-26555 */
4640 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4641 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4642 &ev->bdaddr);
4643 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4644 hci_conn_drop(conn);
4645 goto unlock;
4646 }
4647
4648 hci_conn_hold(conn);
4649 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4650 hci_conn_drop(conn);
4651
4652 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4653 conn_set_key(conn, ev->key_type, conn->pin_length);
4654
4655 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4656 goto unlock;
4657
4658 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4659 ev->key_type, pin_len, &persistent);
4660 if (!key)
4661 goto unlock;
4662
4663 /* Update connection information since adding the key will have
4664 * fixed up the type in the case of changed combination keys.
4665 */
4666 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4667 conn_set_key(conn, key->type, key->pin_len);
4668
4669 mgmt_new_link_key(hdev, key, persistent);
4670
4671 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4672 * is set. If it's not set, simply remove the key from the kernel
4673 * list (we've still notified user space about it but with
4674 * store_hint being 0).
4675 */
4676 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4677 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4678 list_del_rcu(&key->list);
4679 kfree_rcu(key, rcu);
4680 goto unlock;
4681 }
4682
4683 if (persistent)
4684 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4685 else
4686 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4687
4688 unlock:
4689 hci_dev_unlock(hdev);
4690 }
4691
4692 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4693 struct sk_buff *skb)
4694 {
4695 struct hci_ev_clock_offset *ev = data;
4696 struct hci_conn *conn;
4697
4698 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4699
4700 hci_dev_lock(hdev);
4701
4702 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4703 if (conn && !ev->status) {
4704 struct inquiry_entry *ie;
4705
4706 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4707 if (ie) {
4708 ie->data.clock_offset = ev->clock_offset;
4709 ie->timestamp = jiffies;
4710 }
4711 }
4712
4713 hci_dev_unlock(hdev);
4714 }
4715
4716 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4717 struct sk_buff *skb)
4718 {
4719 struct hci_ev_pkt_type_change *ev = data;
4720 struct hci_conn *conn;
4721
4722 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4723
4724 hci_dev_lock(hdev);
4725
4726 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4727 if (conn && !ev->status)
4728 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4729
4730 hci_dev_unlock(hdev);
4731 }
4732
4733 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4734 struct sk_buff *skb)
4735 {
4736 struct hci_ev_pscan_rep_mode *ev = data;
4737 struct inquiry_entry *ie;
4738
4739 bt_dev_dbg(hdev, "");
4740
4741 hci_dev_lock(hdev);
4742
4743 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4744 if (ie) {
4745 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4746 ie->timestamp = jiffies;
4747 }
4748
4749 hci_dev_unlock(hdev);
4750 }
4751
4752 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4753 struct sk_buff *skb)
4754 {
4755 struct hci_ev_inquiry_result_rssi *ev = edata;
4756 struct inquiry_data data;
4757 int i;
4758
4759 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4760
4761 if (!ev->num)
4762 return;
4763
4764 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4765 return;
4766
4767 hci_dev_lock(hdev);
4768
4769 if (skb->len == array_size(ev->num,
4770 sizeof(struct inquiry_info_rssi_pscan))) {
4771 struct inquiry_info_rssi_pscan *info;
4772
4773 for (i = 0; i < ev->num; i++) {
4774 u32 flags;
4775
4776 info = hci_ev_skb_pull(hdev, skb,
4777 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4778 sizeof(*info));
4779 if (!info) {
4780 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4781 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4782 goto unlock;
4783 }
4784
4785 bacpy(&data.bdaddr, &info->bdaddr);
4786 data.pscan_rep_mode = info->pscan_rep_mode;
4787 data.pscan_period_mode = info->pscan_period_mode;
4788 data.pscan_mode = info->pscan_mode;
4789 memcpy(data.dev_class, info->dev_class, 3);
4790 data.clock_offset = info->clock_offset;
4791 data.rssi = info->rssi;
4792 data.ssp_mode = 0x00;
4793
4794 flags = hci_inquiry_cache_update(hdev, &data, false);
4795
4796 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4797 info->dev_class, info->rssi,
4798 flags, NULL, 0, NULL, 0, 0);
4799 }
4800 } else if (skb->len == array_size(ev->num,
4801 sizeof(struct inquiry_info_rssi))) {
4802 struct inquiry_info_rssi *info;
4803
4804 for (i = 0; i < ev->num; i++) {
4805 u32 flags;
4806
4807 info = hci_ev_skb_pull(hdev, skb,
4808 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4809 sizeof(*info));
4810 if (!info) {
4811 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4812 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4813 goto unlock;
4814 }
4815
4816 bacpy(&data.bdaddr, &info->bdaddr);
4817 data.pscan_rep_mode = info->pscan_rep_mode;
4818 data.pscan_period_mode = info->pscan_period_mode;
4819 data.pscan_mode = 0x00;
4820 memcpy(data.dev_class, info->dev_class, 3);
4821 data.clock_offset = info->clock_offset;
4822 data.rssi = info->rssi;
4823 data.ssp_mode = 0x00;
4824
4825 flags = hci_inquiry_cache_update(hdev, &data, false);
4826
4827 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4828 info->dev_class, info->rssi,
4829 flags, NULL, 0, NULL, 0, 0);
4830 }
4831 } else {
4832 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4833 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4834 }
4835 unlock:
4836 hci_dev_unlock(hdev);
4837 }
4838
4839 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4840 struct sk_buff *skb)
4841 {
4842 struct hci_ev_remote_ext_features *ev = data;
4843 struct hci_conn *conn;
4844
4845 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4846
4847 hci_dev_lock(hdev);
4848
4849 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4850 if (!conn)
4851 goto unlock;
4852
4853 if (ev->page < HCI_MAX_PAGES)
4854 memcpy(conn->features[ev->page], ev->features, 8);
4855
4856 if (!ev->status && ev->page == 0x01) {
4857 struct inquiry_entry *ie;
4858
4859 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4860 if (ie)
4861 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4862
4863 if (ev->features[0] & LMP_HOST_SSP) {
4864 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4865 } else {
4866 /* It is mandatory by the Bluetooth specification that
4867 * Extended Inquiry Results are only used when Secure
4868 * Simple Pairing is enabled, but some devices violate
4869 * this.
4870 *
4871 * To make these devices work, the internal SSP
4872 * enabled flag needs to be cleared if the remote host
4873 * features do not indicate SSP support */
4874 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4875 }
4876
4877 if (ev->features[0] & LMP_HOST_SC)
4878 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4879 }
4880
4881 if (conn->state != BT_CONFIG)
4882 goto unlock;
4883
4884 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4885 struct hci_cp_remote_name_req cp;
4886 memset(&cp, 0, sizeof(cp));
4887 bacpy(&cp.bdaddr, &conn->dst);
4888 cp.pscan_rep_mode = 0x02;
4889 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4890 } else {
4891 mgmt_device_connected(hdev, conn, NULL, 0);
4892 }
4893
4894 if (!hci_outgoing_auth_needed(hdev, conn)) {
4895 conn->state = BT_CONNECTED;
4896 hci_connect_cfm(conn, ev->status);
4897 hci_conn_drop(conn);
4898 }
4899
4900 unlock:
4901 hci_dev_unlock(hdev);
4902 }
4903
4904 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4905 struct sk_buff *skb)
4906 {
4907 struct hci_ev_sync_conn_complete *ev = data;
4908 struct hci_conn *conn;
4909 u8 status = ev->status;
4910
4911 switch (ev->link_type) {
4912 case SCO_LINK:
4913 case ESCO_LINK:
4914 break;
4915 default:
4916 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4917 * for HCI_Synchronous_Connection_Complete is limited to
4918 * either SCO or eSCO
4919 */
4920 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4921 return;
4922 }
4923
4924 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4925
4926 hci_dev_lock(hdev);
4927
4928 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4929 if (!conn) {
4930 if (ev->link_type == ESCO_LINK)
4931 goto unlock;
4932
4933 /* When the link type in the event indicates SCO connection
4934 * and lookup of the connection object fails, then check
4935 * if an eSCO connection object exists.
4936 *
4937 * The core limits synchronous connections to either
4938 * SCO or eSCO. The eSCO connection is preferred and is
4939 * attempted first; until it is successfully established,
4940 * the link type will be hinted as eSCO.
4941 */
4942 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4943 if (!conn)
4944 goto unlock;
4945 }
4946
4947 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4948 * Processing it more than once per connection can corrupt kernel memory.
4949 *
4950 * As the connection handle is set here for the first time, it indicates
4951 * whether the connection is already set up.
4952 */
4953 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
4954 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4955 goto unlock;
4956 }
4957
4958 switch (status) {
4959 case 0x00:
4960 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
4961 if (status) {
4962 conn->state = BT_CLOSED;
4963 break;
4964 }
4965
4966 conn->state = BT_CONNECTED;
4967 conn->type = ev->link_type;
4968
4969 hci_debugfs_create_conn(conn);
4970 hci_conn_add_sysfs(conn);
4971 break;
4972
4973 case 0x10: /* Connection Accept Timeout */
4974 case 0x0d: /* Connection Rejected due to Limited Resources */
4975 case 0x11: /* Unsupported Feature or Parameter Value */
4976 case 0x1c: /* SCO interval rejected */
4977 case 0x1a: /* Unsupported Remote Feature */
4978 case 0x1e: /* Invalid LMP Parameters */
4979 case 0x1f: /* Unspecified error */
4980 case 0x20: /* Unsupported LMP Parameter value */
4981 if (conn->out) {
4982 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4983 (hdev->esco_type & EDR_ESCO_MASK);
4984 if (hci_setup_sync(conn, conn->parent->handle))
4985 goto unlock;
4986 }
4987 fallthrough;
4988
4989 default:
4990 conn->state = BT_CLOSED;
4991 break;
4992 }
4993
4994 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4995 /* Notify only for SCO over the HCI transport data path (value zero);
4996 * a non-zero value indicates a non-HCI transport data path.
4997 */
4998 if (conn->codec.data_path == 0 && hdev->notify) {
4999 switch (ev->air_mode) {
5000 case 0x02:
5001 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5002 break;
5003 case 0x03:
5004 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5005 break;
5006 }
5007 }
5008
5009 hci_connect_cfm(conn, status);
5010 if (status)
5011 hci_conn_del(conn);
5012
5013 unlock:
5014 hci_dev_unlock(hdev);
5015 }
5016
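/* Compute the used portion of an EIR blob.  EIR is a sequence of
 * length-prefixed fields ([len][type][data...], where len covers the type
 * byte plus the data), terminated by a zero length byte.  For example the
 * field 0x05 0x09 'n' 'a' 'm' 'e' (Complete Local Name "name") advances
 * the parser by 6 bytes.
 */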
5017 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5018 {
5019 size_t parsed = 0;
5020
5021 while (parsed < eir_len) {
5022 u8 field_len = eir[0];
5023
5024 if (field_len == 0)
5025 return parsed;
5026
5027 parsed += field_len + 1;
5028 eir += field_len + 1;
5029 }
5030
5031 return eir_len;
5032 }
5033
5034 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5035 struct sk_buff *skb)
5036 {
5037 struct hci_ev_ext_inquiry_result *ev = edata;
5038 struct inquiry_data data;
5039 size_t eir_len;
5040 int i;
5041
5042 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5043 flex_array_size(ev, info, ev->num)))
5044 return;
5045
5046 bt_dev_dbg(hdev, "num %d", ev->num);
5047
5048 if (!ev->num)
5049 return;
5050
5051 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5052 return;
5053
5054 hci_dev_lock(hdev);
5055
5056 for (i = 0; i < ev->num; i++) {
5057 struct extended_inquiry_info *info = &ev->info[i];
5058 u32 flags;
5059 bool name_known;
5060
5061 bacpy(&data.bdaddr, &info->bdaddr);
5062 data.pscan_rep_mode = info->pscan_rep_mode;
5063 data.pscan_period_mode = info->pscan_period_mode;
5064 data.pscan_mode = 0x00;
5065 memcpy(data.dev_class, info->dev_class, 3);
5066 data.clock_offset = info->clock_offset;
5067 data.rssi = info->rssi;
5068 data.ssp_mode = 0x01;
5069
5070 if (hci_dev_test_flag(hdev, HCI_MGMT))
5071 name_known = eir_get_data(info->data,
5072 sizeof(info->data),
5073 EIR_NAME_COMPLETE, NULL);
5074 else
5075 name_known = true;
5076
5077 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5078
5079 eir_len = eir_get_length(info->data, sizeof(info->data));
5080
5081 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5082 info->dev_class, info->rssi,
5083 flags, info->data, eir_len, NULL, 0, 0);
5084 }
5085
5086 hci_dev_unlock(hdev);
5087 }
5088
5089 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5090 struct sk_buff *skb)
5091 {
5092 struct hci_ev_key_refresh_complete *ev = data;
5093 struct hci_conn *conn;
5094
5095 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5096 __le16_to_cpu(ev->handle));
5097
5098 hci_dev_lock(hdev);
5099
5100 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5101 if (!conn)
5102 goto unlock;
5103
5104 /* For BR/EDR the necessary steps are taken through the
5105 * auth_complete event.
5106 */
5107 if (conn->type != LE_LINK)
5108 goto unlock;
5109
5110 if (!ev->status)
5111 conn->sec_level = conn->pending_sec_level;
5112
5113 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5114
5115 if (ev->status && conn->state == BT_CONNECTED) {
5116 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5117 hci_conn_drop(conn);
5118 goto unlock;
5119 }
5120
5121 if (conn->state == BT_CONFIG) {
5122 if (!ev->status)
5123 conn->state = BT_CONNECTED;
5124
5125 hci_connect_cfm(conn, ev->status);
5126 hci_conn_drop(conn);
5127 } else {
5128 hci_auth_cfm(conn, ev->status);
5129
5130 hci_conn_hold(conn);
5131 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5132 hci_conn_drop(conn);
5133 }
5134
5135 unlock:
5136 hci_dev_unlock(hdev);
5137 }
5138
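/* Derive the authentication requirement for the IO Capability Reply when
 * we are the responder.  Bit 0 is the MITM flag: a remote no-bonding
 * request is followed (keeping our own MITM bit), MITM is enforced when
 * both sides have usable IO capabilities, and otherwise the remote MITM
 * requirement is dropped since it cannot be satisfied.
 */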
5139 static u8 hci_get_auth_req(struct hci_conn *conn)
5140 {
5141 /* If the remote requests no-bonding, follow that lead */
5142 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5143 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5144 return conn->remote_auth | (conn->auth_type & 0x01);
5145
5146 /* If both remote and local have enough IO capabilities, require
5147 * MITM protection
5148 */
5149 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5150 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5151 return conn->remote_auth | 0x01;
5152
5153 /* No MITM protection possible so ignore remote requirement */
5154 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5155 }
5156
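/* Compute the OOB_Data_Present value for the IO Capability Reply.  With
 * Secure Connections enabled the value stored with the OOB data is used
 * directly, except in Secure Connections Only mode where 0x02 is returned
 * only if valid P-256 values exist.  Without Secure Connections, 0x01 is
 * returned when valid P-192 values are present, otherwise 0x00.
 */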
5157 static u8 bredr_oob_data_present(struct hci_conn *conn)
5158 {
5159 struct hci_dev *hdev = conn->hdev;
5160 struct oob_data *data;
5161
5162 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5163 if (!data)
5164 return 0x00;
5165
5166 if (bredr_sc_enabled(hdev)) {
5167 /* When Secure Connections is enabled, just
5168 * return the present value stored with the OOB
5169 * data. The stored value contains the right present
5170 * information. However, it can only be trusted when
5171 * not in Secure Connections Only mode.
5172 */
5173 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5174 return data->present;
5175
5176 /* When Secure Connections Only mode is enabled, then
5177 * the P-256 values are required. If they are not
5178 * available, then do not declare that OOB data is
5179 * present.
5180 */
5181 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5182 !crypto_memneq(data->hash256, ZERO_KEY, 16))
5183 return 0x00;
5184
5185 return 0x02;
5186 }
5187
5188 /* When Secure Connections is not enabled or not actually
5189 * supported by the hardware, check whether the
5190 * P-192 data values are present.
5191 */
5192 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5193 !crypto_memneq(data->hash192, ZERO_KEY, 16))
5194 return 0x00;
5195
5196 return 0x01;
5197 }
5198
5199 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5200 struct sk_buff *skb)
5201 {
5202 struct hci_ev_io_capa_request *ev = data;
5203 struct hci_conn *conn;
5204
5205 bt_dev_dbg(hdev, "");
5206
5207 hci_dev_lock(hdev);
5208
5209 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5210 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5211 goto unlock;
5212
5213 /* Assume remote supports SSP since it has triggered this event */
5214 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5215
5216 hci_conn_hold(conn);
5217
5218 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5219 goto unlock;
5220
5221 /* Allow pairing if we're bondable, if we are the initiator
5222 * of the pairing, or if the remote is not requesting bonding.
5223 */
5224 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5225 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5226 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5227 struct hci_cp_io_capability_reply cp;
5228
5229 bacpy(&cp.bdaddr, &ev->bdaddr);
5230 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
5231 * since KeyboardDisplay is not supported for BR/EDR by the spec. */
5232 cp.capability = (conn->io_capability == 0x04) ?
5233 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5234
5235 /* If we are the initiator, there is no remote information yet */
5236 if (conn->remote_auth == 0xff) {
5237 /* Request MITM protection if our IO caps allow it
5238 * except for the no-bonding case.
5239 */
5240 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5241 conn->auth_type != HCI_AT_NO_BONDING)
5242 conn->auth_type |= 0x01;
5243 } else {
5244 conn->auth_type = hci_get_auth_req(conn);
5245 }
5246
5247 /* If we're not bondable, force one of the non-bondable
5248 * authentication requirement values.
5249 */
5250 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5251 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5252
5253 cp.authentication = conn->auth_type;
5254 cp.oob_data = bredr_oob_data_present(conn);
5255
5256 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5257 sizeof(cp), &cp);
5258 } else {
5259 struct hci_cp_io_capability_neg_reply cp;
5260
5261 bacpy(&cp.bdaddr, &ev->bdaddr);
5262 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5263
5264 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5265 sizeof(cp), &cp);
5266 }
5267
5268 unlock:
5269 hci_dev_unlock(hdev);
5270 }
5271
5272 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5273 struct sk_buff *skb)
5274 {
5275 struct hci_ev_io_capa_reply *ev = data;
5276 struct hci_conn *conn;
5277
5278 bt_dev_dbg(hdev, "");
5279
5280 hci_dev_lock(hdev);
5281
5282 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5283 if (!conn)
5284 goto unlock;
5285
5286 conn->remote_cap = ev->capability;
5287 conn->remote_auth = ev->authentication;
5288
5289 unlock:
5290 hci_dev_unlock(hdev);
5291 }
5292
5293 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5294 struct sk_buff *skb)
5295 {
5296 struct hci_ev_user_confirm_req *ev = data;
5297 int loc_mitm, rem_mitm, confirm_hint = 0;
5298 struct hci_conn *conn;
5299
5300 bt_dev_dbg(hdev, "");
5301
5302 hci_dev_lock(hdev);
5303
5304 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5305 goto unlock;
5306
5307 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5308 if (!conn)
5309 goto unlock;
5310
5311 loc_mitm = (conn->auth_type & 0x01);
5312 rem_mitm = (conn->remote_auth & 0x01);
5313
5314 /* If we require MITM but the remote device can't provide that
5315 * (it has NoInputNoOutput) then reject the confirmation
5316 * request. We check the security level here since it doesn't
5317 * necessarily match conn->auth_type.
5318 */
5319 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5320 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5321 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5322 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5323 sizeof(ev->bdaddr), &ev->bdaddr);
5324 goto unlock;
5325 }
5326
5327 /* If neither side requires MITM protection, auto-accept */
5328 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5329 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5330
5331 /* If we're not the initiator, request authorization to
5332 * proceed from user space (mgmt_user_confirm with
5333 * confirm_hint set to 1). The exception is if neither
5334 * side requires MITM or if the local IO capability is
5335 * NoInputNoOutput, in which case we auto-accept.
5336 */
5337 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5338 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5339 (loc_mitm || rem_mitm)) {
5340 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5341 confirm_hint = 1;
5342 goto confirm;
5343 }
5344
5345 /* If a link key already exists on the local host, leave the
5346 * decision to user space since the remote device could be
5347 * legitimate or malicious.
5348 */
5349 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5350 bt_dev_dbg(hdev, "Local host already has link key");
5351 confirm_hint = 1;
5352 goto confirm;
5353 }
5354
5355 BT_DBG("Auto-accept of user confirmation with %ums delay",
5356 hdev->auto_accept_delay);
5357
5358 if (hdev->auto_accept_delay > 0) {
5359 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5360 queue_delayed_work(conn->hdev->workqueue,
5361 &conn->auto_accept_work, delay);
5362 goto unlock;
5363 }
5364
5365 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5366 sizeof(ev->bdaddr), &ev->bdaddr);
5367 goto unlock;
5368 }
5369
5370 confirm:
5371 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5372 le32_to_cpu(ev->passkey), confirm_hint);
5373
5374 unlock:
5375 hci_dev_unlock(hdev);
5376 }
5377
5378 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5379 struct sk_buff *skb)
5380 {
5381 struct hci_ev_user_passkey_req *ev = data;
5382
5383 bt_dev_dbg(hdev, "");
5384
5385 if (hci_dev_test_flag(hdev, HCI_MGMT))
5386 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5387 }
5388
5389 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5390 struct sk_buff *skb)
5391 {
5392 struct hci_ev_user_passkey_notify *ev = data;
5393 struct hci_conn *conn;
5394
5395 bt_dev_dbg(hdev, "");
5396
5397 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5398 if (!conn)
5399 return;
5400
5401 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5402 conn->passkey_entered = 0;
5403
5404 if (hci_dev_test_flag(hdev, HCI_MGMT))
5405 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5406 conn->dst_type, conn->passkey_notify,
5407 conn->passkey_entered);
5408 }
5409
5410 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5411 struct sk_buff *skb)
5412 {
5413 struct hci_ev_keypress_notify *ev = data;
5414 struct hci_conn *conn;
5415
5416 bt_dev_dbg(hdev, "");
5417
5418 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5419 if (!conn)
5420 return;
5421
5422 switch (ev->type) {
5423 case HCI_KEYPRESS_STARTED:
5424 conn->passkey_entered = 0;
5425 return;
5426
5427 case HCI_KEYPRESS_ENTERED:
5428 conn->passkey_entered++;
5429 break;
5430
5431 case HCI_KEYPRESS_ERASED:
5432 conn->passkey_entered--;
5433 break;
5434
5435 case HCI_KEYPRESS_CLEARED:
5436 conn->passkey_entered = 0;
5437 break;
5438
5439 case HCI_KEYPRESS_COMPLETED:
5440 return;
5441 }
5442
5443 if (hci_dev_test_flag(hdev, HCI_MGMT))
5444 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5445 conn->dst_type, conn->passkey_notify,
5446 conn->passkey_entered);
5447 }
5448
5449 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5450 struct sk_buff *skb)
5451 {
5452 struct hci_ev_simple_pair_complete *ev = data;
5453 struct hci_conn *conn;
5454
5455 bt_dev_dbg(hdev, "");
5456
5457 hci_dev_lock(hdev);
5458
5459 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5460 if (!conn || !hci_conn_ssp_enabled(conn))
5461 goto unlock;
5462
5463 /* Reset the authentication requirement to unknown */
5464 conn->remote_auth = 0xff;
5465
5466 /* To avoid duplicate auth_failed events to user space we check
5467 * the HCI_CONN_AUTH_PEND flag which will be set if we
5468 * initiated the authentication. A traditional auth_complete
5469 * event is always produced as initiator and is also mapped to
5470 * the mgmt_auth_failed event */
5471 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5472 mgmt_auth_failed(conn, ev->status);
5473
5474 hci_conn_drop(conn);
5475
5476 unlock:
5477 hci_dev_unlock(hdev);
5478 }
5479
5480 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5481 struct sk_buff *skb)
5482 {
5483 struct hci_ev_remote_host_features *ev = data;
5484 struct inquiry_entry *ie;
5485 struct hci_conn *conn;
5486
5487 bt_dev_dbg(hdev, "");
5488
5489 hci_dev_lock(hdev);
5490
5491 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5492 if (conn)
5493 memcpy(conn->features[1], ev->features, 8);
5494
5495 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5496 if (ie)
5497 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5498
5499 hci_dev_unlock(hdev);
5500 }
5501
5502 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5503 struct sk_buff *skb)
5504 {
5505 struct hci_ev_remote_oob_data_request *ev = edata;
5506 struct oob_data *data;
5507
5508 bt_dev_dbg(hdev, "");
5509
5510 hci_dev_lock(hdev);
5511
5512 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5513 goto unlock;
5514
5515 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5516 if (!data) {
5517 struct hci_cp_remote_oob_data_neg_reply cp;
5518
5519 bacpy(&cp.bdaddr, &ev->bdaddr);
5520 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5521 sizeof(cp), &cp);
5522 goto unlock;
5523 }
5524
5525 if (bredr_sc_enabled(hdev)) {
5526 struct hci_cp_remote_oob_ext_data_reply cp;
5527
5528 bacpy(&cp.bdaddr, &ev->bdaddr);
5529 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5530 memset(cp.hash192, 0, sizeof(cp.hash192));
5531 memset(cp.rand192, 0, sizeof(cp.rand192));
5532 } else {
5533 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5534 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5535 }
5536 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5537 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5538
5539 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5540 sizeof(cp), &cp);
5541 } else {
5542 struct hci_cp_remote_oob_data_reply cp;
5543
5544 bacpy(&cp.bdaddr, &ev->bdaddr);
5545 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5546 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5547
5548 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5549 sizeof(cp), &cp);
5550 }
5551
5552 unlock:
5553 hci_dev_unlock(hdev);
5554 }
5555
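/* Fill in the initiator and responder addresses of an LE connection.  For
 * outgoing connections the peer is the responder and our own address is
 * either the controller-provided Local RPA, hdev->rpa or the identity
 * address; for incoming connections the roles are swapped and the default
 * connection interval bounds are initialised as well.
 */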
5556 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5557 u8 bdaddr_type, bdaddr_t *local_rpa)
5558 {
5559 if (conn->out) {
5560 conn->dst_type = bdaddr_type;
5561 conn->resp_addr_type = bdaddr_type;
5562 bacpy(&conn->resp_addr, bdaddr);
5563
5564 /* If the controller has set a Local RPA, it must be used
5565 * instead of hdev->rpa.
5566 */
5567 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5568 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5569 bacpy(&conn->init_addr, local_rpa);
5570 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5571 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5572 bacpy(&conn->init_addr, &conn->hdev->rpa);
5573 } else {
5574 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5575 &conn->init_addr_type);
5576 }
5577 } else {
5578 conn->resp_addr_type = conn->hdev->adv_addr_type;
5579 /* If the controller has set a Local RPA, it must be used
5580 * instead of hdev->rpa.
5581 */
5582 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5583 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5584 bacpy(&conn->resp_addr, local_rpa);
5585 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5586 /* In case of ext adv, resp_addr will be updated in
5587 * Adv Terminated event.
5588 */
5589 if (!ext_adv_capable(conn->hdev))
5590 bacpy(&conn->resp_addr,
5591 &conn->hdev->random_addr);
5592 } else {
5593 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5594 }
5595
5596 conn->init_addr_type = bdaddr_type;
5597 bacpy(&conn->init_addr, bdaddr);
5598
5599 /* For incoming connections, set the default minimum
5600 * and maximum connection interval. They will be used
5601 * to check if the parameters are in range and if not
5602 * trigger the connection update procedure.
5603 */
5604 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5605 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5606 }
5607 }
5608
5609 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5610 bdaddr_t *bdaddr, u8 bdaddr_type,
5611 bdaddr_t *local_rpa, u8 role, u16 handle,
5612 u16 interval, u16 latency,
5613 u16 supervision_timeout)
5614 {
5615 struct hci_conn_params *params;
5616 struct hci_conn *conn;
5617 struct smp_irk *irk;
5618 u8 addr_type;
5619
5620 hci_dev_lock(hdev);
5621
5622 /* All controllers implicitly stop advertising in the event of a
5623 * connection, so ensure that the state bit is cleared.
5624 */
5625 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5626
5627 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5628 if (!conn) {
5629 /* In case of error status and there is no connection pending
5630 * just unlock as there is nothing to cleanup.
5631 */
5632 if (status)
5633 goto unlock;
5634
5635 conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5636 if (IS_ERR(conn)) {
5637 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
5638 goto unlock;
5639 }
5640
5641 conn->dst_type = bdaddr_type;
5642
5643 /* If we didn't have a hci_conn object previously
5644 * but we're in central role this must be something
5645 * initiated using an accept list. Since accept list based
5646 * connections are not "first class citizens" we don't
5647 * have full tracking of them. Therefore, we go ahead
5648 * with a "best effort" approach of determining the
5649 * initiator address based on the HCI_PRIVACY flag.
5650 */
5651 if (conn->out) {
5652 conn->resp_addr_type = bdaddr_type;
5653 bacpy(&conn->resp_addr, bdaddr);
5654 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5655 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5656 bacpy(&conn->init_addr, &hdev->rpa);
5657 } else {
5658 hci_copy_identity_address(hdev,
5659 &conn->init_addr,
5660 &conn->init_addr_type);
5661 }
5662 }
5663 } else {
5664 cancel_delayed_work(&conn->le_conn_timeout);
5665 }
5666
5667 /* The HCI_LE_Connection_Complete event is only sent once per connection.
5668 * Processing it more than once per connection can corrupt kernel memory.
5669 *
5670 * As the connection handle is set here for the first time, it indicates
5671 * whether the connection is already set up.
5672 */
5673 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5674 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5675 goto unlock;
5676 }
5677
5678 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5679
5680 /* Lookup the identity address from the stored connection
5681 * address and address type.
5682 *
5683 * When establishing connections to an identity address, the
5684 * connection procedure will store the resolvable random
5685 * address first. Now if it can be converted back into the
5686 * identity address, start using the identity address from
5687 * now on.
5688 */
5689 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5690 if (irk) {
5691 bacpy(&conn->dst, &irk->bdaddr);
5692 conn->dst_type = irk->addr_type;
5693 }
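	/* For illustration (mechanism per the Core Specification's definition of
	 * resolvable private addresses, paraphrased): an RPA carries a 24-bit
	 * prand and a 24-bit hash with hash = ah(IRK, prand); hci_get_irk()
	 * above recomputes the hash against each stored IRK and returns the
	 * identity address of the first matching key, which then replaces
	 * conn->dst.
	 */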
5694
5695 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5696
5697 /* All connection failure handling is taken care of by the
5698 * hci_conn_failed function which is triggered by the HCI
5699 * request completion callbacks used for connecting.
5700 */
5701 if (status || hci_conn_set_handle(conn, handle))
5702 goto unlock;
5703
5704 /* Drop the connection if it has been aborted */
5705 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5706 hci_conn_drop(conn);
5707 goto unlock;
5708 }
5709
5710 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5711 addr_type = BDADDR_LE_PUBLIC;
5712 else
5713 addr_type = BDADDR_LE_RANDOM;
5714
5715 /* Drop the connection if the device is blocked */
5716 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5717 hci_conn_drop(conn);
5718 goto unlock;
5719 }
5720
5721 mgmt_device_connected(hdev, conn, NULL, 0);
5722
5723 conn->sec_level = BT_SECURITY_LOW;
5724 conn->state = BT_CONFIG;
5725
5726 /* Store current advertising instance as connection advertising instance
5727 * when software rotation is in use so it can be re-enabled when
5728 * disconnected.
5729 */
5730 if (!ext_adv_capable(hdev))
5731 conn->adv_instance = hdev->cur_adv_instance;
5732
5733 conn->le_conn_interval = interval;
5734 conn->le_conn_latency = latency;
5735 conn->le_supv_timeout = supervision_timeout;
5736
5737 hci_debugfs_create_conn(conn);
5738 hci_conn_add_sysfs(conn);
5739
5740 /* The remote features procedure is defined for central
5741 * role only. So request the remote features only in the
5742 * case of a connection that we initiated.
5743 *
5744 * If the local controller supports peripheral-initiated features
5745 * exchange, then requesting the remote features in peripheral
5746 * role is possible. Otherwise just transition into the
5747 * connected state without requesting the remote features.
5748 */
5749 if (conn->out ||
5750 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5751 struct hci_cp_le_read_remote_features cp;
5752
5753 cp.handle = __cpu_to_le16(conn->handle);
5754
5755 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5756 sizeof(cp), &cp);
5757
5758 hci_conn_hold(conn);
5759 } else {
5760 conn->state = BT_CONNECTED;
5761 hci_connect_cfm(conn, status);
5762 }
5763
5764 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5765 conn->dst_type);
5766 if (params) {
5767 hci_pend_le_list_del_init(params);
5768 if (params->conn) {
5769 hci_conn_drop(params->conn);
5770 hci_conn_put(params->conn);
5771 params->conn = NULL;
5772 }
5773 }
5774
5775 unlock:
5776 hci_update_passive_scan(hdev);
5777 hci_dev_unlock(hdev);
5778 }
5779
5780 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5781 struct sk_buff *skb)
5782 {
5783 struct hci_ev_le_conn_complete *ev = data;
5784
5785 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5786
5787 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5788 NULL, ev->role, le16_to_cpu(ev->handle),
5789 le16_to_cpu(ev->interval),
5790 le16_to_cpu(ev->latency),
5791 le16_to_cpu(ev->supervision_timeout));
5792 }
5793
5794 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5795 struct sk_buff *skb)
5796 {
5797 struct hci_ev_le_enh_conn_complete *ev = data;
5798
5799 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5800
5801 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5802 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5803 le16_to_cpu(ev->interval),
5804 le16_to_cpu(ev->latency),
5805 le16_to_cpu(ev->supervision_timeout));
5806 }
5807
5808 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5809 struct sk_buff *skb)
5810 {
5811 struct hci_evt_le_ext_adv_set_term *ev = data;
5812 struct hci_conn *conn;
5813 struct adv_info *adv, *n;
5814
5815 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5816
5817 /* The Bluetooth Core 5.3 specification clearly states that this event
5818 * shall not be sent when the Host disables the advertising set. So in
5819 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5820 *
5821 * When the Host disables an advertising set, all cleanup is done via
5822 * its command callback and does not need to be duplicated here.
5823 */
5824 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5825 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5826 return;
5827 }
5828
5829 hci_dev_lock(hdev);
5830
5831 adv = hci_find_adv_instance(hdev, ev->handle);
5832
5833 if (ev->status) {
5834 if (!adv)
5835 goto unlock;
5836
5837 /* Remove advertising as it has been terminated */
5838 hci_remove_adv_instance(hdev, ev->handle);
5839 mgmt_advertising_removed(NULL, hdev, ev->handle);
5840
5841 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5842 if (adv->enabled)
5843 goto unlock;
5844 }
5845
5846 /* We are no longer advertising, clear HCI_LE_ADV */
5847 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5848 goto unlock;
5849 }
5850
5851 if (adv)
5852 adv->enabled = false;
5853
5854 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5855 if (conn) {
5856 /* Store handle in the connection so the correct advertising
5857 * instance can be re-enabled when disconnected.
5858 */
5859 conn->adv_instance = ev->handle;
5860
5861 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5862 bacmp(&conn->resp_addr, BDADDR_ANY))
5863 goto unlock;
5864
5865 if (!ev->handle) {
5866 bacpy(&conn->resp_addr, &hdev->random_addr);
5867 goto unlock;
5868 }
5869
5870 if (adv)
5871 bacpy(&conn->resp_addr, &adv->random_addr);
5872 }
5873
5874 unlock:
5875 hci_dev_unlock(hdev);
5876 }
5877
5878 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5879 struct sk_buff *skb)
5880 {
5881 struct hci_ev_le_conn_update_complete *ev = data;
5882 struct hci_conn *conn;
5883
5884 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5885
5886 if (ev->status)
5887 return;
5888
5889 hci_dev_lock(hdev);
5890
5891 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5892 if (conn) {
5893 conn->le_conn_interval = le16_to_cpu(ev->interval);
5894 conn->le_conn_latency = le16_to_cpu(ev->latency);
5895 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5896 }
5897
5898 hci_dev_unlock(hdev);
5899 }
5900
5901 /* This function requires that the caller holds hdev->lock */
5902 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5903 bdaddr_t *addr,
5904 u8 addr_type, bool addr_resolved,
5905 u8 adv_type, u8 phy, u8 sec_phy)
5906 {
5907 struct hci_conn *conn;
5908 struct hci_conn_params *params;
5909
5910 /* If the event is not connectable don't proceed further */
5911 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5912 return NULL;
5913
5914 /* Ignore if the device is blocked or hdev is suspended */
5915 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5916 hdev->suspended)
5917 return NULL;
5918
5919 /* Most controllers will fail if we try to create new connections
5920 * while we have an existing one in peripheral role.
5921 */
5922 if (hdev->conn_hash.le_num_peripheral > 0 &&
5923 (test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) ||
5924 !(hdev->le_states[3] & 0x10)))
5925 return NULL;
5926
5927 /* If we're not connectable, only connect to devices that we have in
5928 * our pend_le_conns list.
5929 */
5930 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5931 addr_type);
5932 if (!params)
5933 return NULL;
5934
5935 if (!params->explicit_connect) {
5936 switch (params->auto_connect) {
5937 case HCI_AUTO_CONN_DIRECT:
5938 /* Only devices advertising with ADV_DIRECT_IND are
5939 * triggering a connection attempt. This is allowing
5940 * incoming connections from peripheral devices.
5941 */
5942 if (adv_type != LE_ADV_DIRECT_IND)
5943 return NULL;
5944 break;
5945 case HCI_AUTO_CONN_ALWAYS:
5946 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5947 * are triggering a connection attempt. This means
5948 * that incoming connections from peripheral device are
5949 * accepted and also outgoing connections to peripheral
5950 * devices are established when found.
5951 */
5952 break;
5953 default:
5954 return NULL;
5955 }
5956 }
5957
5958 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5959 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5960 HCI_ROLE_MASTER, phy, sec_phy);
5961 if (!IS_ERR(conn)) {
5962 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5963 * by the higher layer that tried to connect; if not, then
5964 * store the pointer since we don't really have any
5965 * other owner of the object besides the params that
5966 * triggered it. This way we can abort the connection if
5967 * the parameters get removed and keep the reference
5968 * count consistent once the connection is established.
5969 */
5970
5971 if (!params->explicit_connect)
5972 params->conn = hci_conn_get(conn);
5973
5974 return conn;
5975 }
5976
5977 switch (PTR_ERR(conn)) {
5978 case -EBUSY:
5979 /* If hci_connect() returns -EBUSY it means there is already
5980 * an LE connection attempt going on. Since controllers don't
5981 * support more than one connection attempt at the time, we
5982 * don't consider this an error case.
5983 */
5984 break;
5985 default:
5986 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5987 return NULL;
5988 }
5989
5990 return NULL;
5991 }
5992
5993 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5994 u8 bdaddr_type, bdaddr_t *direct_addr,
5995 u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
5996 u8 *data, u8 len, bool ext_adv, bool ctl_time,
5997 u64 instant)
5998 {
5999 struct discovery_state *d = &hdev->discovery;
6000 struct smp_irk *irk;
6001 struct hci_conn *conn;
6002 bool match, bdaddr_resolved;
6003 u32 flags;
6004 u8 *ptr;
6005
6006 switch (type) {
6007 case LE_ADV_IND:
6008 case LE_ADV_DIRECT_IND:
6009 case LE_ADV_SCAN_IND:
6010 case LE_ADV_NONCONN_IND:
6011 case LE_ADV_SCAN_RSP:
6012 break;
6013 default:
6014 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6015 "type: 0x%02x", type);
6016 return;
6017 }
6018
6019 if (len > max_adv_len(hdev)) {
6020 bt_dev_err_ratelimited(hdev,
6021 "adv larger than maximum supported");
6022 return;
6023 }
6024
6025 /* Find the end of the data in case the report contains padded zero
6026 * bytes at the end causing an invalid length value.
6027 *
6028 * When data is NULL, len is 0 so there is no need for extra ptr
6029 * check as 'ptr < data + 0' is already false in such case.
6030 */
6031 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6032 if (ptr + 1 + *ptr > data + len)
6033 break;
6034 }
6035
6036 /* Adjust for the actual length. This handles the case when a remote
6037 * device is advertising with an incorrect data length.
6038 */
6039 len = ptr - data;
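	/* Worked example (hypothetical report): data = 02 01 06 05 09 'T' 'e'
	 * 's' 't' 00 00 00 with a reported len of 12. The loop walks the AD
	 * structures (one length byte, then 'length' bytes of type + payload),
	 * stops at the first zero length byte and len is adjusted to 9, so the
	 * trailing zero padding is not forwarded in the device found event.
	 */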
6040
6041 /* If the direct address is present, then this report is from
6042 * a LE Direct Advertising Report event. In that case it is
6043 * important to see if the address is matching the local
6044 * controller address.
6045 */
6046 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6047 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6048 &bdaddr_resolved);
6049
6050 /* Only resolvable random addresses are valid for this
6051 * kind of report; others can be ignored.
6052 */
6053 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6054 return;
6055
6056 /* If the controller is not using resolvable random
6057 * addresses, then this report can be ignored.
6058 */
6059 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6060 return;
6061
6062 /* If the local IRK of the controller does not match
6063 * with the resolvable random address provided, then
6064 * this report can be ignored.
6065 */
6066 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6067 return;
6068 }
6069
6070 /* Check if we need to convert to identity address */
6071 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6072 if (irk) {
6073 bdaddr = &irk->bdaddr;
6074 bdaddr_type = irk->addr_type;
6075 }
6076
6077 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6078
6079 /* Check if we have been requested to connect to this device.
6080 *
6081 * direct_addr is set only for directed advertising reports (it is NULL
6082 * for advertising reports) and is already verified to be an RPA above.
6083 */
6084 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6085 type, phy, sec_phy);
6086 if (!ext_adv && conn && type == LE_ADV_IND &&
6087 len <= max_adv_len(hdev)) {
6088 /* Store report for later inclusion by
6089 * mgmt_device_connected
6090 */
6091 memcpy(conn->le_adv_data, data, len);
6092 conn->le_adv_data_len = len;
6093 }
6094
6095 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6096 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6097 else
6098 flags = 0;
6099
6100 /* All scan results should be sent up for Mesh systems */
6101 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6102 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6103 rssi, flags, data, len, NULL, 0, instant);
6104 return;
6105 }
6106
6107 /* Passive scanning shouldn't trigger any device found events,
6108 * except for devices marked as CONN_REPORT for which we do send
6109 * device found events, or advertisement monitoring requested.
6110 */
6111 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6112 if (type == LE_ADV_DIRECT_IND)
6113 return;
6114
6115 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6116 bdaddr, bdaddr_type) &&
6117 idr_is_empty(&hdev->adv_monitors_idr))
6118 return;
6119
6120 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6121 rssi, flags, data, len, NULL, 0, 0);
6122 return;
6123 }
6124
6125 /* When receiving a scan response, there is no way to
6126 * know if the remote device is connectable or not. However
6127 * since scan responses are merged with a previously seen
6128 * advertising report, the flags field from that report
6129 * will be used.
6130 *
6131 * In the unlikely case that a controller just sends a scan
6132 * response event that doesn't match the pending report, then
6133 * it is marked as a standalone SCAN_RSP.
6134 */
6135 if (type == LE_ADV_SCAN_RSP)
6136 flags = MGMT_DEV_FOUND_SCAN_RSP;
6137
6138 /* If there's nothing pending either store the data from this
6139 * event or send an immediate device found event if the data
6140 * should not be stored for later.
6141 */
6142 if (!ext_adv && !has_pending_adv_report(hdev)) {
6143 /* If the report will trigger a SCAN_REQ store it for
6144 * later merging.
6145 */
6146 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6147 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6148 rssi, flags, data, len);
6149 return;
6150 }
6151
6152 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6153 rssi, flags, data, len, NULL, 0, 0);
6154 return;
6155 }
6156
6157 /* Check if the pending report is for the same device as the new one */
6158 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6159 bdaddr_type == d->last_adv_addr_type);
6160
6161 /* If the pending data doesn't match this report or this isn't a
6162 * scan response (e.g. we got a duplicate ADV_IND) then force
6163 * sending of the pending data.
6164 */
6165 if (type != LE_ADV_SCAN_RSP || !match) {
6166 /* Send out whatever is in the cache, but skip duplicates */
6167 if (!match)
6168 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6169 d->last_adv_addr_type, NULL,
6170 d->last_adv_rssi, d->last_adv_flags,
6171 d->last_adv_data,
6172 d->last_adv_data_len, NULL, 0, 0);
6173
6174 /* If the new report will trigger a SCAN_REQ store it for
6175 * later merging.
6176 */
6177 if (!ext_adv && (type == LE_ADV_IND ||
6178 type == LE_ADV_SCAN_IND)) {
6179 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6180 rssi, flags, data, len);
6181 return;
6182 }
6183
6184 /* The advertising reports cannot be merged, so clear
6185 * the pending report and send out a device found event.
6186 */
6187 clear_pending_adv_report(hdev);
6188 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6189 rssi, flags, data, len, NULL, 0, 0);
6190 return;
6191 }
6192
6193 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6194 * the new event is a SCAN_RSP. We can therefore proceed with
6195 * sending a merged device found event.
6196 */
6197 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6198 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6199 d->last_adv_data, d->last_adv_data_len, data, len, 0);
6200 clear_pending_adv_report(hdev);
6201 }
6202
6203 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6204 struct sk_buff *skb)
6205 {
6206 struct hci_ev_le_advertising_report *ev = data;
6207 u64 instant = jiffies;
6208
6209 if (!ev->num)
6210 return;
6211
6212 hci_dev_lock(hdev);
6213
6214 while (ev->num--) {
6215 struct hci_ev_le_advertising_info *info;
6216 s8 rssi;
6217
6218 info = hci_le_ev_skb_pull(hdev, skb,
6219 HCI_EV_LE_ADVERTISING_REPORT,
6220 sizeof(*info));
6221 if (!info)
6222 break;
6223
6224 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6225 info->length + 1))
6226 break;
6227
6228 if (info->length <= max_adv_len(hdev)) {
6229 rssi = info->data[info->length];
6230 process_adv_report(hdev, info->type, &info->bdaddr,
6231 info->bdaddr_type, NULL, 0,
6232 HCI_ADV_PHY_1M, 0, rssi,
6233 info->data, info->length, false,
6234 false, instant);
6235 } else {
6236 bt_dev_err(hdev, "Dropping invalid advertising data");
6237 }
6238 }
6239
6240 hci_dev_unlock(hdev);
6241 }
6242
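/* Example mapping (derived from the checks below; values are illustrative):
 * a legacy ADV_IND report arrives with the legacy-PDU bit set and is matched
 * through the switch statement, while an extended connectable report
 * (connectable bit set, legacy-PDU bit clear) falls through to the
 * LE_EXT_ADV_CONN_IND check and is reported as LE_ADV_IND.
 */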
6243 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6244 {
6245 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6246 switch (evt_type) {
6247 case LE_LEGACY_ADV_IND:
6248 return LE_ADV_IND;
6249 case LE_LEGACY_ADV_DIRECT_IND:
6250 return LE_ADV_DIRECT_IND;
6251 case LE_LEGACY_ADV_SCAN_IND:
6252 return LE_ADV_SCAN_IND;
6253 case LE_LEGACY_NONCONN_IND:
6254 return LE_ADV_NONCONN_IND;
6255 case LE_LEGACY_SCAN_RSP_ADV:
6256 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6257 return LE_ADV_SCAN_RSP;
6258 }
6259
6260 goto invalid;
6261 }
6262
6263 if (evt_type & LE_EXT_ADV_CONN_IND) {
6264 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6265 return LE_ADV_DIRECT_IND;
6266
6267 return LE_ADV_IND;
6268 }
6269
6270 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6271 return LE_ADV_SCAN_RSP;
6272
6273 if (evt_type & LE_EXT_ADV_SCAN_IND)
6274 return LE_ADV_SCAN_IND;
6275
6276 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6277 evt_type & LE_EXT_ADV_DIRECT_IND)
6278 return LE_ADV_NONCONN_IND;
6279
6280 invalid:
6281 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6282 evt_type);
6283
6284 return LE_ADV_INVALID;
6285 }
6286
6287 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6288 struct sk_buff *skb)
6289 {
6290 struct hci_ev_le_ext_adv_report *ev = data;
6291 u64 instant = jiffies;
6292
6293 if (!ev->num)
6294 return;
6295
6296 hci_dev_lock(hdev);
6297
6298 while (ev->num--) {
6299 struct hci_ev_le_ext_adv_info *info;
6300 u8 legacy_evt_type;
6301 u16 evt_type;
6302
6303 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6304 sizeof(*info));
6305 if (!info)
6306 break;
6307
6308 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6309 info->length))
6310 break;
6311
6312 evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6313 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6314
6315 if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
6316 &hdev->quirks)) {
6317 info->primary_phy &= 0x1f;
6318 info->secondary_phy &= 0x1f;
6319 }
6320
6321 if (legacy_evt_type != LE_ADV_INVALID) {
6322 process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6323 info->bdaddr_type, NULL, 0,
6324 info->primary_phy,
6325 info->secondary_phy,
6326 info->rssi, info->data, info->length,
6327 !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6328 false, instant);
6329 }
6330 }
6331
6332 hci_dev_unlock(hdev);
6333 }
6334
6335 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6336 {
6337 struct hci_cp_le_pa_term_sync cp;
6338
6339 memset(&cp, 0, sizeof(cp));
6340 cp.handle = handle;
6341
6342 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6343 }
6344
6345 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6346 struct sk_buff *skb)
6347 {
6348 struct hci_ev_le_pa_sync_established *ev = data;
6349 int mask = hdev->link_mode;
6350 __u8 flags = 0;
6351 struct hci_conn *pa_sync;
6352
6353 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6354
6355 hci_dev_lock(hdev);
6356
6357 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6358
6359 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6360 if (!(mask & HCI_LM_ACCEPT)) {
6361 hci_le_pa_term_sync(hdev, ev->handle);
6362 goto unlock;
6363 }
6364
6365 if (!(flags & HCI_PROTO_DEFER))
6366 goto unlock;
6367
6368 /* Add connection to indicate PA sync event */
6369 pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6370 HCI_ROLE_SLAVE);
6371
6372 if (IS_ERR(pa_sync))
6373 goto unlock;
6374
6375 pa_sync->sync_handle = le16_to_cpu(ev->handle);
6376
6377 if (ev->status) {
6378 set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6379
6380 /* Notify iso layer */
6381 hci_connect_cfm(pa_sync, ev->status);
6382 }
6383
6384 unlock:
6385 hci_dev_unlock(hdev);
6386 }
6387
6388 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6389 struct sk_buff *skb)
6390 {
6391 struct hci_ev_le_per_adv_report *ev = data;
6392 int mask = hdev->link_mode;
6393 __u8 flags = 0;
6394 struct hci_conn *pa_sync;
6395
6396 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6397
6398 hci_dev_lock(hdev);
6399
6400 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6401 if (!(mask & HCI_LM_ACCEPT))
6402 goto unlock;
6403
6404 if (!(flags & HCI_PROTO_DEFER))
6405 goto unlock;
6406
6407 pa_sync = hci_conn_hash_lookup_pa_sync_handle
6408 (hdev,
6409 le16_to_cpu(ev->sync_handle));
6410
6411 if (!pa_sync)
6412 goto unlock;
6413
6414 if (ev->data_status == LE_PA_DATA_COMPLETE &&
6415 !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
6416 /* Notify iso layer */
6417 hci_connect_cfm(pa_sync, 0);
6418
6419 /* Notify MGMT layer */
6420 mgmt_device_connected(hdev, pa_sync, NULL, 0);
6421 }
6422
6423 unlock:
6424 hci_dev_unlock(hdev);
6425 }
6426
6427 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6428 struct sk_buff *skb)
6429 {
6430 struct hci_ev_le_remote_feat_complete *ev = data;
6431 struct hci_conn *conn;
6432
6433 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6434
6435 hci_dev_lock(hdev);
6436
6437 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6438 if (conn) {
6439 if (!ev->status)
6440 memcpy(conn->features[0], ev->features, 8);
6441
6442 if (conn->state == BT_CONFIG) {
6443 __u8 status;
6444
6445 /* If the local controller supports peripheral-initiated
6446 * features exchange, but the remote controller does
6447 * not, then it is possible that the error code 0x1a
6448 * for unsupported remote feature gets returned.
6449 *
6450 * In this specific case, allow the connection to
6451 * transition into connected state and mark it as
6452 * successful.
6453 */
6454 if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6455 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6456 status = 0x00;
6457 else
6458 status = ev->status;
6459
6460 conn->state = BT_CONNECTED;
6461 hci_connect_cfm(conn, status);
6462 hci_conn_drop(conn);
6463 }
6464 }
6465
6466 hci_dev_unlock(hdev);
6467 }
6468
6469 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6470 struct sk_buff *skb)
6471 {
6472 struct hci_ev_le_ltk_req *ev = data;
6473 struct hci_cp_le_ltk_reply cp;
6474 struct hci_cp_le_ltk_neg_reply neg;
6475 struct hci_conn *conn;
6476 struct smp_ltk *ltk;
6477
6478 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6479
6480 hci_dev_lock(hdev);
6481
6482 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6483 if (conn == NULL)
6484 goto not_found;
6485
6486 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6487 if (!ltk)
6488 goto not_found;
6489
6490 if (smp_ltk_is_sc(ltk)) {
6491 /* With SC both EDiv and Rand are set to zero */
6492 if (ev->ediv || ev->rand)
6493 goto not_found;
6494 } else {
6495 /* For non-SC keys check that EDiv and Rand match */
6496 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6497 goto not_found;
6498 }
6499
6500 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6501 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
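	/* Example (hypothetical key size): with a negotiated enc_size of 7,
	 * only the first 7 bytes of the 16-byte LTK field carry key material
	 * and the remaining 9 bytes are zeroed above before sending the reply.
	 */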
6502 cp.handle = cpu_to_le16(conn->handle);
6503
6504 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6505
6506 conn->enc_key_size = ltk->enc_size;
6507
6508 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6509
6510 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6511 * temporary key used to encrypt a connection following
6512 * pairing. It is used during the Encrypted Session Setup to
6513 * distribute the keys. Later, security can be re-established
6514 * using a distributed LTK.
6515 */
6516 if (ltk->type == SMP_STK) {
6517 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6518 list_del_rcu(<k->list);
6519 kfree_rcu(ltk, rcu);
6520 } else {
6521 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6522 }
6523
6524 hci_dev_unlock(hdev);
6525
6526 return;
6527
6528 not_found:
6529 neg.handle = ev->handle;
6530 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6531 hci_dev_unlock(hdev);
6532 }
6533
6534 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6535 u8 reason)
6536 {
6537 struct hci_cp_le_conn_param_req_neg_reply cp;
6538
6539 cp.handle = cpu_to_le16(handle);
6540 cp.reason = reason;
6541
6542 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6543 &cp);
6544 }
6545
6546 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6547 struct sk_buff *skb)
6548 {
6549 struct hci_ev_le_remote_conn_param_req *ev = data;
6550 struct hci_cp_le_conn_param_req_reply cp;
6551 struct hci_conn *hcon;
6552 u16 handle, min, max, latency, timeout;
6553
6554 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6555
6556 handle = le16_to_cpu(ev->handle);
6557 min = le16_to_cpu(ev->interval_min);
6558 max = le16_to_cpu(ev->interval_max);
6559 latency = le16_to_cpu(ev->latency);
6560 timeout = le16_to_cpu(ev->timeout);
6561
6562 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6563 if (!hcon || hcon->state != BT_CONNECTED)
6564 return send_conn_param_neg_reply(hdev, handle,
6565 HCI_ERROR_UNKNOWN_CONN_ID);
6566
6567 if (max > hcon->le_conn_max_interval)
6568 return send_conn_param_neg_reply(hdev, handle,
6569 HCI_ERROR_INVALID_LL_PARAMS);
6570
6571 if (hci_check_conn_params(min, max, latency, timeout))
6572 return send_conn_param_neg_reply(hdev, handle,
6573 HCI_ERROR_INVALID_LL_PARAMS);
6574
6575 if (hcon->role == HCI_ROLE_MASTER) {
6576 struct hci_conn_params *params;
6577 u8 store_hint;
6578
6579 hci_dev_lock(hdev);
6580
6581 params = hci_conn_params_lookup(hdev, &hcon->dst,
6582 hcon->dst_type);
6583 if (params) {
6584 params->conn_min_interval = min;
6585 params->conn_max_interval = max;
6586 params->conn_latency = latency;
6587 params->supervision_timeout = timeout;
6588 store_hint = 0x01;
6589 } else {
6590 store_hint = 0x00;
6591 }
6592
6593 hci_dev_unlock(hdev);
6594
6595 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6596 store_hint, min, max, latency, timeout);
6597 }
6598
6599 cp.handle = ev->handle;
6600 cp.interval_min = ev->interval_min;
6601 cp.interval_max = ev->interval_max;
6602 cp.latency = ev->latency;
6603 cp.timeout = ev->timeout;
6604 cp.min_ce_len = 0;
6605 cp.max_ce_len = 0;
6606
6607 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6608 }
6609
6610 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6611 struct sk_buff *skb)
6612 {
6613 struct hci_ev_le_direct_adv_report *ev = data;
6614 u64 instant = jiffies;
6615 int i;
6616
6617 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6618 flex_array_size(ev, info, ev->num)))
6619 return;
6620
6621 if (!ev->num)
6622 return;
6623
6624 hci_dev_lock(hdev);
6625
6626 for (i = 0; i < ev->num; i++) {
6627 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6628
6629 process_adv_report(hdev, info->type, &info->bdaddr,
6630 info->bdaddr_type, &info->direct_addr,
6631 info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6632 info->rssi, NULL, 0, false, false, instant);
6633 }
6634
6635 hci_dev_unlock(hdev);
6636 }
6637
6638 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6639 struct sk_buff *skb)
6640 {
6641 struct hci_ev_le_phy_update_complete *ev = data;
6642 struct hci_conn *conn;
6643
6644 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6645
6646 if (ev->status)
6647 return;
6648
6649 hci_dev_lock(hdev);
6650
6651 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6652 if (!conn)
6653 goto unlock;
6654
6655 conn->le_tx_phy = ev->tx_phy;
6656 conn->le_rx_phy = ev->rx_phy;
6657
6658 unlock:
6659 hci_dev_unlock(hdev);
6660 }
6661
6662 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6663 struct sk_buff *skb)
6664 {
6665 struct hci_evt_le_cis_established *ev = data;
6666 struct hci_conn *conn;
6667 struct bt_iso_qos *qos;
6668 bool pending = false;
6669 u16 handle = __le16_to_cpu(ev->handle);
6670 u32 c_sdu_interval, p_sdu_interval;
6671
6672 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6673
6674 hci_dev_lock(hdev);
6675
6676 conn = hci_conn_hash_lookup_handle(hdev, handle);
6677 if (!conn) {
6678 bt_dev_err(hdev,
6679 "Unable to find connection with handle 0x%4.4x",
6680 handle);
6681 goto unlock;
6682 }
6683
6684 if (conn->type != ISO_LINK) {
6685 bt_dev_err(hdev,
6686 "Invalid connection link type handle 0x%4.4x",
6687 handle);
6688 goto unlock;
6689 }
6690
6691 qos = &conn->iso_qos;
6692
6693 pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6694
6695 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
6696 * page 3075:
6697 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) x
6698 * ISO_Interval + SDU_Interval_C_To_P
6699 * ...
6700 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
6701 * Transport_Latency
6702 */
6703 c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6704 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
6705 get_unaligned_le24(ev->c_latency);
6706 p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6707 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
6708 get_unaligned_le24(ev->p_latency);
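	/* Worked example with illustrative values: CIG_Sync_Delay = 5000 us,
	 * FT_C_To_P = 2, ISO_Interval = 8 (8 * 1250 us = 10000 us) and
	 * Transport_Latency_C_To_P = 15000 us gives
	 * c_sdu_interval = 5000 + 2 * 10000 - 15000 = 10000 us.
	 */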
6709
6710 switch (conn->role) {
6711 case HCI_ROLE_SLAVE:
6712 qos->ucast.in.interval = c_sdu_interval;
6713 qos->ucast.out.interval = p_sdu_interval;
6714 /* Convert Transport Latency (us) to Latency (msec) */
6715 qos->ucast.in.latency =
6716 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6717 1000);
6718 qos->ucast.out.latency =
6719 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6720 1000);
6721 qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6722 qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6723 qos->ucast.in.phy = ev->c_phy;
6724 qos->ucast.out.phy = ev->p_phy;
6725 break;
6726 case HCI_ROLE_MASTER:
6727 qos->ucast.in.interval = p_sdu_interval;
6728 qos->ucast.out.interval = c_sdu_interval;
6729 /* Convert Transport Latency (us) to Latency (msec) */
6730 qos->ucast.out.latency =
6731 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6732 1000);
6733 qos->ucast.in.latency =
6734 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6735 1000);
6736 qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6737 qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6738 qos->ucast.out.phy = ev->c_phy;
6739 qos->ucast.in.phy = ev->p_phy;
6740 break;
6741 }
6742
6743 if (!ev->status) {
6744 conn->state = BT_CONNECTED;
6745 hci_debugfs_create_conn(conn);
6746 hci_conn_add_sysfs(conn);
6747 hci_iso_setup_path(conn);
6748 goto unlock;
6749 }
6750
6751 conn->state = BT_CLOSED;
6752 hci_connect_cfm(conn, ev->status);
6753 hci_conn_del(conn);
6754
6755 unlock:
6756 if (pending)
6757 hci_le_create_cis_pending(hdev);
6758
6759 hci_dev_unlock(hdev);
6760 }
6761
6762 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6763 {
6764 struct hci_cp_le_reject_cis cp;
6765
6766 memset(&cp, 0, sizeof(cp));
6767 cp.handle = handle;
6768 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6769 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6770 }
6771
6772 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6773 {
6774 struct hci_cp_le_accept_cis cp;
6775
6776 memset(&cp, 0, sizeof(cp));
6777 cp.handle = handle;
6778 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6779 }
6780
6781 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6782 struct sk_buff *skb)
6783 {
6784 struct hci_evt_le_cis_req *ev = data;
6785 u16 acl_handle, cis_handle;
6786 struct hci_conn *acl, *cis;
6787 int mask;
6788 __u8 flags = 0;
6789
6790 acl_handle = __le16_to_cpu(ev->acl_handle);
6791 cis_handle = __le16_to_cpu(ev->cis_handle);
6792
6793 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6794 acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6795
6796 hci_dev_lock(hdev);
6797
6798 acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6799 if (!acl)
6800 goto unlock;
6801
6802 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6803 if (!(mask & HCI_LM_ACCEPT)) {
6804 hci_le_reject_cis(hdev, ev->cis_handle);
6805 goto unlock;
6806 }
6807
6808 cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6809 if (!cis) {
6810 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
6811 cis_handle);
6812 if (IS_ERR(cis)) {
6813 hci_le_reject_cis(hdev, ev->cis_handle);
6814 goto unlock;
6815 }
6816 }
6817
6818 cis->iso_qos.ucast.cig = ev->cig_id;
6819 cis->iso_qos.ucast.cis = ev->cis_id;
6820
6821 if (!(flags & HCI_PROTO_DEFER)) {
6822 hci_le_accept_cis(hdev, ev->cis_handle);
6823 } else {
6824 cis->state = BT_CONNECT2;
6825 hci_connect_cfm(cis, 0);
6826 }
6827
6828 unlock:
6829 hci_dev_unlock(hdev);
6830 }
6831
6832 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6833 {
6834 u8 handle = PTR_UINT(data);
6835
6836 return hci_le_terminate_big_sync(hdev, handle,
6837 HCI_ERROR_LOCAL_HOST_TERM);
6838 }
6839
6840 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6841 struct sk_buff *skb)
6842 {
6843 struct hci_evt_le_create_big_complete *ev = data;
6844 struct hci_conn *conn;
6845 __u8 i = 0;
6846
6847 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6848
6849 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6850 flex_array_size(ev, bis_handle, ev->num_bis)))
6851 return;
6852
6853 hci_dev_lock(hdev);
6854 rcu_read_lock();
6855
6856 /* Connect all BISes that are bound to the BIG */
6857 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6858 if (bacmp(&conn->dst, BDADDR_ANY) ||
6859 conn->type != ISO_LINK ||
6860 conn->iso_qos.bcast.big != ev->handle)
6861 continue;
6862
6863 if (hci_conn_set_handle(conn,
6864 __le16_to_cpu(ev->bis_handle[i++])))
6865 continue;
6866
6867 if (!ev->status) {
6868 conn->state = BT_CONNECTED;
6869 set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6870 rcu_read_unlock();
6871 hci_debugfs_create_conn(conn);
6872 hci_conn_add_sysfs(conn);
6873 hci_iso_setup_path(conn);
6874 rcu_read_lock();
6875 continue;
6876 }
6877
6878 hci_connect_cfm(conn, ev->status);
6879 rcu_read_unlock();
6880 hci_conn_del(conn);
6881 rcu_read_lock();
6882 }
6883
6884 rcu_read_unlock();
6885
6886 if (!ev->status && !i)
6887 /* If no BISes have been connected for the BIG,
6888 * terminate. This is in case all bound connections
6889 * have been closed before the BIG creation
6890 * has completed.
6891 */
6892 hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6893 UINT_PTR(ev->handle), NULL);
6894
6895 hci_dev_unlock(hdev);
6896 }
6897
6898 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6899 struct sk_buff *skb)
6900 {
6901 struct hci_evt_le_big_sync_estabilished *ev = data;
6902 struct hci_conn *bis;
6903 int i;
6904
6905 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6906
6907 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
6908 flex_array_size(ev, bis, ev->num_bis)))
6909 return;
6910
6911 hci_dev_lock(hdev);
6912
6913 for (i = 0; i < ev->num_bis; i++) {
6914 u16 handle = le16_to_cpu(ev->bis[i]);
6915 __le32 interval;
6916
6917 bis = hci_conn_hash_lookup_handle(hdev, handle);
6918 if (!bis) {
6919 if (handle > HCI_CONN_HANDLE_MAX) {
6920 bt_dev_dbg(hdev, "ignore too large handle %u", handle);
6921 continue;
6922 }
6923 bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
6924 HCI_ROLE_SLAVE, handle);
6925 if (IS_ERR(bis))
6926 continue;
6927 }
6928
6929 if (ev->status != 0x42)
6930 /* Mark PA sync as established */
6931 set_bit(HCI_CONN_PA_SYNC, &bis->flags);
6932
6933 bis->iso_qos.bcast.big = ev->handle;
6934 memset(&interval, 0, sizeof(interval));
6935 memcpy(&interval, ev->latency, sizeof(ev->latency));
6936 bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
6937 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
6938 bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
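		/* Illustration: ev->latency is a 24-bit little-endian field, so
		 * it is copied into a zeroed __le32 before conversion; the ISO
		 * interval above is in 1.25 ms units, e.g. an interval value of
		 * 24 yields 24 * 125 / 100 = 30 ms.
		 */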
6939 bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
6940
6941 if (!ev->status) {
6942 set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
6943 hci_iso_setup_path(bis);
6944 }
6945 }
6946
6947 /* In case BIG sync failed, notify the user of each failed connection
6948 * after all hci connections have been added
6949 */
6950 if (ev->status)
6951 for (i = 0; i < ev->num_bis; i++) {
6952 u16 handle = le16_to_cpu(ev->bis[i]);
6953
6954 bis = hci_conn_hash_lookup_handle(hdev, handle);
6955 if (!bis)
6956 continue;
6957
6958 set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
6959 hci_connect_cfm(bis, ev->status);
6960 }
6961
6962 hci_dev_unlock(hdev);
6963 }
6964
6965 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
6966 struct sk_buff *skb)
6967 {
6968 struct hci_evt_le_big_info_adv_report *ev = data;
6969 int mask = hdev->link_mode;
6970 __u8 flags = 0;
6971 struct hci_conn *pa_sync;
6972
6973 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6974
6975 hci_dev_lock(hdev);
6976
6977 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6978 if (!(mask & HCI_LM_ACCEPT))
6979 goto unlock;
6980
6981 if (!(flags & HCI_PROTO_DEFER))
6982 goto unlock;
6983
6984 pa_sync = hci_conn_hash_lookup_pa_sync_handle
6985 (hdev,
6986 le16_to_cpu(ev->sync_handle));
6987
6988 if (!pa_sync)
6989 goto unlock;
6990
6991 pa_sync->iso_qos.bcast.encryption = ev->encryption;
6992
6993 /* Notify iso layer */
6994 hci_connect_cfm(pa_sync, 0);
6995
6996 unlock:
6997 hci_dev_unlock(hdev);
6998 }
6999
7000 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7001 [_op] = { \
7002 .func = _func, \
7003 .min_len = _min_len, \
7004 .max_len = _max_len, \
7005 }
7006
7007 #define HCI_LE_EV(_op, _func, _len) \
7008 HCI_LE_EV_VL(_op, _func, _len, _len)
7009
7010 #define HCI_LE_EV_STATUS(_op, _func) \
7011 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7012
7013 /* Entries in this table shall have their position according to the subevent
7014 * opcode they handle, so using the macros above is recommended since they
7015 * initialize each entry at its proper index using designated initializers;
7016 * that way, subevents without a callback function can be omitted.
7017 */
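/* For reference, an entry added with the macros above expands to a designated
 * initializer, e.g. (expansion shown for illustration):
 *
 *   HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt, len)
 *     => [HCI_EV_LE_CONN_COMPLETE] = { .func = hci_le_conn_complete_evt,
 *                                      .min_len = len, .max_len = len }
 */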
7018 static const struct hci_le_ev {
7019 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7020 u16 min_len;
7021 u16 max_len;
7022 } hci_le_ev_table[U8_MAX + 1] = {
7023 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7024 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7025 sizeof(struct hci_ev_le_conn_complete)),
7026 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7027 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7028 sizeof(struct hci_ev_le_advertising_report),
7029 HCI_MAX_EVENT_SIZE),
7030 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7031 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7032 hci_le_conn_update_complete_evt,
7033 sizeof(struct hci_ev_le_conn_update_complete)),
7034 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7035 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7036 hci_le_remote_feat_complete_evt,
7037 sizeof(struct hci_ev_le_remote_feat_complete)),
7038 /* [0x05 = HCI_EV_LE_LTK_REQ] */
7039 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7040 sizeof(struct hci_ev_le_ltk_req)),
7041 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7042 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7043 hci_le_remote_conn_param_req_evt,
7044 sizeof(struct hci_ev_le_remote_conn_param_req)),
7045 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7046 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7047 hci_le_enh_conn_complete_evt,
7048 sizeof(struct hci_ev_le_enh_conn_complete)),
7049 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7050 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7051 sizeof(struct hci_ev_le_direct_adv_report),
7052 HCI_MAX_EVENT_SIZE),
7053 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7054 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7055 sizeof(struct hci_ev_le_phy_update_complete)),
7056 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7057 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7058 sizeof(struct hci_ev_le_ext_adv_report),
7059 HCI_MAX_EVENT_SIZE),
7060 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7061 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7062 hci_le_pa_sync_estabilished_evt,
7063 sizeof(struct hci_ev_le_pa_sync_established)),
7064 /* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7065 HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7066 hci_le_per_adv_report_evt,
7067 sizeof(struct hci_ev_le_per_adv_report),
7068 HCI_MAX_EVENT_SIZE),
7069 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7070 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7071 sizeof(struct hci_evt_le_ext_adv_set_term)),
7072 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7073 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7074 sizeof(struct hci_evt_le_cis_established)),
7075 /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7076 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7077 sizeof(struct hci_evt_le_cis_req)),
7078 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7079 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7080 hci_le_create_big_complete_evt,
7081 sizeof(struct hci_evt_le_create_big_complete),
7082 HCI_MAX_EVENT_SIZE),
7083 /* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7084 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7085 hci_le_big_sync_established_evt,
7086 sizeof(struct hci_evt_le_big_sync_estabilished),
7087 HCI_MAX_EVENT_SIZE),
7088 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7089 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7090 hci_le_big_info_adv_report_evt,
7091 sizeof(struct hci_evt_le_big_info_adv_report),
7092 HCI_MAX_EVENT_SIZE),
7093 };
7094
7095 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7096 struct sk_buff *skb, u16 *opcode, u8 *status,
7097 hci_req_complete_t *req_complete,
7098 hci_req_complete_skb_t *req_complete_skb)
7099 {
7100 struct hci_ev_le_meta *ev = data;
7101 const struct hci_le_ev *subev;
7102
7103 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7104
7105 /* Only match event if command OGF is for LE */
7106 if (hdev->req_skb &&
7107 hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
7108 hci_skb_event(hdev->req_skb) == ev->subevent) {
7109 *opcode = hci_skb_opcode(hdev->req_skb);
7110 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7111 req_complete_skb);
7112 }
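	/* Illustration: hci_opcode_ogf() yields the upper 6 bits of the 16-bit
	 * opcode (op >> 10), so e.g. HCI_OP_LE_SET_SCAN_ENABLE (0x200c) gives
	 * OGF 0x08, and only such LE Controller commands are matched against
	 * the LE meta subevent that completes them.
	 */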
7113
7114 subev = &hci_le_ev_table[ev->subevent];
7115 if (!subev->func)
7116 return;
7117
7118 if (skb->len < subev->min_len) {
7119 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7120 ev->subevent, skb->len, subev->min_len);
7121 return;
7122 }
7123
7124 /* Just warn if the length is over max_len, since it may still be
7125 * possible to partially parse the event, and leave it to the callback
7126 * to decide whether that is acceptable.
7127 */
7128 if (skb->len > subev->max_len)
7129 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7130 ev->subevent, skb->len, subev->max_len);
7131 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7132 if (!data)
7133 return;
7134
7135 subev->func(hdev, data, skb);
7136 }
7137
7138 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7139 u8 event, struct sk_buff *skb)
7140 {
7141 struct hci_ev_cmd_complete *ev;
7142 struct hci_event_hdr *hdr;
7143
7144 if (!skb)
7145 return false;
7146
7147 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7148 if (!hdr)
7149 return false;
7150
7151 if (event) {
7152 if (hdr->evt != event)
7153 return false;
7154 return true;
7155 }
7156
7157 /* Check if request ended in Command Status - no way to retrieve
7158 * any extra parameters in this case.
7159 */
7160 if (hdr->evt == HCI_EV_CMD_STATUS)
7161 return false;
7162
7163 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7164 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7165 hdr->evt);
7166 return false;
7167 }
7168
7169 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7170 if (!ev)
7171 return false;
7172
7173 if (opcode != __le16_to_cpu(ev->opcode)) {
7174 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7175 __le16_to_cpu(ev->opcode));
7176 return false;
7177 }
7178
7179 return true;
7180 }
7181
7182 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7183 struct sk_buff *skb)
7184 {
7185 struct hci_ev_le_advertising_info *adv;
7186 struct hci_ev_le_direct_adv_info *direct_adv;
7187 struct hci_ev_le_ext_adv_info *ext_adv;
7188 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7189 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7190
7191 hci_dev_lock(hdev);
7192
7193 /* If we are currently suspended and this is the first BT event seen,
7194 * save the wake reason associated with the event.
7195 */
7196 if (!hdev->suspended || hdev->wake_reason)
7197 goto unlock;
7198
7199 /* Default to remote wake. Values for wake_reason are documented in the
7200 * BlueZ MGMT API documentation.
7201 */
7202 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7203
7204 /* Once configured for remote wakeup, we should only wake up for
7205 * reconnections. It's useful to see which device is waking us up so
7206 * keep track of the bdaddr of the connection event that woke us up.
7207 */
7208 if (event == HCI_EV_CONN_REQUEST) {
7209 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7210 hdev->wake_addr_type = BDADDR_BREDR;
7211 } else if (event == HCI_EV_CONN_COMPLETE) {
7212 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7213 hdev->wake_addr_type = BDADDR_BREDR;
7214 } else if (event == HCI_EV_LE_META) {
7215 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7216 u8 subevent = le_ev->subevent;
7217 u8 *ptr = &skb->data[sizeof(*le_ev)];
7218 u8 num_reports = *ptr;
7219
7220 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7221 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7222 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7223 num_reports) {
7224 adv = (void *)(ptr + 1);
7225 direct_adv = (void *)(ptr + 1);
7226 ext_adv = (void *)(ptr + 1);
7227
7228 switch (subevent) {
7229 case HCI_EV_LE_ADVERTISING_REPORT:
7230 bacpy(&hdev->wake_addr, &adv->bdaddr);
7231 hdev->wake_addr_type = adv->bdaddr_type;
7232 break;
7233 case HCI_EV_LE_DIRECT_ADV_REPORT:
7234 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7235 hdev->wake_addr_type = direct_adv->bdaddr_type;
7236 break;
7237 case HCI_EV_LE_EXT_ADV_REPORT:
7238 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7239 hdev->wake_addr_type = ext_adv->bdaddr_type;
7240 break;
7241 }
7242 }
7243 } else {
7244 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7245 }
7246
7247 unlock:
7248 hci_dev_unlock(hdev);
7249 }
7250
7251 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7252 [_op] = { \
7253 .req = false, \
7254 .func = _func, \
7255 .min_len = _min_len, \
7256 .max_len = _max_len, \
7257 }
7258
7259 #define HCI_EV(_op, _func, _len) \
7260 HCI_EV_VL(_op, _func, _len, _len)
7261
7262 #define HCI_EV_STATUS(_op, _func) \
7263 HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7264
7265 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7266 [_op] = { \
7267 .req = true, \
7268 .func_req = _func, \
7269 .min_len = _min_len, \
7270 .max_len = _max_len, \
7271 }
7272
7273 #define HCI_EV_REQ(_op, _func, _len) \
7274 HCI_EV_REQ_VL(_op, _func, _len, _len)
7275
7276 /* Entries in this table shall have their position according to the event opcode
7277 * they handle, so using the macros above is recommended since they initialize
7278 * each entry at its proper index using designated initializers; that way, events
7279 * without a callback function don't need an entry.
7280 */
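/* For reference (expansion shown for illustration), a request-style entry
 * such as:
 *
 *   HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt, len)
 *     => [HCI_EV_CMD_STATUS] = { .req = true, .func_req = hci_cmd_status_evt,
 *                                .min_len = len, .max_len = len }
 *
 * selects the func_req member of the union, so the handler also receives the
 * opcode/status pointers and the request-completion callbacks.
 */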
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16 min_len;
	u16 max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

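/* Dispatch a single HCI event: look up the handler registered for @event in
 * hci_ev_table, validate the packet length against the entry's bounds and
 * invoke either the plain or the request-aware callback.
 */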
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len: it may still be possible to
	 * partially parse the event, so leave it to the callback to decide
	 * whether that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

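/* Entry point for a complete HCI event packet: caches a copy of the event,
 * matches it against a pending request (if any), stores the wake reason when
 * suspended, dispatches the event to its handler via hci_event_func() and
 * finally runs the request completion callback, if one was set up.
 */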
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

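	/* Cache a copy of the most recently received event, replacing any
	 * previously stored one.
	 */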
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match the event against the pending request if the command's
	 * OGF is not the LE Controller group (0x08).
	 */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}