/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

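/* A 16-octet all-zero key value, used to detect null keys. */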
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}
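
/* Usage sketch (illustrative only): each handler pulls the fixed-size
 * portion of its payload with one of the helpers above before reading
 * any field, e.g.
 *
 *	struct hci_ev_status *rp;
 *
 *	rp = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*rp));
 *	if (!rp)
 *		return;
 *
 * so a truncated packet is rejected before any field access.
 */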

static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive the Inquiry Complete event right
	 * before the Inquiry Cancel Command Complete event, in which case
	 * the latter event carries a status of Command Disallowed. This
	 * should not be treated as an error, since we actually achieve
	 * what Inquiry Cancel wants to achieve, which is to end the last
	 * Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_remote_name_req_cancel *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

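	/* The command parameters are a 2-octet connection handle followed
	 * by the 2-octet policy, hence the sent + 2 below.
	 */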
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

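/* The cached max/num key counts are only meaningful when the command
 * was issued with the Read_All_Flag set, which is what the read_all
 * check below enforces.
 */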
static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode) {
			hdev->features[1][0] |= LMP_HOST_SSP;
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		} else {
			hdev->features[1][0] &= ~LMP_HOST_SSP;
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

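/* Completion of HCI_OP_READ_ENC_KEY_SIZE: cache the negotiated
 * encryption key size on the connection and enforce the configured
 * minimum, failing the encryption change if the key is too small or
 * has been downgraded.
 */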
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is then to assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		u8 *key_enc_size = hci_conn_key_enc_size(conn);

		conn->enc_key_size = rp->key_size;
		status = 0;

		/* Check whether the key size is too small or has been
		 * downgraded from the last time it was stored as part of
		 * the link_key.
		 */
		if (conn->enc_key_size < hdev->min_enc_key_size ||
		    (key_enc_size && conn->enc_key_size < *key_enc_size)) {
			/* In the slave role, conn->state has already been
			 * set to BT_CONNECTED but the l2cap conn req might
			 * not have been received yet, and at this moment
			 * the l2cap layer does almost nothing with a
			 * non-zero status. So also clear the encrypt-related
			 * bits; the handler of the l2cap conn req will then
			 * see the right secure state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}

		/* Update the key encryption size with the connection one */
		if (key_enc_size && *key_enc_size != conn->enc_key_size)
			*key_enc_size = conn->enc_key_size;
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

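	/* The command parameters are a 2-octet connection handle followed
	 * by the 2-octet timeout, hence the sent + 2 below.
	 */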
	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by the device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (hci_test_quirk(hdev,
				   HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	if (!read_voice_setting_capable(hdev))
		hdev->sco_pkts = 0;

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	bt_dev_dbg(hdev, "acl mtu %d:%d sco mtu %d:%d", hdev->acl_mtu,
		   hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	if (!hdev->acl_mtu || !hdev->acl_pkts)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	bt_dev_dbg(hdev, "le mtu %d:%d", hdev->le_mtu, hdev->le_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only in the case of an adv instance, since handle 0x00
	 * shall use HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended
	 * and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as a peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

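/* Completion of LE Set Extended Advertising Enable: track the enabled
 * state of each advertising set (handle 0x00 is tracked via the
 * dedicated HCI_LE_ADV_0 flag) and keep HCI_LE_ADV in sync.
 */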
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;
		else if (!set->handle)
			hci_dev_set_flag(hdev, HCI_LE_ADV_0);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;
			else if (!set->handle)
				hci_dev_clear_flag(hdev, HCI_LE_ADV_0);

			/* If just one instance was disabled, check if any
			 * other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

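/* Helpers for deferring advertising reports during active scanning:
 * the last report is held back so that it can be combined with the
 * matching scan response before being reported to userspace.
 */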
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

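/* Common completion logic for the legacy and extended variants of the
 * LE Set Scan Enable command (see the two callers below).
 */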
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
			clear_pending_adv_report(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		}
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request, so mark
		 * discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1922
hci_cc_le_read_supported_states(struct hci_dev * hdev,void * data,struct sk_buff * skb)1923 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1924 struct sk_buff *skb)
1925 {
1926 struct hci_rp_le_read_supported_states *rp = data;
1927
1928 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1929
1930 if (rp->status)
1931 return rp->status;
1932
1933 memcpy(hdev->le_states, rp->le_states, 8);
1934
1935 return rp->status;
1936 }
1937
hci_cc_le_read_def_data_len(struct hci_dev * hdev,void * data,struct sk_buff * skb)1938 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1939 struct sk_buff *skb)
1940 {
1941 struct hci_rp_le_read_def_data_len *rp = data;
1942
1943 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1944
1945 if (rp->status)
1946 return rp->status;
1947
1948 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1949 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1950
1951 return rp->status;
1952 }
1953
hci_cc_le_write_def_data_len(struct hci_dev * hdev,void * data,struct sk_buff * skb)1954 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1955 struct sk_buff *skb)
1956 {
1957 struct hci_cp_le_write_def_data_len *sent;
1958 struct hci_ev_status *rp = data;
1959
1960 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1961
1962 if (rp->status)
1963 return rp->status;
1964
1965 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1966 if (!sent)
1967 return rp->status;
1968
1969 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1970 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1971
1972 return rp->status;
1973 }
1974
1975 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1976 struct sk_buff *skb)
1977 {
1978 struct hci_cp_le_add_to_resolv_list *sent;
1979 struct hci_ev_status *rp = data;
1980
1981 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1982
1983 if (rp->status)
1984 return rp->status;
1985
1986 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1987 if (!sent)
1988 return rp->status;
1989
1990 hci_dev_lock(hdev);
1991 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1992 sent->bdaddr_type, sent->peer_irk,
1993 sent->local_irk);
1994 hci_dev_unlock(hdev);
1995
1996 return rp->status;
1997 }
1998
1999 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2000 struct sk_buff *skb)
2001 {
2002 struct hci_cp_le_del_from_resolv_list *sent;
2003 struct hci_ev_status *rp = data;
2004
2005 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2006
2007 if (rp->status)
2008 return rp->status;
2009
2010 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2011 if (!sent)
2012 return rp->status;
2013
2014 hci_dev_lock(hdev);
2015 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2016 sent->bdaddr_type);
2017 hci_dev_unlock(hdev);
2018
2019 return rp->status;
2020 }
2021
2022 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2023 struct sk_buff *skb)
2024 {
2025 struct hci_ev_status *rp = data;
2026
2027 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2028
2029 if (rp->status)
2030 return rp->status;
2031
2032 hci_dev_lock(hdev);
2033 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2034 hci_dev_unlock(hdev);
2035
2036 return rp->status;
2037 }
2038
2039 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2040 struct sk_buff *skb)
2041 {
2042 struct hci_rp_le_read_resolv_list_size *rp = data;
2043
2044 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2045
2046 if (rp->status)
2047 return rp->status;
2048
2049 hdev->le_resolv_list_size = rp->size;
2050
2051 return rp->status;
2052 }
2053
2054 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2055 struct sk_buff *skb)
2056 {
2057 struct hci_ev_status *rp = data;
2058 __u8 *sent;
2059
2060 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2061
2062 if (rp->status)
2063 return rp->status;
2064
2065 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2066 if (!sent)
2067 return rp->status;
2068
2069 hci_dev_lock(hdev);
2070
2071 if (*sent)
2072 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2073 else
2074 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2075
2076 hci_dev_unlock(hdev);
2077
2078 return rp->status;
2079 }
2080
2081 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2082 struct sk_buff *skb)
2083 {
2084 struct hci_rp_le_read_max_data_len *rp = data;
2085
2086 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2087
2088 if (rp->status)
2089 return rp->status;
2090
2091 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2092 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2093 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2094 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2095
2096 return rp->status;
2097 }
2098
2099 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2100 struct sk_buff *skb)
2101 {
2102 struct hci_cp_write_le_host_supported *sent;
2103 struct hci_ev_status *rp = data;
2104
2105 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2106
2107 if (rp->status)
2108 return rp->status;
2109
2110 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2111 if (!sent)
2112 return rp->status;
2113
2114 hci_dev_lock(hdev);
2115
2116 if (sent->le) {
2117 hdev->features[1][0] |= LMP_HOST_LE;
2118 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2119 } else {
2120 hdev->features[1][0] &= ~LMP_HOST_LE;
2121 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2122 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2123 }
2124
2125 if (sent->simul)
2126 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2127 else
2128 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2129
2130 hci_dev_unlock(hdev);
2131
2132 return rp->status;
2133 }
2134
2135 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2136 struct sk_buff *skb)
2137 {
2138 struct hci_cp_le_set_adv_param *cp;
2139 struct hci_ev_status *rp = data;
2140
2141 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2142
2143 if (rp->status)
2144 return rp->status;
2145
2146 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2147 if (!cp)
2148 return rp->status;
2149
2150 hci_dev_lock(hdev);
2151 hdev->adv_addr_type = cp->own_address_type;
2152 hci_dev_unlock(hdev);
2153
2154 return rp->status;
2155 }
2156
2157 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2158 struct sk_buff *skb)
2159 {
2160 struct hci_rp_read_rssi *rp = data;
2161 struct hci_conn *conn;
2162
2163 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2164
2165 if (rp->status)
2166 return rp->status;
2167
2168 hci_dev_lock(hdev);
2169
2170 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2171 if (conn)
2172 conn->rssi = rp->rssi;
2173
2174 hci_dev_unlock(hdev);
2175
2176 return rp->status;
2177 }
2178
2179 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2180 struct sk_buff *skb)
2181 {
2182 struct hci_cp_read_tx_power *sent;
2183 struct hci_rp_read_tx_power *rp = data;
2184 struct hci_conn *conn;
2185
2186 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2187
2188 if (rp->status)
2189 return rp->status;
2190
2191 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2192 if (!sent)
2193 return rp->status;
2194
2195 hci_dev_lock(hdev);
2196
2197 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2198 if (!conn)
2199 goto unlock;
2200
2201 switch (sent->type) {
2202 case 0x00: /* Read Current Transmit Power Level */
2203 conn->tx_power = rp->tx_power;
2204 break;
2205 case 0x01: /* Read Maximum Transmit Power Level */
2206 conn->max_tx_power = rp->tx_power;
2207 break;
2208 }
2209
2210 unlock:
2211 hci_dev_unlock(hdev);
2212 return rp->status;
2213 }
2214
2215 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2216 struct sk_buff *skb)
2217 {
2218 struct hci_ev_status *rp = data;
2219 u8 *mode;
2220
2221 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2222
2223 if (rp->status)
2224 return rp->status;
2225
2226 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2227 if (mode)
2228 hdev->ssp_debug_mode = *mode;
2229
2230 return rp->status;
2231 }
2232
2233 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2234 {
2235 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2236
2237 if (status)
2238 return;
2239
2240 if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2241 set_bit(HCI_INQUIRY, &hdev->flags);
2242 }
2243
2244 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2245 {
2246 struct hci_cp_create_conn *cp;
2247 struct hci_conn *conn;
2248
2249 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2250
2251 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2252 if (!cp)
2253 return;
2254
2255 hci_dev_lock(hdev);
2256
2257 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2258
2259 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2260
2261 if (status) {
2262 if (conn && conn->state == BT_CONNECT) {
2263 conn->state = BT_CLOSED;
2264 hci_connect_cfm(conn, status);
2265 hci_conn_del(conn);
2266 }
2267 } else {
2268 if (!conn) {
2269 conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2270 HCI_ROLE_MASTER);
2271 if (IS_ERR(conn))
2272 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
2273 }
2274 }
2275
2276 hci_dev_unlock(hdev);
2277 }
2278
2279 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2280 {
2281 struct hci_cp_add_sco *cp;
2282 struct hci_conn *acl;
2283 struct hci_link *link;
2284 __u16 handle;
2285
2286 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2287
2288 if (!status)
2289 return;
2290
2291 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2292 if (!cp)
2293 return;
2294
2295 handle = __le16_to_cpu(cp->handle);
2296
2297 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2298
2299 hci_dev_lock(hdev);
2300
2301 acl = hci_conn_hash_lookup_handle(hdev, handle);
2302 if (acl) {
2303 link = list_first_entry_or_null(&acl->link_list,
2304 struct hci_link, list);
2305 if (link && link->conn) {
2306 link->conn->state = BT_CLOSED;
2307
2308 hci_connect_cfm(link->conn, status);
2309 hci_conn_del(link->conn);
2310 }
2311 }
2312
2313 hci_dev_unlock(hdev);
2314 }
2315
2316 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2317 {
2318 struct hci_cp_auth_requested *cp;
2319 struct hci_conn *conn;
2320
2321 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2322
2323 if (!status)
2324 return;
2325
2326 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2327 if (!cp)
2328 return;
2329
2330 hci_dev_lock(hdev);
2331
2332 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2333 if (conn) {
2334 if (conn->state == BT_CONFIG) {
2335 hci_connect_cfm(conn, status);
2336 hci_conn_drop(conn);
2337 }
2338 }
2339
2340 hci_dev_unlock(hdev);
2341 }
2342
2343 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2344 {
2345 struct hci_cp_set_conn_encrypt *cp;
2346 struct hci_conn *conn;
2347
2348 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2349
2350 if (!status)
2351 return;
2352
2353 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2354 if (!cp)
2355 return;
2356
2357 hci_dev_lock(hdev);
2358
2359 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2360 if (conn) {
2361 if (conn->state == BT_CONFIG) {
2362 hci_connect_cfm(conn, status);
2363 hci_conn_drop(conn);
2364 }
2365 }
2366
2367 hci_dev_unlock(hdev);
2368 }
2369
2370 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2371 struct hci_conn *conn)
2372 {
2373 if (conn->state != BT_CONFIG || !conn->out)
2374 return 0;
2375
2376 if (conn->pending_sec_level == BT_SECURITY_SDP)
2377 return 0;
2378
2379 /* Only request authentication for SSP connections, for connections
2380 * where MITM protection is requested (bit 0 of auth_type), or for
2381 * non-SSP devices with a pending sec_level of MEDIUM, HIGH or FIPS.
2382 */
2383 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2384 conn->pending_sec_level != BT_SECURITY_FIPS &&
2385 conn->pending_sec_level != BT_SECURITY_HIGH &&
2386 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2387 return 0;
2388
2389 return 1;
2390 }
2391
2392 static int hci_resolve_name(struct hci_dev *hdev,
2393 struct inquiry_entry *e)
2394 {
2395 struct hci_cp_remote_name_req cp;
2396
2397 memset(&cp, 0, sizeof(cp));
2398
2399 bacpy(&cp.bdaddr, &e->data.bdaddr);
2400 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2401 cp.pscan_mode = e->data.pscan_mode;
2402 cp.clock_offset = e->data.clock_offset;
2403
2404 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2405 }
2406
2407 static bool hci_resolve_next_name(struct hci_dev *hdev)
2408 {
2409 struct discovery_state *discov = &hdev->discovery;
2410 struct inquiry_entry *e;
2411
2412 if (list_empty(&discov->resolve))
2413 return false;
2414
2415 /* We should stop if we already spent too much time resolving names. */
2416 if (time_after(jiffies, discov->name_resolve_timeout)) {
2417 bt_dev_warn_ratelimited(hdev, "Name resolving is taking too long.");
2418 return false;
2419 }
2420
2421 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2422 if (!e)
2423 return false;
2424
2425 if (hci_resolve_name(hdev, e) == 0) {
2426 e->name_state = NAME_PENDING;
2427 return true;
2428 }
2429
2430 return false;
2431 }
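
/* Note: name resolution proceeds one Remote Name Request at a time.
 * Each Remote Name Request Complete event ends up in
 * hci_check_pending_name() below, which picks the next NAME_NEEDED
 * entry from the resolve list until the list is empty or the
 * name_resolve_timeout deadline above has passed.
 */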
2432
2433 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2434 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2435 {
2436 struct discovery_state *discov = &hdev->discovery;
2437 struct inquiry_entry *e;
2438
2439 /* Update the mgmt connected state if necessary. Be careful,
2440 * however, with conn objects that exist but are not (yet)
2441 * connected. Only those in the BT_CONFIG or BT_CONNECTED states
2442 * can be considered connected.
2443 */
2444 if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2445 mgmt_device_connected(hdev, conn, name, name_len);
2446
2447 if (discov->state == DISCOVERY_STOPPED)
2448 return;
2449
2450 if (discov->state == DISCOVERY_STOPPING)
2451 goto discov_complete;
2452
2453 if (discov->state != DISCOVERY_RESOLVING)
2454 return;
2455
2456 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2457 /* If the device was not found in the list of devices whose names are
2458 * pending resolution, there is no need to continue resolving the next
2459 * name, as that will be done upon receiving another Remote Name
2460 * Request Complete event. */
2461 if (!e)
2462 return;
2463
2464 list_del(&e->list);
2465
2466 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2467 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2468 name, name_len);
2469
2470 if (hci_resolve_next_name(hdev))
2471 return;
2472
2473 discov_complete:
2474 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2475 }
2476
2477 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2478 {
2479 struct hci_cp_remote_name_req *cp;
2480 struct hci_conn *conn;
2481
2482 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2483
2484 /* If successful, wait for the name req complete event before
2485 * checking for the need to do authentication. */
2486 if (!status)
2487 return;
2488
2489 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2490 if (!cp)
2491 return;
2492
2493 hci_dev_lock(hdev);
2494
2495 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2496
2497 if (hci_dev_test_flag(hdev, HCI_MGMT))
2498 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2499
2500 if (!conn)
2501 goto unlock;
2502
2503 if (!hci_outgoing_auth_needed(hdev, conn))
2504 goto unlock;
2505
2506 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2507 struct hci_cp_auth_requested auth_cp;
2508
2509 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2510
2511 auth_cp.handle = __cpu_to_le16(conn->handle);
2512 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2513 sizeof(auth_cp), &auth_cp);
2514 }
2515
2516 unlock:
2517 hci_dev_unlock(hdev);
2518 }
2519
2520 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2521 {
2522 struct hci_cp_read_remote_features *cp;
2523 struct hci_conn *conn;
2524
2525 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2526
2527 if (!status)
2528 return;
2529
2530 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2531 if (!cp)
2532 return;
2533
2534 hci_dev_lock(hdev);
2535
2536 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2537 if (conn) {
2538 if (conn->state == BT_CONFIG) {
2539 hci_connect_cfm(conn, status);
2540 hci_conn_drop(conn);
2541 }
2542 }
2543
2544 hci_dev_unlock(hdev);
2545 }
2546
2547 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2548 {
2549 struct hci_cp_read_remote_ext_features *cp;
2550 struct hci_conn *conn;
2551
2552 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2553
2554 if (!status)
2555 return;
2556
2557 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2558 if (!cp)
2559 return;
2560
2561 hci_dev_lock(hdev);
2562
2563 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2564 if (conn) {
2565 if (conn->state == BT_CONFIG) {
2566 hci_connect_cfm(conn, status);
2567 hci_conn_drop(conn);
2568 }
2569 }
2570
2571 hci_dev_unlock(hdev);
2572 }
2573
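/* A pending (e)SCO connection hangs off its parent ACL connection via
 * acl->link_list, so on a failed Setup Synchronous Connection the first
 * link entry is the connection being set up and is the one torn down
 * here (the same pattern as in hci_cs_add_sco() above).
 */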
2574 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2575 __u8 status)
2576 {
2577 struct hci_conn *acl;
2578 struct hci_link *link;
2579
2580 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2581
2582 hci_dev_lock(hdev);
2583
2584 acl = hci_conn_hash_lookup_handle(hdev, handle);
2585 if (acl) {
2586 link = list_first_entry_or_null(&acl->link_list,
2587 struct hci_link, list);
2588 if (link && link->conn) {
2589 link->conn->state = BT_CLOSED;
2590
2591 hci_connect_cfm(link->conn, status);
2592 hci_conn_del(link->conn);
2593 }
2594 }
2595
2596 hci_dev_unlock(hdev);
2597 }
2598
2599 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2600 {
2601 struct hci_cp_setup_sync_conn *cp;
2602
2603 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2604
2605 if (!status)
2606 return;
2607
2608 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2609 if (!cp)
2610 return;
2611
2612 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2613 }
2614
2615 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2616 {
2617 struct hci_cp_enhanced_setup_sync_conn *cp;
2618
2619 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2620
2621 if (!status)
2622 return;
2623
2624 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2625 if (!cp)
2626 return;
2627
2628 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2629 }
2630
2631 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2632 {
2633 struct hci_cp_sniff_mode *cp;
2634 struct hci_conn *conn;
2635
2636 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2637
2638 if (!status)
2639 return;
2640
2641 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2642 if (!cp)
2643 return;
2644
2645 hci_dev_lock(hdev);
2646
2647 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2648 if (conn) {
2649 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2650
2651 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2652 hci_sco_setup(conn, status);
2653 }
2654
2655 hci_dev_unlock(hdev);
2656 }
2657
2658 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2659 {
2660 struct hci_cp_exit_sniff_mode *cp;
2661 struct hci_conn *conn;
2662
2663 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2664
2665 if (!status)
2666 return;
2667
2668 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2669 if (!cp)
2670 return;
2671
2672 hci_dev_lock(hdev);
2673
2674 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2675 if (conn) {
2676 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2677
2678 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2679 hci_sco_setup(conn, status);
2680 }
2681
2682 hci_dev_unlock(hdev);
2683 }
2684
2685 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2686 {
2687 struct hci_cp_disconnect *cp;
2688 struct hci_conn_params *params;
2689 struct hci_conn *conn;
2690 bool mgmt_conn;
2691
2692 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2693
2694 /* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and we are not
2695 * suspended; otherwise clean up the connection immediately.
2696 */
2697 if (!status && !hdev->suspended)
2698 return;
2699
2700 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2701 if (!cp)
2702 return;
2703
2704 hci_dev_lock(hdev);
2705
2706 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2707 if (!conn)
2708 goto unlock;
2709
2710 if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
2711 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2712 conn->dst_type, status);
2713
2714 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2715 hdev->cur_adv_instance = conn->adv_instance;
2716 hci_enable_advertising(hdev);
2717 }
2718
2719 /* Inform sockets conn is gone before we delete it */
2720 hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2721
2722 goto done;
2723 }
2724
2725 /* During suspend, mark the connection as closed immediately,
2726 * since we might not receive HCI_EV_DISCONN_COMPLETE.
2727 */
2728 if (hdev->suspended)
2729 conn->state = BT_CLOSED;
2730
2731 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2732
2733 if (conn->type == ACL_LINK) {
2734 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2735 hci_remove_link_key(hdev, &conn->dst);
2736 }
2737
2738 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2739 if (params) {
2740 switch (params->auto_connect) {
2741 case HCI_AUTO_CONN_LINK_LOSS:
2742 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2743 break;
2744 fallthrough;
2745
2746 case HCI_AUTO_CONN_DIRECT:
2747 case HCI_AUTO_CONN_ALWAYS:
2748 hci_pend_le_list_del_init(params);
2749 hci_pend_le_list_add(params, &hdev->pend_le_conns);
2750 break;
2751
2752 default:
2753 break;
2754 }
2755 }
2756
2757 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2758 cp->reason, mgmt_conn);
2759
2760 hci_disconn_cfm(conn, cp->reason);
2761
2762 done:
2763 /* If the disconnection failed for any reason, the upper layer
2764 * does not retry the disconnect in the current implementation.
2765 * Hence, we need to do some basic cleanup here and re-enable
2766 * advertising if necessary.
2767 */
2768 hci_conn_del(conn);
2769 unlock:
2770 hci_dev_unlock(hdev);
2771 }
2772
2773 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2774 {
2775 /* When using controller based address resolution, the new address
2776 * types 0x02 and 0x03 are used. These types need to be converted
2777 * back into either a public or a random address type.
2778 */
2779 switch (type) {
2780 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2781 if (resolved)
2782 *resolved = true;
2783 return ADDR_LE_DEV_PUBLIC;
2784 case ADDR_LE_DEV_RANDOM_RESOLVED:
2785 if (resolved)
2786 *resolved = true;
2787 return ADDR_LE_DEV_RANDOM;
2788 }
2789
2790 if (resolved)
2791 *resolved = false;
2792 return type;
2793 }
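
/* For example, with address resolution enabled in the controller, an
 * event may report peer address type 0x02 (public identity address,
 * resolved from an RPA); the helper above maps that back to
 * ADDR_LE_DEV_PUBLIC and tells the caller the address was resolved.
 */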
2794
2795 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2796 u8 peer_addr_type, u8 own_address_type,
2797 u8 filter_policy)
2798 {
2799 struct hci_conn *conn;
2800
2801 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2802 peer_addr_type);
2803 if (!conn)
2804 return;
2805
2806 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2807
2808 /* Store the initiator and responder address information which
2809 * is needed for SMP. These values will not change during the
2810 * lifetime of the connection.
2811 */
2812 conn->init_addr_type = own_address_type;
2813 if (own_address_type == ADDR_LE_DEV_RANDOM)
2814 bacpy(&conn->init_addr, &hdev->random_addr);
2815 else
2816 bacpy(&conn->init_addr, &hdev->bdaddr);
2817
2818 conn->resp_addr_type = peer_addr_type;
2819 bacpy(&conn->resp_addr, peer_addr);
2820 }
2821
2822 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2823 {
2824 struct hci_cp_le_create_conn *cp;
2825
2826 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2827
2828 /* All connection failure handling is taken care of by the
2829 * hci_conn_failed function which is triggered by the HCI
2830 * request completion callbacks used for connecting.
2831 */
2832 if (status)
2833 return;
2834
2835 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2836 if (!cp)
2837 return;
2838
2839 hci_dev_lock(hdev);
2840
2841 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2842 cp->own_address_type, cp->filter_policy);
2843
2844 hci_dev_unlock(hdev);
2845 }
2846
2847 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2848 {
2849 struct hci_cp_le_ext_create_conn *cp;
2850
2851 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2852
2853 /* All connection failure handling is taken care of by the
2854 * hci_conn_failed function which is triggered by the HCI
2855 * request completion callbacks used for connecting.
2856 */
2857 if (status)
2858 return;
2859
2860 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2861 if (!cp)
2862 return;
2863
2864 hci_dev_lock(hdev);
2865
2866 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2867 cp->own_addr_type, cp->filter_policy);
2868
2869 hci_dev_unlock(hdev);
2870 }
2871
2872 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2873 {
2874 struct hci_cp_le_read_remote_features *cp;
2875 struct hci_conn *conn;
2876
2877 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2878
2879 if (!status)
2880 return;
2881
2882 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2883 if (!cp)
2884 return;
2885
2886 hci_dev_lock(hdev);
2887
2888 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2889 if (conn) {
2890 if (conn->state == BT_CONFIG) {
2891 hci_connect_cfm(conn, status);
2892 hci_conn_drop(conn);
2893 }
2894 }
2895
2896 hci_dev_unlock(hdev);
2897 }
2898
2899 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2900 {
2901 struct hci_cp_le_start_enc *cp;
2902 struct hci_conn *conn;
2903
2904 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2905
2906 if (!status)
2907 return;
2908
2909 hci_dev_lock(hdev);
2910
2911 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2912 if (!cp)
2913 goto unlock;
2914
2915 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2916 if (!conn)
2917 goto unlock;
2918
2919 if (conn->state != BT_CONNECTED)
2920 goto unlock;
2921
2922 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2923 hci_conn_drop(conn);
2924
2925 unlock:
2926 hci_dev_unlock(hdev);
2927 }
2928
2929 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2930 {
2931 struct hci_cp_switch_role *cp;
2932 struct hci_conn *conn;
2933
2934 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2935
2936 if (!status)
2937 return;
2938
2939 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2940 if (!cp)
2941 return;
2942
2943 hci_dev_lock(hdev);
2944
2945 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2946 if (conn)
2947 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2948
2949 hci_dev_unlock(hdev);
2950 }
2951
2952 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2953 struct sk_buff *skb)
2954 {
2955 struct hci_ev_status *ev = data;
2956 struct discovery_state *discov = &hdev->discovery;
2957 struct inquiry_entry *e;
2958
2959 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2960
2961 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2962 return;
2963
2964 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2965 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2966
2967 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2968 return;
2969
2970 hci_dev_lock(hdev);
2971
2972 if (discov->state != DISCOVERY_FINDING)
2973 goto unlock;
2974
2975 if (list_empty(&discov->resolve)) {
2976 /* When BR/EDR inquiry is active and no LE scanning is in
2977 * progress, then change discovery state to indicate completion.
2978 *
2979 * When running LE scanning and BR/EDR inquiry simultaneously
2980 * and the LE scan already finished, then change the discovery
2981 * state to indicate completion.
2982 */
2983 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2984 !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
2985 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2986 goto unlock;
2987 }
2988
2989 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2990 if (e && hci_resolve_name(hdev, e) == 0) {
2991 e->name_state = NAME_PENDING;
2992 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2993 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
2994 } else {
2995 /* When BR/EDR inquiry is active and no LE scanning is in
2996 * progress, then change discovery state to indicate completion.
2997 *
2998 * When running LE scanning and BR/EDR inquiry simultaneously
2999 * and the LE scan already finished, then change the discovery
3000 * state to indicate completion.
3001 */
3002 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3003 !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
3004 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3005 }
3006
3007 unlock:
3008 hci_dev_unlock(hdev);
3009 }
3010
3011 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3012 struct sk_buff *skb)
3013 {
3014 struct hci_ev_inquiry_result *ev = edata;
3015 struct inquiry_data data;
3016 int i;
3017
3018 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3019 flex_array_size(ev, info, ev->num)))
3020 return;
3021
3022 bt_dev_dbg(hdev, "num %d", ev->num);
3023
3024 if (!ev->num)
3025 return;
3026
3027 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3028 return;
3029
3030 hci_dev_lock(hdev);
3031
3032 for (i = 0; i < ev->num; i++) {
3033 struct inquiry_info *info = &ev->info[i];
3034 u32 flags;
3035
3036 bacpy(&data.bdaddr, &info->bdaddr);
3037 data.pscan_rep_mode = info->pscan_rep_mode;
3038 data.pscan_period_mode = info->pscan_period_mode;
3039 data.pscan_mode = info->pscan_mode;
3040 memcpy(data.dev_class, info->dev_class, 3);
3041 data.clock_offset = info->clock_offset;
3042 data.rssi = HCI_RSSI_INVALID;
3043 data.ssp_mode = 0x00;
3044
3045 flags = hci_inquiry_cache_update(hdev, &data, false);
3046
3047 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3048 info->dev_class, HCI_RSSI_INVALID,
3049 flags, NULL, 0, NULL, 0, 0);
3050 }
3051
3052 hci_dev_unlock(hdev);
3053 }
3054
3055 static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn)
3056 {
3057 struct hci_cp_read_enc_key_size cp;
3058 u8 *key_enc_size = hci_conn_key_enc_size(conn);
3059
3060 if (!read_key_size_capable(hdev)) {
3061 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3062 return -EOPNOTSUPP;
3063 }
3064
3065 bt_dev_dbg(hdev, "hcon %p", conn);
3066
3067 memset(&cp, 0, sizeof(cp));
3068 cp.handle = cpu_to_le16(conn->handle);
3069
3070 /* If the key enc_size is already known, use it as conn->enc_key_size,
3071 * otherwise use hdev->min_enc_key_size so the likes of
3072 * l2cap_check_enc_key_size don't fail while waiting for
3073 * HCI_OP_READ_ENC_KEY_SIZE response.
3074 */
3075 if (key_enc_size && *key_enc_size)
3076 conn->enc_key_size = *key_enc_size;
3077 else
3078 conn->enc_key_size = hdev->min_enc_key_size;
3079
3080 return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3081 }
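
/* Note: enc_key_size is what the likes of l2cap_check_enc_key_size()
 * later compare against hdev->min_enc_key_size, which is the guard
 * against encryption key size downgrade (KNOB-style) attacks.
 */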
3082
3083 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3084 struct sk_buff *skb)
3085 {
3086 struct hci_ev_conn_complete *ev = data;
3087 struct hci_conn *conn;
3088 u8 status = ev->status;
3089
3090 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3091
3092 hci_dev_lock(hdev);
3093
3094 /* Check for an existing connection:
3095 *
3096 * 1. If it doesn't exist, then it must be in the receiver/slave role.
3097 * 2. If it does exist, confirm that it is connecting (BT_CONNECT) in
3098 * the case of the initiator/master role, since there could be a
3099 * collision where either side is attempting to connect, or something
3100 * like fuzz testing is trying to play tricks and destroy the hcon
3101 * object before it even attempts to connect (e.g. hcon->state == BT_OPEN).
3102 */
3103 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3104 if (!conn ||
3105 (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
3106 /* In case of error status and there is no connection pending
3107 * just unlock as there is nothing to cleanup.
3108 */
3109 if (ev->status)
3110 goto unlock;
3111
3112 /* The connection may not exist if it was auto-connected. Check the
3113 * BR/EDR allowlist to see if this device is allowed to auto connect,
3114 * and if the link is of ACL type, create the connection object
3115 * automatically.
3116 *
3117 * Auto-connect will only occur if the event filter is programmed
3118 * with a given address. Right now, the event filter is only used
3119 * during suspend.
3120 */
3121 if (ev->link_type == ACL_LINK &&
3122 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3123 &ev->bdaddr,
3124 BDADDR_BREDR)) {
3125 conn = hci_conn_add_unset(hdev, ev->link_type,
3126 &ev->bdaddr, HCI_ROLE_SLAVE);
3127 if (IS_ERR(conn)) {
3128 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3129 goto unlock;
3130 }
3131 } else {
3132 if (ev->link_type != SCO_LINK)
3133 goto unlock;
3134
3135 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3136 &ev->bdaddr);
3137 if (!conn)
3138 goto unlock;
3139
3140 conn->type = SCO_LINK;
3141 }
3142 }
3143
3144 /* The HCI_Connection_Complete event is only sent once per connection.
3145 * Processing it more than once per connection can corrupt kernel memory.
3146 *
3147 * As the connection handle is set here for the first time, it indicates
3148 * whether the connection is already set up.
3149 */
3150 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3151 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3152 goto unlock;
3153 }
3154
3155 if (!status) {
3156 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3157 if (status)
3158 goto done;
3159
3160 if (conn->type == ACL_LINK) {
3161 conn->state = BT_CONFIG;
3162 hci_conn_hold(conn);
3163
3164 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3165 !hci_find_link_key(hdev, &ev->bdaddr))
3166 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3167 else
3168 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3169 } else
3170 conn->state = BT_CONNECTED;
3171
3172 hci_debugfs_create_conn(conn);
3173 hci_conn_add_sysfs(conn);
3174
3175 if (test_bit(HCI_AUTH, &hdev->flags))
3176 set_bit(HCI_CONN_AUTH, &conn->flags);
3177
3178 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3179 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3180
3181 /* "Link key request" completed ahead of "connect request" completes */
3182 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3183 ev->link_type == ACL_LINK) {
3184 struct link_key *key;
3185
3186 key = hci_find_link_key(hdev, &ev->bdaddr);
3187 if (key) {
3188 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3189 hci_read_enc_key_size(hdev, conn);
3190 hci_encrypt_cfm(conn, ev->status);
3191 }
3192 }
3193
3194 /* Get remote features */
3195 if (conn->type == ACL_LINK) {
3196 struct hci_cp_read_remote_features cp;
3197 cp.handle = ev->handle;
3198 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3199 sizeof(cp), &cp);
3200
3201 hci_update_scan(hdev);
3202 }
3203
3204 /* Set packet type for incoming connection */
3205 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3206 struct hci_cp_change_conn_ptype cp;
3207 cp.handle = ev->handle;
3208 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3209 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3210 &cp);
3211 }
3212 }
3213
3214 if (conn->type == ACL_LINK)
3215 hci_sco_setup(conn, ev->status);
3216
3217 done:
3218 if (status) {
3219 hci_conn_failed(conn, status);
3220 } else if (ev->link_type == SCO_LINK) {
3221 switch (conn->setting & SCO_AIRMODE_MASK) {
3222 case SCO_AIRMODE_CVSD:
3223 if (hdev->notify)
3224 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3225 break;
3226 }
3227
3228 hci_connect_cfm(conn, status);
3229 }
3230
3231 unlock:
3232 hci_dev_unlock(hdev);
3233 }
3234
3235 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3236 {
3237 struct hci_cp_reject_conn_req cp;
3238
3239 bacpy(&cp.bdaddr, bdaddr);
3240 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3241 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3242 }
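
/* HCI_ERROR_REJ_BAD_ADDR (0x0f, "Connection Rejected due to Unacceptable
 * BD_ADDR") is used both for administratively rejected peers and for the
 * CVE-2020-26555 case below, where the peer claims our own BD_ADDR.
 */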
3243
3244 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3245 struct sk_buff *skb)
3246 {
3247 struct hci_ev_conn_request *ev = data;
3248 int mask = hdev->link_mode;
3249 struct inquiry_entry *ie;
3250 struct hci_conn *conn;
3251 __u8 flags = 0;
3252
3253 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3254
3255 /* Reject incoming connection from device with same BD ADDR against
3256 * CVE-2020-26555
3257 */
3258 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3259 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
3260 &ev->bdaddr);
3261 hci_reject_conn(hdev, &ev->bdaddr);
3262 return;
3263 }
3264
3265 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3266 &flags);
3267
3268 if (!(mask & HCI_LM_ACCEPT)) {
3269 hci_reject_conn(hdev, &ev->bdaddr);
3270 return;
3271 }
3272
3273 hci_dev_lock(hdev);
3274
3275 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3276 BDADDR_BREDR)) {
3277 hci_reject_conn(hdev, &ev->bdaddr);
3278 goto unlock;
3279 }
3280
3281 /* Require HCI_CONNECTABLE or an accept list entry to accept the
3282 * connection. These features are only touched through mgmt so
3283 * only do the checks if HCI_MGMT is set.
3284 */
3285 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3286 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3287 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3288 BDADDR_BREDR)) {
3289 hci_reject_conn(hdev, &ev->bdaddr);
3290 goto unlock;
3291 }
3292
3293 /* Connection accepted */
3294
3295 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3296 if (ie)
3297 memcpy(ie->data.dev_class, ev->dev_class, 3);
3298
3299 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3300 &ev->bdaddr);
3301 if (!conn) {
3302 conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3303 HCI_ROLE_SLAVE);
3304 if (IS_ERR(conn)) {
3305 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3306 goto unlock;
3307 }
3308 }
3309
3310 memcpy(conn->dev_class, ev->dev_class, 3);
3311
3312 hci_dev_unlock(hdev);
3313
3314 if (ev->link_type == ACL_LINK ||
3315 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3316 struct hci_cp_accept_conn_req cp;
3317 conn->state = BT_CONNECT;
3318
3319 bacpy(&cp.bdaddr, &ev->bdaddr);
3320
3321 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3322 cp.role = 0x00; /* Become central */
3323 else
3324 cp.role = 0x01; /* Remain peripheral */
3325
3326 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3327 } else if (!(flags & HCI_PROTO_DEFER)) {
3328 struct hci_cp_accept_sync_conn_req cp;
3329 conn->state = BT_CONNECT;
3330
3331 bacpy(&cp.bdaddr, &ev->bdaddr);
3332 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3333
3334 cp.tx_bandwidth = cpu_to_le32(0x00001f40); /* 8000 bytes/s (64 kbit/s) */
3335 cp.rx_bandwidth = cpu_to_le32(0x00001f40); /* 8000 bytes/s (64 kbit/s) */
3336 cp.max_latency = cpu_to_le16(0xffff); /* Don't care */
3337 cp.content_format = cpu_to_le16(hdev->voice_setting);
3338 cp.retrans_effort = 0xff; /* Don't care */
3339
3340 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3341 &cp);
3342 } else {
3343 conn->state = BT_CONNECT2;
3344 hci_connect_cfm(conn, 0);
3345 }
3346
3347 return;
3348 unlock:
3349 hci_dev_unlock(hdev);
3350 }
3351
3352 static u8 hci_to_mgmt_reason(u8 err)
3353 {
3354 switch (err) {
3355 case HCI_ERROR_CONNECTION_TIMEOUT:
3356 return MGMT_DEV_DISCONN_TIMEOUT;
3357 case HCI_ERROR_REMOTE_USER_TERM:
3358 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3359 case HCI_ERROR_REMOTE_POWER_OFF:
3360 return MGMT_DEV_DISCONN_REMOTE;
3361 case HCI_ERROR_LOCAL_HOST_TERM:
3362 return MGMT_DEV_DISCONN_LOCAL_HOST;
3363 default:
3364 return MGMT_DEV_DISCONN_UNKNOWN;
3365 }
3366 }
3367
3368 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3369 struct sk_buff *skb)
3370 {
3371 struct hci_ev_disconn_complete *ev = data;
3372 u8 reason;
3373 struct hci_conn_params *params;
3374 struct hci_conn *conn;
3375 bool mgmt_connected;
3376
3377 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3378
3379 hci_dev_lock(hdev);
3380
3381 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3382 if (!conn)
3383 goto unlock;
3384
3385 if (ev->status) {
3386 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3387 conn->dst_type, ev->status);
3388 goto unlock;
3389 }
3390
3391 conn->state = BT_CLOSED;
3392
3393 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3394
3395 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3396 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3397 else
3398 reason = hci_to_mgmt_reason(ev->reason);
3399
3400 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3401 reason, mgmt_connected);
3402
3403 if (conn->type == ACL_LINK) {
3404 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3405 hci_remove_link_key(hdev, &conn->dst);
3406
3407 hci_update_scan(hdev);
3408 }
3409
3410 /* Re-enable passive scanning if disconnected device is marked
3411 * as auto-connectable.
3412 */
3413 if (conn->type == LE_LINK) {
3414 params = hci_conn_params_lookup(hdev, &conn->dst,
3415 conn->dst_type);
3416 if (params) {
3417 switch (params->auto_connect) {
3418 case HCI_AUTO_CONN_LINK_LOSS:
3419 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3420 break;
3421 fallthrough;
3422
3423 case HCI_AUTO_CONN_DIRECT:
3424 case HCI_AUTO_CONN_ALWAYS:
3425 hci_pend_le_list_del_init(params);
3426 hci_pend_le_list_add(params,
3427 &hdev->pend_le_conns);
3428 hci_update_passive_scan(hdev);
3429 break;
3430
3431 default:
3432 break;
3433 }
3434 }
3435 }
3436
3437 hci_disconn_cfm(conn, ev->reason);
3438
3439 /* Re-enable advertising if necessary, since it might
3440 * have been disabled by the connection. From the
3441 * HCI_LE_Set_Advertise_Enable command description in
3442 * the core specification (v4.0):
3443 * "The Controller shall continue advertising until the Host
3444 * issues an LE_Set_Advertise_Enable command with
3445 * Advertising_Enable set to 0x00 (Advertising is disabled)
3446 * or until a connection is created or until the Advertising
3447 * is timed out due to Directed Advertising."
3448 */
3449 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3450 hdev->cur_adv_instance = conn->adv_instance;
3451 hci_enable_advertising(hdev);
3452 }
3453
3454 hci_conn_del(conn);
3455
3456 unlock:
3457 hci_dev_unlock(hdev);
3458 }
3459
3460 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3461 struct sk_buff *skb)
3462 {
3463 struct hci_ev_auth_complete *ev = data;
3464 struct hci_conn *conn;
3465
3466 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3467
3468 hci_dev_lock(hdev);
3469
3470 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3471 if (!conn)
3472 goto unlock;
3473
3474 if (!ev->status) {
3475 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3476 set_bit(HCI_CONN_AUTH, &conn->flags);
3477 conn->sec_level = conn->pending_sec_level;
3478 } else {
3479 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3480 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3481
3482 mgmt_auth_failed(conn, ev->status);
3483 }
3484
3485 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3486
3487 if (conn->state == BT_CONFIG) {
3488 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3489 struct hci_cp_set_conn_encrypt cp;
3490 cp.handle = ev->handle;
3491 cp.encrypt = 0x01;
3492 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3493 &cp);
3494 } else {
3495 conn->state = BT_CONNECTED;
3496 hci_connect_cfm(conn, ev->status);
3497 hci_conn_drop(conn);
3498 }
3499 } else {
3500 hci_auth_cfm(conn, ev->status);
3501
3502 hci_conn_hold(conn);
3503 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3504 hci_conn_drop(conn);
3505 }
3506
3507 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3508 if (!ev->status) {
3509 struct hci_cp_set_conn_encrypt cp;
3510 cp.handle = ev->handle;
3511 cp.encrypt = 0x01;
3512 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3513 &cp);
3514 } else {
3515 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3516 hci_encrypt_cfm(conn, ev->status);
3517 }
3518 }
3519
3520 unlock:
3521 hci_dev_unlock(hdev);
3522 }
3523
3524 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3525 struct sk_buff *skb)
3526 {
3527 struct hci_ev_remote_name *ev = data;
3528 struct hci_conn *conn;
3529
3530 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3531
3532 hci_dev_lock(hdev);
3533
3534 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3535
3536 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3537 goto check_auth;
3538
3539 if (ev->status == 0)
3540 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3541 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3542 else
3543 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3544
3545 check_auth:
3546 if (!conn)
3547 goto unlock;
3548
3549 if (!hci_outgoing_auth_needed(hdev, conn))
3550 goto unlock;
3551
3552 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3553 struct hci_cp_auth_requested cp;
3554
3555 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3556
3557 cp.handle = __cpu_to_le16(conn->handle);
3558 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3559 }
3560
3561 unlock:
3562 hci_dev_unlock(hdev);
3563 }
3564
3565 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3566 struct sk_buff *skb)
3567 {
3568 struct hci_ev_encrypt_change *ev = data;
3569 struct hci_conn *conn;
3570
3571 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3572
3573 hci_dev_lock(hdev);
3574
3575 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3576 if (!conn)
3577 goto unlock;
3578
3579 if (!ev->status) {
3580 if (ev->encrypt) {
3581 /* Encryption implies authentication */
3582 set_bit(HCI_CONN_AUTH, &conn->flags);
3583 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3584 conn->sec_level = conn->pending_sec_level;
3585
3586 /* P-256 authentication key implies FIPS */
3587 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3588 set_bit(HCI_CONN_FIPS, &conn->flags);
3589
3590 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3591 conn->type == LE_LINK)
3592 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3593 } else {
3594 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3595 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3596 }
3597 }
3598
3599 /* We should disregard the current RPA and generate a new one
3600 * whenever the encryption procedure fails.
3601 */
3602 if (ev->status && conn->type == LE_LINK) {
3603 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3604 hci_adv_instances_set_rpa_expired(hdev, true);
3605 }
3606
3607 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3608
3609 /* Check link security requirements are met */
3610 if (!hci_conn_check_link_mode(conn))
3611 ev->status = HCI_ERROR_AUTH_FAILURE;
3612
3613 if (ev->status && conn->state == BT_CONNECTED) {
3614 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3615 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3616
3617 /* Notify upper layers so they can cleanup before
3618 * disconnecting.
3619 */
3620 hci_encrypt_cfm(conn, ev->status);
3621 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3622 hci_conn_drop(conn);
3623 goto unlock;
3624 }
3625
3626 /* Try reading the encryption key size for encrypted ACL links */
3627 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3628 if (hci_read_enc_key_size(hdev, conn))
3629 goto notify;
3630
3631 goto unlock;
3632 }
3633
3634 /* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
3635 * to avoid unexpected SMP command errors when pairing.
3636 */
3637 if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT))
3638 goto notify;
3639
3640 /* Set the default Authenticated Payload Timeout after an LE link
3641 * is established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3,
3642 * the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be sent when the
3643 * link is active and encryption is enabled; the conn type can be
3644 * either LE or ACL, and the controller must support LMP Ping. The
3645 * link must also be using AES-CCM encryption.
3646 */
3647 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3648 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3649 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3650 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3651 struct hci_cp_write_auth_payload_to cp;
3652
3653 cp.handle = cpu_to_le16(conn->handle);
3654 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3655 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3656 sizeof(cp), &cp))
3657 bt_dev_err(hdev, "write auth payload timeout failed");
3658 }
3659
3660 notify:
3661 hci_encrypt_cfm(conn, ev->status);
3662
3663 unlock:
3664 hci_dev_unlock(hdev);
3665 }
3666
3667 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3668 struct sk_buff *skb)
3669 {
3670 struct hci_ev_change_link_key_complete *ev = data;
3671 struct hci_conn *conn;
3672
3673 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3674
3675 hci_dev_lock(hdev);
3676
3677 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3678 if (conn) {
3679 if (!ev->status)
3680 set_bit(HCI_CONN_SECURE, &conn->flags);
3681
3682 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3683
3684 hci_key_change_cfm(conn, ev->status);
3685 }
3686
3687 hci_dev_unlock(hdev);
3688 }
3689
3690 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3691 struct sk_buff *skb)
3692 {
3693 struct hci_ev_remote_features *ev = data;
3694 struct hci_conn *conn;
3695
3696 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3697
3698 hci_dev_lock(hdev);
3699
3700 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3701 if (!conn)
3702 goto unlock;
3703
3704 if (!ev->status)
3705 memcpy(conn->features[0], ev->features, 8);
3706
3707 if (conn->state != BT_CONFIG)
3708 goto unlock;
3709
3710 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3711 lmp_ext_feat_capable(conn)) {
3712 struct hci_cp_read_remote_ext_features cp;
3713 cp.handle = ev->handle;
3714 cp.page = 0x01;
3715 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3716 sizeof(cp), &cp);
3717 goto unlock;
3718 }
3719
3720 if (!ev->status) {
3721 struct hci_cp_remote_name_req cp;
3722 memset(&cp, 0, sizeof(cp));
3723 bacpy(&cp.bdaddr, &conn->dst);
3724 cp.pscan_rep_mode = 0x02;
3725 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3726 } else {
3727 mgmt_device_connected(hdev, conn, NULL, 0);
3728 }
3729
3730 if (!hci_outgoing_auth_needed(hdev, conn)) {
3731 conn->state = BT_CONNECTED;
3732 hci_connect_cfm(conn, ev->status);
3733 hci_conn_drop(conn);
3734 }
3735
3736 unlock:
3737 hci_dev_unlock(hdev);
3738 }
3739
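/* ncmd is the Num_HCI_Command_Packets field from a Command Complete or
 * Command Status event; zero means the controller cannot accept more
 * commands for now. The ncmd timer is armed in that case so that a
 * controller which never reports a non-zero value again does not stall
 * the command queue forever.
 */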
3740 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3741 {
3742 cancel_delayed_work(&hdev->cmd_timer);
3743
3744 rcu_read_lock();
3745 if (!test_bit(HCI_RESET, &hdev->flags)) {
3746 if (ncmd) {
3747 cancel_delayed_work(&hdev->ncmd_timer);
3748 atomic_set(&hdev->cmd_cnt, 1);
3749 } else {
3750 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3751 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3752 HCI_NCMD_TIMEOUT);
3753 }
3754 }
3755 rcu_read_unlock();
3756 }
3757
3758 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3759 struct sk_buff *skb)
3760 {
3761 struct hci_rp_le_read_buffer_size_v2 *rp = data;
3762
3763 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3764
3765 if (rp->status)
3766 return rp->status;
3767
3768 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
3769 hdev->le_pkts = rp->acl_max_pkt;
3770 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
3771 hdev->iso_pkts = rp->iso_max_pkt;
3772
3773 hdev->le_cnt = hdev->le_pkts;
3774 hdev->iso_cnt = hdev->iso_pkts;
3775
3776 BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3777 hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3778
3779 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3780 return HCI_ERROR_INVALID_PARAMETERS;
3781
3782 return rp->status;
3783 }
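
/* Unlike v1, the v2 LE Read Buffer Size response also carries the ISO
 * buffer geometry; le_cnt and iso_cnt above are the available-credit
 * counters and start out equal to the controller-reported totals.
 */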
3784
3785 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3786 {
3787 struct hci_conn *conn, *tmp;
3788
3789 lockdep_assert_held(&hdev->lock);
3790
3791 list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3792 if (conn->type != CIS_LINK ||
3793 conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3794 continue;
3795
3796 if (HCI_CONN_HANDLE_UNSET(conn->handle))
3797 hci_conn_failed(conn, status);
3798 }
3799 }
3800
3801 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3802 struct sk_buff *skb)
3803 {
3804 struct hci_rp_le_set_cig_params *rp = data;
3805 struct hci_cp_le_set_cig_params *cp;
3806 struct hci_conn *conn;
3807 u8 status = rp->status;
3808 bool pending = false;
3809 int i;
3810
3811 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3812
3813 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3814 if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3815 rp->cig_id != cp->cig_id)) {
3816 bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3817 status = HCI_ERROR_UNSPECIFIED;
3818 }
3819
3820 hci_dev_lock(hdev);
3821
3822 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3823 *
3824 * If the Status return parameter is non-zero, then the state of the CIG
3825 * and its CIS configurations shall not be changed by the command. If
3826 * the CIG did not already exist, it shall not be created.
3827 */
3828 if (status) {
3829 /* Keep current configuration, fail only the unbound CIS */
3830 hci_unbound_cis_failed(hdev, rp->cig_id, status);
3831 goto unlock;
3832 }
3833
3834 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3835 *
3836 * If the Status return parameter is zero, then the Controller shall
3837 * set the Connection_Handle arrayed return parameter to the connection
3838 * handle(s) corresponding to the CIS configurations specified in
3839 * the CIS_IDs command parameter, in the same order.
3840 */
3841 for (i = 0; i < rp->num_handles; ++i) {
3842 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3843 cp->cis[i].cis_id);
3844 if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3845 continue;
3846
3847 if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3848 continue;
3849
3850 if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3851 continue;
3852
3853 if (conn->state == BT_CONNECT)
3854 pending = true;
3855 }
3856
3857 unlock:
3858 if (pending)
3859 hci_le_create_cis_pending(hdev);
3860
3861 hci_dev_unlock(hdev);
3862
3863 return rp->status;
3864 }
3865
3866 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3867 struct sk_buff *skb)
3868 {
3869 struct hci_rp_le_setup_iso_path *rp = data;
3870 struct hci_cp_le_setup_iso_path *cp;
3871 struct hci_conn *conn;
3872
3873 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3874
3875 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3876 if (!cp)
3877 return rp->status;
3878
3879 hci_dev_lock(hdev);
3880
3881 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3882 if (!conn)
3883 goto unlock;
3884
3885 if (rp->status) {
3886 hci_connect_cfm(conn, rp->status);
3887 hci_conn_del(conn);
3888 goto unlock;
3889 }
3890
3891 switch (cp->direction) {
3892 /* Input (Host to Controller) */
3893 case 0x00:
3894 /* Only confirm connection if output only */
3895 if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3896 hci_connect_cfm(conn, rp->status);
3897 break;
3898 /* Output (Controller to Host) */
3899 case 0x01:
3900 /* Confirm connection since conn->iso_qos is always configured
3901 * last.
3902 */
3903 hci_connect_cfm(conn, rp->status);
3904
3905 /* Notify device connected in case it is a BIG Sync */
3906 if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3907 mgmt_device_connected(hdev, conn, NULL, 0);
3908
3909 break;
3910 }
3911
3912 unlock:
3913 hci_dev_unlock(hdev);
3914 return rp->status;
3915 }
3916
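/* HCI_LE_Create_BIG completes asynchronously: this Command Status only
 * indicates whether the command was accepted, and the result arrives
 * later in the LE Create BIG Complete meta event, so just log here.
 */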
3917 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3918 {
3919 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3920 }
3921
3922 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3923 struct sk_buff *skb)
3924 {
3925 struct hci_ev_status *rp = data;
3926 struct hci_cp_le_set_per_adv_params *cp;
3927
3928 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3929
3930 if (rp->status)
3931 return rp->status;
3932
3933 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3934 if (!cp)
3935 return rp->status;
3936
3937 /* TODO: set the conn state */
3938 return rp->status;
3939 }
3940
3941 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3942 struct sk_buff *skb)
3943 {
3944 struct hci_ev_status *rp = data;
3945 struct hci_cp_le_set_per_adv_enable *cp;
3946 struct adv_info *adv = NULL, *n;
3947 u8 per_adv_cnt = 0;
3948
3949 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3950
3951 if (rp->status)
3952 return rp->status;
3953
3954 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3955 if (!cp)
3956 return rp->status;
3957
3958 hci_dev_lock(hdev);
3959
3960 adv = hci_find_adv_instance(hdev, cp->handle);
3961
3962 if (cp->enable) {
3963 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3964
3965 if (adv)
3966 adv->periodic_enabled = true;
3967 } else {
3968 if (adv)
3969 adv->periodic_enabled = false;
3970
3971 		/* If just one instance was disabled, check whether any
3972 		 * other instances are still enabled before clearing
3973 		 * HCI_LE_PER_ADV. The current periodic adv instance will be
3974 		 * marked as disabled once extended advertising is also disabled.
3975 		 */
3976 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3977 list) {
3978 if (adv->periodic && adv->enabled)
3979 per_adv_cnt++;
3980 }
3981
3982 if (per_adv_cnt > 1)
3983 goto unlock;
3984
3985 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3986 }
3987
3988 unlock:
3989 hci_dev_unlock(hdev);
3990
3991 return rp->status;
3992 }
3993
3994 #define HCI_CC_VL(_op, _func, _min, _max) \
3995 { \
3996 .op = _op, \
3997 .func = _func, \
3998 .min_len = _min, \
3999 .max_len = _max, \
4000 }
4001
4002 #define HCI_CC(_op, _func, _len) \
4003 HCI_CC_VL(_op, _func, _len, _len)
4004
4005 #define HCI_CC_STATUS(_op, _func) \
4006 HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4007
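/* Table mapping Command Complete opcodes to their handlers along with
 * the expected response length.  HCI_CC() declares a fixed-size
 * response (min_len == max_len), HCI_CC_STATUS() a response consisting
 * of a single status byte, and HCI_CC_VL() a variable-length response.
 * For example, HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset) expands to:
 *
 *	{ .op = HCI_OP_RESET, .func = hci_cc_reset,
 *	  .min_len = sizeof(struct hci_ev_status),
 *	  .max_len = sizeof(struct hci_ev_status) }
 */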
4008 static const struct hci_cc {
4009 u16 op;
4010 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4011 u16 min_len;
4012 u16 max_len;
4013 } hci_cc_table[] = {
4014 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4015 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4016 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4017 HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel,
4018 sizeof(struct hci_rp_remote_name_req_cancel)),
4019 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4020 sizeof(struct hci_rp_role_discovery)),
4021 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4022 sizeof(struct hci_rp_read_link_policy)),
4023 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4024 sizeof(struct hci_rp_write_link_policy)),
4025 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4026 sizeof(struct hci_rp_read_def_link_policy)),
4027 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4028 hci_cc_write_def_link_policy),
4029 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4030 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4031 sizeof(struct hci_rp_read_stored_link_key)),
4032 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4033 sizeof(struct hci_rp_delete_stored_link_key)),
4034 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4035 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4036 sizeof(struct hci_rp_read_local_name)),
4037 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4038 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4039 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4040 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4041 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4042 sizeof(struct hci_rp_read_class_of_dev)),
4043 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4044 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4045 sizeof(struct hci_rp_read_voice_setting)),
4046 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4047 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4048 sizeof(struct hci_rp_read_num_supported_iac)),
4049 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4050 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4051 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4052 sizeof(struct hci_rp_read_auth_payload_to)),
4053 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4054 sizeof(struct hci_rp_write_auth_payload_to)),
4055 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4056 sizeof(struct hci_rp_read_local_version)),
4057 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4058 sizeof(struct hci_rp_read_local_commands)),
4059 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4060 sizeof(struct hci_rp_read_local_features)),
4061 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4062 sizeof(struct hci_rp_read_local_ext_features)),
4063 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4064 sizeof(struct hci_rp_read_buffer_size)),
4065 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4066 sizeof(struct hci_rp_read_bd_addr)),
4067 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4068 sizeof(struct hci_rp_read_local_pairing_opts)),
4069 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4070 sizeof(struct hci_rp_read_page_scan_activity)),
4071 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4072 hci_cc_write_page_scan_activity),
4073 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4074 sizeof(struct hci_rp_read_page_scan_type)),
4075 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4076 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4077 sizeof(struct hci_rp_read_clock)),
4078 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4079 sizeof(struct hci_rp_read_enc_key_size)),
4080 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4081 sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4082 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4083 hci_cc_read_def_err_data_reporting,
4084 sizeof(struct hci_rp_read_def_err_data_reporting)),
4085 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4086 hci_cc_write_def_err_data_reporting),
4087 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4088 sizeof(struct hci_rp_pin_code_reply)),
4089 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4090 sizeof(struct hci_rp_pin_code_neg_reply)),
4091 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4092 sizeof(struct hci_rp_read_local_oob_data)),
4093 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4094 sizeof(struct hci_rp_read_local_oob_ext_data)),
4095 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4096 sizeof(struct hci_rp_le_read_buffer_size)),
4097 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4098 sizeof(struct hci_rp_le_read_local_features)),
4099 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4100 sizeof(struct hci_rp_le_read_adv_tx_power)),
4101 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4102 sizeof(struct hci_rp_user_confirm_reply)),
4103 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4104 sizeof(struct hci_rp_user_confirm_reply)),
4105 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4106 sizeof(struct hci_rp_user_confirm_reply)),
4107 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4108 sizeof(struct hci_rp_user_confirm_reply)),
4109 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4110 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4111 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4112 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4113 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4114 hci_cc_le_read_accept_list_size,
4115 sizeof(struct hci_rp_le_read_accept_list_size)),
4116 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4117 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4118 hci_cc_le_add_to_accept_list),
4119 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4120 hci_cc_le_del_from_accept_list),
4121 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4122 sizeof(struct hci_rp_le_read_supported_states)),
4123 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4124 sizeof(struct hci_rp_le_read_def_data_len)),
4125 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4126 hci_cc_le_write_def_data_len),
4127 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4128 hci_cc_le_add_to_resolv_list),
4129 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4130 hci_cc_le_del_from_resolv_list),
4131 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4132 hci_cc_le_clear_resolv_list),
4133 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4134 sizeof(struct hci_rp_le_read_resolv_list_size)),
4135 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4136 hci_cc_le_set_addr_resolution_enable),
4137 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4138 sizeof(struct hci_rp_le_read_max_data_len)),
4139 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4140 hci_cc_write_le_host_supported),
4141 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4142 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4143 sizeof(struct hci_rp_read_rssi)),
4144 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4145 sizeof(struct hci_rp_read_tx_power)),
4146 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4147 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4148 hci_cc_le_set_ext_scan_param),
4149 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4150 hci_cc_le_set_ext_scan_enable),
4151 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4152 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4153 hci_cc_le_read_num_adv_sets,
4154 sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4155 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4156 hci_cc_le_set_ext_adv_enable),
4157 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4158 hci_cc_le_set_adv_set_random_addr),
4159 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4160 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4161 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4162 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4163 hci_cc_le_set_per_adv_enable),
4164 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4165 sizeof(struct hci_rp_le_read_transmit_power)),
4166 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4167 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4168 sizeof(struct hci_rp_le_read_buffer_size_v2)),
4169 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4170 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4171 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4172 sizeof(struct hci_rp_le_setup_iso_path)),
4173 };
4174
4175 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4176 struct sk_buff *skb)
4177 {
4178 void *data;
4179
4180 if (skb->len < cc->min_len) {
4181 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4182 cc->op, skb->len, cc->min_len);
4183 return HCI_ERROR_UNSPECIFIED;
4184 }
4185
4186 	/* Just warn if the length is over max_len since it may still be
4187 	 * possible to partially parse the cc, so leave it to the callback
4188 	 * to decide whether that is acceptable.
4189 	 */
4190 if (skb->len > cc->max_len)
4191 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4192 cc->op, skb->len, cc->max_len);
4193
4194 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4195 if (!data)
4196 return HCI_ERROR_UNSPECIFIED;
4197
4198 return cc->func(hdev, data, skb);
4199 }
4200
4201 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4202 struct sk_buff *skb, u16 *opcode, u8 *status,
4203 hci_req_complete_t *req_complete,
4204 hci_req_complete_skb_t *req_complete_skb)
4205 {
4206 struct hci_ev_cmd_complete *ev = data;
4207 int i;
4208
4209 *opcode = __le16_to_cpu(ev->opcode);
4210
4211 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4212
4213 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4214 if (hci_cc_table[i].op == *opcode) {
4215 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4216 break;
4217 }
4218 }
4219
4220 if (i == ARRAY_SIZE(hci_cc_table)) {
4221 if (!skb->len) {
4222 bt_dev_err(hdev, "Unexpected cc 0x%4.4x with no status",
4223 *opcode);
4224 *status = HCI_ERROR_UNSPECIFIED;
4225 return;
4226 }
4227
4228 /* Unknown opcode, assume byte 0 contains the status, so
4229 * that e.g. __hci_cmd_sync() properly returns errors
4230 		 * for vendor specific commands sent by HCI drivers.
4231 * If a vendor doesn't actually follow this convention we may
4232 * need to introduce a vendor CC table in order to properly set
4233 * the status.
4234 */
4235 *status = skb->data[0];
4236 }
4237
4238 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4239
4240 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4241 req_complete_skb);
4242
4243 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4244 bt_dev_err(hdev,
4245 "unexpected event for opcode 0x%4.4x", *opcode);
4246 return;
4247 }
4248
4249 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4250 queue_work(hdev->workqueue, &hdev->cmd_work);
4251 }
4252
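/* If HCI_LE_Create_CIS fails at the Command Status stage, no CIS
 * Established events will follow, so every CIS named in the command
 * must be failed and cleaned up here.
 */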
4253 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4254 {
4255 struct hci_cp_le_create_cis *cp;
4256 bool pending = false;
4257 int i;
4258
4259 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4260
4261 if (!status)
4262 return;
4263
4264 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4265 if (!cp)
4266 return;
4267
4268 hci_dev_lock(hdev);
4269
4270 /* Remove connection if command failed */
4271 for (i = 0; i < cp->num_cis; i++) {
4272 struct hci_conn *conn;
4273 u16 handle;
4274
4275 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4276
4277 conn = hci_conn_hash_lookup_handle(hdev, handle);
4278 if (conn) {
4279 if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4280 &conn->flags))
4281 pending = true;
4282 conn->state = BT_CLOSED;
4283 hci_connect_cfm(conn, status);
4284 hci_conn_del(conn);
4285 }
4286 }
4287 cp->num_cis = 0;
4288
4289 if (pending)
4290 hci_le_create_cis_pending(hdev);
4291
4292 hci_dev_unlock(hdev);
4293 }
4294
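/* Commands in hci_cs_table complete asynchronously: the Command Status
 * event only reports whether the command was accepted, and the final
 * result is delivered in a dedicated follow-up event.
 */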
4295 #define HCI_CS(_op, _func) \
4296 { \
4297 .op = _op, \
4298 .func = _func, \
4299 }
4300
4301 static const struct hci_cs {
4302 u16 op;
4303 void (*func)(struct hci_dev *hdev, __u8 status);
4304 } hci_cs_table[] = {
4305 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4306 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4307 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4308 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4309 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4310 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4311 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4312 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4313 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4314 hci_cs_read_remote_ext_features),
4315 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4316 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4317 hci_cs_enhanced_setup_sync_conn),
4318 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4319 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4320 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4321 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4322 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4323 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4324 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4325 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4326 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4327 };
4328
4329 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4330 struct sk_buff *skb, u16 *opcode, u8 *status,
4331 hci_req_complete_t *req_complete,
4332 hci_req_complete_skb_t *req_complete_skb)
4333 {
4334 struct hci_ev_cmd_status *ev = data;
4335 int i;
4336
4337 *opcode = __le16_to_cpu(ev->opcode);
4338 *status = ev->status;
4339
4340 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4341
4342 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4343 if (hci_cs_table[i].op == *opcode) {
4344 hci_cs_table[i].func(hdev, ev->status);
4345 break;
4346 }
4347 }
4348
4349 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4350
4351 /* Indicate request completion if the command failed. Also, if
4352 * we're not waiting for a special event and we get a success
4353 * command status we should try to flag the request as completed
4354 	 * (since for such commands there will not be a command
4355 * complete event).
4356 */
4357 if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4358 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4359 req_complete_skb);
4360 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4361 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4362 *opcode);
4363 return;
4364 }
4365 }
4366
4367 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4368 queue_work(hdev->workqueue, &hdev->cmd_work);
4369 }
4370
4371 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4372 struct sk_buff *skb)
4373 {
4374 struct hci_ev_hardware_error *ev = data;
4375
4376 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4377
4378 hdev->hw_error_code = ev->code;
4379
4380 queue_work(hdev->req_workqueue, &hdev->error_reset);
4381 }
4382
4383 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4384 struct sk_buff *skb)
4385 {
4386 struct hci_ev_role_change *ev = data;
4387 struct hci_conn *conn;
4388
4389 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4390
4391 hci_dev_lock(hdev);
4392
4393 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4394 if (conn) {
4395 if (!ev->status)
4396 conn->role = ev->role;
4397
4398 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4399
4400 hci_role_switch_cfm(conn, ev->status, ev->role);
4401 }
4402
4403 hci_dev_unlock(hdev);
4404 }
4405
4406 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4407 struct sk_buff *skb)
4408 {
4409 struct hci_ev_num_comp_pkts *ev = data;
4410 int i;
4411
4412 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4413 flex_array_size(ev, handles, ev->num)))
4414 return;
4415
4416 bt_dev_dbg(hdev, "num %d", ev->num);
4417
4418 hci_dev_lock(hdev);
4419
4420 for (i = 0; i < ev->num; i++) {
4421 struct hci_comp_pkts_info *info = &ev->handles[i];
4422 struct hci_conn *conn;
4423 __u16 handle, count;
4424 unsigned int i;
4425
4426 handle = __le16_to_cpu(info->handle);
4427 count = __le16_to_cpu(info->count);
4428
4429 conn = hci_conn_hash_lookup_handle(hdev, handle);
4430 if (!conn)
4431 continue;
4432
4433 		/* Check if there really are enough packets outstanding before
4434 		 * attempting to decrease the sent counter, otherwise it could
4435 		 * underflow.
4436 */
4437 if (conn->sent >= count) {
4438 conn->sent -= count;
4439 } else {
4440 bt_dev_warn(hdev, "hcon %p sent %u < count %u",
4441 conn, conn->sent, count);
4442 conn->sent = 0;
4443 }
4444
4445 for (i = 0; i < count; ++i)
4446 hci_conn_tx_dequeue(conn);
4447
4448 switch (conn->type) {
4449 case ACL_LINK:
4450 hdev->acl_cnt += count;
4451 if (hdev->acl_cnt > hdev->acl_pkts)
4452 hdev->acl_cnt = hdev->acl_pkts;
4453 break;
4454
4455 case LE_LINK:
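			/* Controllers without a dedicated LE buffer pool
			 * (LE Read Buffer Size returned zero packets) share
			 * the BR/EDR ACL buffers, so return the credits
			 * there instead.
			 */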
4456 if (hdev->le_pkts) {
4457 hdev->le_cnt += count;
4458 if (hdev->le_cnt > hdev->le_pkts)
4459 hdev->le_cnt = hdev->le_pkts;
4460 } else {
4461 hdev->acl_cnt += count;
4462 if (hdev->acl_cnt > hdev->acl_pkts)
4463 hdev->acl_cnt = hdev->acl_pkts;
4464 }
4465 break;
4466
4467 case SCO_LINK:
4468 case ESCO_LINK:
4469 hdev->sco_cnt += count;
4470 if (hdev->sco_cnt > hdev->sco_pkts)
4471 hdev->sco_cnt = hdev->sco_pkts;
4472
4473 break;
4474
4475 case CIS_LINK:
4476 case BIS_LINK:
4477 case PA_LINK:
4478 hdev->iso_cnt += count;
4479 if (hdev->iso_cnt > hdev->iso_pkts)
4480 hdev->iso_cnt = hdev->iso_pkts;
4481 break;
4482
4483 default:
4484 bt_dev_err(hdev, "unknown type %d conn %p",
4485 conn->type, conn);
4486 break;
4487 }
4488 }
4489
4490 queue_work(hdev->workqueue, &hdev->tx_work);
4491
4492 hci_dev_unlock(hdev);
4493 }
4494
4495 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4496 struct sk_buff *skb)
4497 {
4498 struct hci_ev_mode_change *ev = data;
4499 struct hci_conn *conn;
4500
4501 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4502
4503 hci_dev_lock(hdev);
4504
4505 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4506 if (conn) {
4507 conn->mode = ev->mode;
4508
4509 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4510 &conn->flags)) {
4511 if (conn->mode == HCI_CM_ACTIVE)
4512 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4513 else
4514 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4515 }
4516
4517 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4518 hci_sco_setup(conn, ev->status);
4519 }
4520
4521 hci_dev_unlock(hdev);
4522 }
4523
4524 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4525 struct sk_buff *skb)
4526 {
4527 struct hci_ev_pin_code_req *ev = data;
4528 struct hci_conn *conn;
4529
4530 bt_dev_dbg(hdev, "");
4531
4532 hci_dev_lock(hdev);
4533
4534 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4535 if (!conn)
4536 goto unlock;
4537
4538 if (conn->state == BT_CONNECTED) {
4539 hci_conn_hold(conn);
4540 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4541 hci_conn_drop(conn);
4542 }
4543
4544 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4545 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4546 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4547 sizeof(ev->bdaddr), &ev->bdaddr);
4548 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4549 u8 secure;
4550
4551 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4552 secure = 1;
4553 else
4554 secure = 0;
4555
4556 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4557 }
4558
4559 unlock:
4560 hci_dev_unlock(hdev);
4561 }
4562
4563 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4564 {
4565 if (key_type == HCI_LK_CHANGED_COMBINATION)
4566 return;
4567
4568 conn->pin_length = pin_len;
4569 conn->key_type = key_type;
4570
4571 switch (key_type) {
4572 case HCI_LK_LOCAL_UNIT:
4573 case HCI_LK_REMOTE_UNIT:
4574 case HCI_LK_DEBUG_COMBINATION:
4575 return;
4576 case HCI_LK_COMBINATION:
4577 if (pin_len == 16)
4578 conn->pending_sec_level = BT_SECURITY_HIGH;
4579 else
4580 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4581 break;
4582 case HCI_LK_UNAUTH_COMBINATION_P192:
4583 case HCI_LK_UNAUTH_COMBINATION_P256:
4584 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4585 break;
4586 case HCI_LK_AUTH_COMBINATION_P192:
4587 conn->pending_sec_level = BT_SECURITY_HIGH;
4588 break;
4589 case HCI_LK_AUTH_COMBINATION_P256:
4590 conn->pending_sec_level = BT_SECURITY_FIPS;
4591 break;
4592 }
4593 }
4594
4595 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4596 struct sk_buff *skb)
4597 {
4598 struct hci_ev_link_key_req *ev = data;
4599 struct hci_cp_link_key_reply cp;
4600 struct hci_conn *conn;
4601 struct link_key *key;
4602
4603 bt_dev_dbg(hdev, "");
4604
4605 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4606 return;
4607
4608 hci_dev_lock(hdev);
4609
4610 key = hci_find_link_key(hdev, &ev->bdaddr);
4611 if (!key) {
4612 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4613 goto not_found;
4614 }
4615
4616 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4617
4618 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4619 if (conn) {
4620 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4621
4622 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4623 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4624 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4625 bt_dev_dbg(hdev, "ignoring unauthenticated key");
4626 goto not_found;
4627 }
4628
4629 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4630 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4631 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4632 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4633 goto not_found;
4634 }
4635
4636 conn_set_key(conn, key->type, key->pin_len);
4637 }
4638
4639 bacpy(&cp.bdaddr, &ev->bdaddr);
4640 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4641
4642 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4643
4644 hci_dev_unlock(hdev);
4645
4646 return;
4647
4648 not_found:
4649 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4650 hci_dev_unlock(hdev);
4651 }
4652
4653 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4654 struct sk_buff *skb)
4655 {
4656 struct hci_ev_link_key_notify *ev = data;
4657 struct hci_conn *conn;
4658 struct link_key *key;
4659 bool persistent;
4660 u8 pin_len = 0;
4661
4662 bt_dev_dbg(hdev, "");
4663
4664 hci_dev_lock(hdev);
4665
4666 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4667 if (!conn)
4668 goto unlock;
4669
4670 /* Ignore NULL link key against CVE-2020-26555 */
4671 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4672 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4673 &ev->bdaddr);
4674 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4675 hci_conn_drop(conn);
4676 goto unlock;
4677 }
4678
4679 hci_conn_hold(conn);
4680 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4681 hci_conn_drop(conn);
4682
4683 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4684 conn_set_key(conn, ev->key_type, conn->pin_length);
4685
4686 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4687 goto unlock;
4688
4689 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4690 ev->key_type, pin_len, &persistent);
4691 if (!key)
4692 goto unlock;
4693
4694 /* Update connection information since adding the key will have
4695 * fixed up the type in the case of changed combination keys.
4696 */
4697 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4698 conn_set_key(conn, key->type, key->pin_len);
4699
4700 mgmt_new_link_key(hdev, key, persistent);
4701
4702 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4703 * is set. If it's not set simply remove the key from the kernel
4704 * list (we've still notified user space about it but with
4705 * store_hint being 0).
4706 */
4707 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4708 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4709 list_del_rcu(&key->list);
4710 kfree_rcu(key, rcu);
4711 goto unlock;
4712 }
4713
4714 if (persistent)
4715 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4716 else
4717 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4718
4719 unlock:
4720 hci_dev_unlock(hdev);
4721 }
4722
4723 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4724 struct sk_buff *skb)
4725 {
4726 struct hci_ev_clock_offset *ev = data;
4727 struct hci_conn *conn;
4728
4729 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4730
4731 hci_dev_lock(hdev);
4732
4733 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4734 if (conn && !ev->status) {
4735 struct inquiry_entry *ie;
4736
4737 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4738 if (ie) {
4739 ie->data.clock_offset = ev->clock_offset;
4740 ie->timestamp = jiffies;
4741 }
4742 }
4743
4744 hci_dev_unlock(hdev);
4745 }
4746
4747 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4748 struct sk_buff *skb)
4749 {
4750 struct hci_ev_pkt_type_change *ev = data;
4751 struct hci_conn *conn;
4752
4753 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4754
4755 hci_dev_lock(hdev);
4756
4757 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4758 if (conn && !ev->status)
4759 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4760
4761 hci_dev_unlock(hdev);
4762 }
4763
4764 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4765 struct sk_buff *skb)
4766 {
4767 struct hci_ev_pscan_rep_mode *ev = data;
4768 struct inquiry_entry *ie;
4769
4770 bt_dev_dbg(hdev, "");
4771
4772 hci_dev_lock(hdev);
4773
4774 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4775 if (ie) {
4776 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4777 ie->timestamp = jiffies;
4778 }
4779
4780 hci_dev_unlock(hdev);
4781 }
4782
4783 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4784 struct sk_buff *skb)
4785 {
4786 struct hci_ev_inquiry_result_rssi *ev = edata;
4787 struct inquiry_data data;
4788 int i;
4789
4790 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4791
4792 if (!ev->num)
4793 return;
4794
4795 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4796 return;
4797
4798 hci_dev_lock(hdev);
4799
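	/* Controllers may use either of two response layouts for this
	 * event: one that includes the Page_Scan_Mode field
	 * (inquiry_info_rssi_pscan) and one that omits it
	 * (inquiry_info_rssi).  Use the total event length to tell the
	 * two formats apart.
	 */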
4800 if (skb->len == array_size(ev->num,
4801 sizeof(struct inquiry_info_rssi_pscan))) {
4802 struct inquiry_info_rssi_pscan *info;
4803
4804 for (i = 0; i < ev->num; i++) {
4805 u32 flags;
4806
4807 info = hci_ev_skb_pull(hdev, skb,
4808 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4809 sizeof(*info));
4810 if (!info) {
4811 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4812 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4813 goto unlock;
4814 }
4815
4816 bacpy(&data.bdaddr, &info->bdaddr);
4817 data.pscan_rep_mode = info->pscan_rep_mode;
4818 data.pscan_period_mode = info->pscan_period_mode;
4819 data.pscan_mode = info->pscan_mode;
4820 memcpy(data.dev_class, info->dev_class, 3);
4821 data.clock_offset = info->clock_offset;
4822 data.rssi = info->rssi;
4823 data.ssp_mode = 0x00;
4824
4825 flags = hci_inquiry_cache_update(hdev, &data, false);
4826
4827 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4828 info->dev_class, info->rssi,
4829 flags, NULL, 0, NULL, 0, 0);
4830 }
4831 } else if (skb->len == array_size(ev->num,
4832 sizeof(struct inquiry_info_rssi))) {
4833 struct inquiry_info_rssi *info;
4834
4835 for (i = 0; i < ev->num; i++) {
4836 u32 flags;
4837
4838 info = hci_ev_skb_pull(hdev, skb,
4839 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4840 sizeof(*info));
4841 if (!info) {
4842 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4843 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4844 goto unlock;
4845 }
4846
4847 bacpy(&data.bdaddr, &info->bdaddr);
4848 data.pscan_rep_mode = info->pscan_rep_mode;
4849 data.pscan_period_mode = info->pscan_period_mode;
4850 data.pscan_mode = 0x00;
4851 memcpy(data.dev_class, info->dev_class, 3);
4852 data.clock_offset = info->clock_offset;
4853 data.rssi = info->rssi;
4854 data.ssp_mode = 0x00;
4855
4856 flags = hci_inquiry_cache_update(hdev, &data, false);
4857
4858 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4859 info->dev_class, info->rssi,
4860 flags, NULL, 0, NULL, 0, 0);
4861 }
4862 } else {
4863 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4864 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4865 }
4866 unlock:
4867 hci_dev_unlock(hdev);
4868 }
4869
4870 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4871 struct sk_buff *skb)
4872 {
4873 struct hci_ev_remote_ext_features *ev = data;
4874 struct hci_conn *conn;
4875
4876 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4877
4878 hci_dev_lock(hdev);
4879
4880 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4881 if (!conn)
4882 goto unlock;
4883
4884 if (ev->page < HCI_MAX_PAGES)
4885 memcpy(conn->features[ev->page], ev->features, 8);
4886
4887 if (!ev->status && ev->page == 0x01) {
4888 struct inquiry_entry *ie;
4889
4890 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4891 if (ie)
4892 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4893
4894 if (ev->features[0] & LMP_HOST_SSP) {
4895 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4896 } else {
4897 			/* The Bluetooth specification mandates that
4898 * Extended Inquiry Results are only used when Secure
4899 * Simple Pairing is enabled, but some devices violate
4900 * this.
4901 *
4902 * To make these devices work, the internal SSP
4903 * enabled flag needs to be cleared if the remote host
4904 * features do not indicate SSP support */
4905 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4906 }
4907
4908 if (ev->features[0] & LMP_HOST_SC)
4909 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4910 }
4911
4912 if (conn->state != BT_CONFIG)
4913 goto unlock;
4914
4915 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4916 struct hci_cp_remote_name_req cp;
4917 memset(&cp, 0, sizeof(cp));
4918 bacpy(&cp.bdaddr, &conn->dst);
4919 cp.pscan_rep_mode = 0x02;
4920 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4921 } else {
4922 mgmt_device_connected(hdev, conn, NULL, 0);
4923 }
4924
4925 if (!hci_outgoing_auth_needed(hdev, conn)) {
4926 conn->state = BT_CONNECTED;
4927 hci_connect_cfm(conn, ev->status);
4928 hci_conn_drop(conn);
4929 }
4930
4931 unlock:
4932 hci_dev_unlock(hdev);
4933 }
4934
4935 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4936 struct sk_buff *skb)
4937 {
4938 struct hci_ev_sync_conn_complete *ev = data;
4939 struct hci_conn *conn;
4940 u8 status = ev->status;
4941
4942 switch (ev->link_type) {
4943 case SCO_LINK:
4944 case ESCO_LINK:
4945 break;
4946 default:
4947 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4948 * for HCI_Synchronous_Connection_Complete is limited to
4949 * either SCO or eSCO
4950 */
4951 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4952 return;
4953 }
4954
4955 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4956
4957 hci_dev_lock(hdev);
4958
4959 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4960 if (!conn) {
4961 if (ev->link_type == ESCO_LINK)
4962 goto unlock;
4963
4964 /* When the link type in the event indicates SCO connection
4965 * and lookup of the connection object fails, then check
4966 * if an eSCO connection object exists.
4967 *
4968 		 * The core limits the synchronous connections to either
4969 		 * SCO or eSCO. The eSCO connection is preferred and
4970 		 * attempted first; until it is successfully established,
4971 		 * the link type will be hinted as eSCO.
4972 */
4973 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4974 if (!conn)
4975 goto unlock;
4976 }
4977
4978 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4979 * Processing it more than once per connection can corrupt kernel memory.
4980 *
4981 * As the connection handle is set here for the first time, it indicates
4982 * whether the connection is already set up.
4983 */
4984 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
4985 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4986 goto unlock;
4987 }
4988
4989 switch (status) {
4990 case 0x00:
4991 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
4992 if (status) {
4993 conn->state = BT_CLOSED;
4994 break;
4995 }
4996
4997 conn->state = BT_CONNECTED;
4998 conn->type = ev->link_type;
4999
5000 hci_debugfs_create_conn(conn);
5001 hci_conn_add_sysfs(conn);
5002 break;
5003
5004 case 0x10: /* Connection Accept Timeout */
5005 case 0x0d: /* Connection Rejected due to Limited Resources */
5006 case 0x11: /* Unsupported Feature or Parameter Value */
5007 case 0x1c: /* SCO interval rejected */
5008 case 0x1a: /* Unsupported Remote Feature */
5009 case 0x1e: /* Invalid LMP Parameters */
5010 case 0x1f: /* Unspecified error */
5011 case 0x20: /* Unsupported LMP Parameter value */
5012 if (conn->out) {
5013 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5014 (hdev->esco_type & EDR_ESCO_MASK);
5015 if (hci_setup_sync(conn, conn->parent->handle))
5016 goto unlock;
5017 }
5018 fallthrough;
5019
5020 default:
5021 conn->state = BT_CLOSED;
5022 break;
5023 }
5024
5025 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5026 	/* Notify only for SCO over the HCI transport data path, which is
5027 	 * indicated by zero; a non-zero value denotes a non-HCI transport data path
5028 */
5029 if (conn->codec.data_path == 0 && hdev->notify) {
5030 switch (ev->air_mode) {
5031 case 0x02:
5032 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5033 break;
5034 case 0x03:
5035 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5036 break;
5037 }
5038 }
5039
5040 hci_connect_cfm(conn, status);
5041 if (status)
5042 hci_conn_del(conn);
5043
5044 unlock:
5045 hci_dev_unlock(hdev);
5046 }
5047
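/* EIR data is a sequence of length-prefixed fields: each field starts
 * with a length octet that covers the type octet plus the payload, and
 * the significant part ends at a zero length octet.  For example,
 * 0x04 0x09 'K' 'b' 'd' encodes a Complete Local Name of "Kbd".
 */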
5048 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5049 {
5050 size_t parsed = 0;
5051
5052 while (parsed < eir_len) {
5053 u8 field_len = eir[0];
5054
5055 if (field_len == 0)
5056 return parsed;
5057
5058 parsed += field_len + 1;
5059 eir += field_len + 1;
5060 }
5061
5062 return eir_len;
5063 }
5064
5065 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5066 struct sk_buff *skb)
5067 {
5068 struct hci_ev_ext_inquiry_result *ev = edata;
5069 struct inquiry_data data;
5070 size_t eir_len;
5071 int i;
5072
5073 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5074 flex_array_size(ev, info, ev->num)))
5075 return;
5076
5077 bt_dev_dbg(hdev, "num %d", ev->num);
5078
5079 if (!ev->num)
5080 return;
5081
5082 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5083 return;
5084
5085 hci_dev_lock(hdev);
5086
5087 for (i = 0; i < ev->num; i++) {
5088 struct extended_inquiry_info *info = &ev->info[i];
5089 u32 flags;
5090 bool name_known;
5091
5092 bacpy(&data.bdaddr, &info->bdaddr);
5093 data.pscan_rep_mode = info->pscan_rep_mode;
5094 data.pscan_period_mode = info->pscan_period_mode;
5095 data.pscan_mode = 0x00;
5096 memcpy(data.dev_class, info->dev_class, 3);
5097 data.clock_offset = info->clock_offset;
5098 data.rssi = info->rssi;
5099 data.ssp_mode = 0x01;
5100
5101 if (hci_dev_test_flag(hdev, HCI_MGMT))
5102 name_known = eir_get_data(info->data,
5103 sizeof(info->data),
5104 EIR_NAME_COMPLETE, NULL);
5105 else
5106 name_known = true;
5107
5108 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5109
5110 eir_len = eir_get_length(info->data, sizeof(info->data));
5111
5112 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5113 info->dev_class, info->rssi,
5114 flags, info->data, eir_len, NULL, 0, 0);
5115 }
5116
5117 hci_dev_unlock(hdev);
5118 }
5119
5120 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5121 struct sk_buff *skb)
5122 {
5123 struct hci_ev_key_refresh_complete *ev = data;
5124 struct hci_conn *conn;
5125
5126 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5127 __le16_to_cpu(ev->handle));
5128
5129 hci_dev_lock(hdev);
5130
5131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5132 if (!conn)
5133 goto unlock;
5134
5135 /* For BR/EDR the necessary steps are taken through the
5136 * auth_complete event.
5137 */
5138 if (conn->type != LE_LINK)
5139 goto unlock;
5140
5141 if (!ev->status)
5142 conn->sec_level = conn->pending_sec_level;
5143
5144 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5145
5146 if (ev->status && conn->state == BT_CONNECTED) {
5147 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5148 hci_conn_drop(conn);
5149 goto unlock;
5150 }
5151
5152 if (conn->state == BT_CONFIG) {
5153 if (!ev->status)
5154 conn->state = BT_CONNECTED;
5155
5156 hci_connect_cfm(conn, ev->status);
5157 hci_conn_drop(conn);
5158 } else {
5159 hci_auth_cfm(conn, ev->status);
5160
5161 hci_conn_hold(conn);
5162 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5163 hci_conn_drop(conn);
5164 }
5165
5166 unlock:
5167 hci_dev_unlock(hdev);
5168 }
5169
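/* Authentication requirement values encode MITM protection in bit 0
 * (e.g. HCI_AT_NO_BONDING = 0x00 vs HCI_AT_NO_BONDING_MITM = 0x01),
 * hence the 0x01 and ~0x01 masks below.
 */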
5170 static u8 hci_get_auth_req(struct hci_conn *conn)
5171 {
5172 /* If remote requests no-bonding follow that lead */
5173 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5174 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5175 return conn->remote_auth | (conn->auth_type & 0x01);
5176
5177 /* If both remote and local have enough IO capabilities, require
5178 * MITM protection
5179 */
5180 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5181 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5182 return conn->remote_auth | 0x01;
5183
5184 /* No MITM protection possible so ignore remote requirement */
5185 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5186 }
5187
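/* Compute the OOB_Data_Present value for the IO Capability Request
 * Reply: 0x00 = no OOB data, 0x01 = P-192 values present,
 * 0x02 = P-256 values present.
 */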
5188 static u8 bredr_oob_data_present(struct hci_conn *conn)
5189 {
5190 struct hci_dev *hdev = conn->hdev;
5191 struct oob_data *data;
5192
5193 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5194 if (!data)
5195 return 0x00;
5196
5197 if (bredr_sc_enabled(hdev)) {
5198 /* When Secure Connections is enabled, then just
5199 * return the present value stored with the OOB
5200 * data. The stored value contains the right present
5201 * information. However it can only be trusted when
5202 		 * not in Secure Connections Only mode.
5203 */
5204 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5205 return data->present;
5206
5207 /* When Secure Connections Only mode is enabled, then
5208 * the P-256 values are required. If they are not
5209 * available, then do not declare that OOB data is
5210 * present.
5211 */
5212 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5213 !crypto_memneq(data->hash256, ZERO_KEY, 16))
5214 return 0x00;
5215
5216 return 0x02;
5217 }
5218
5219 	/* When Secure Connections is not enabled or actually
5220 	 * not supported by the hardware, then check whether the
5221 	 * P-192 data values are present.
5222 */
5223 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5224 !crypto_memneq(data->hash192, ZERO_KEY, 16))
5225 return 0x00;
5226
5227 return 0x01;
5228 }
5229
5230 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5231 struct sk_buff *skb)
5232 {
5233 struct hci_ev_io_capa_request *ev = data;
5234 struct hci_conn *conn;
5235
5236 bt_dev_dbg(hdev, "");
5237
5238 hci_dev_lock(hdev);
5239
5240 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5241 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5242 goto unlock;
5243
5244 /* Assume remote supports SSP since it has triggered this event */
5245 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5246
5247 hci_conn_hold(conn);
5248
5249 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5250 goto unlock;
5251
5252 	/* Allow pairing if we're bondable, if we're the initiator of
5253 	 * the pairing, or if the remote is not requesting bonding.
5254 */
5255 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5256 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5257 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5258 struct hci_cp_io_capability_reply cp;
5259
5260 bacpy(&cp.bdaddr, &ev->bdaddr);
5261 		/* Change the IO capability from KeyboardDisplay to
5262 		 * DisplayYesNo as the former is not supported by the BR/EDR spec. */
5263 cp.capability = (conn->io_capability == 0x04) ?
5264 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5265
5266 /* If we are initiators, there is no remote information yet */
5267 if (conn->remote_auth == 0xff) {
5268 /* Request MITM protection if our IO caps allow it
5269 * except for the no-bonding case.
5270 */
5271 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5272 conn->auth_type != HCI_AT_NO_BONDING)
5273 conn->auth_type |= 0x01;
5274 } else {
5275 conn->auth_type = hci_get_auth_req(conn);
5276 }
5277
5278 /* If we're not bondable, force one of the non-bondable
5279 * authentication requirement values.
5280 */
5281 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5282 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5283
5284 cp.authentication = conn->auth_type;
5285 cp.oob_data = bredr_oob_data_present(conn);
5286
5287 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5288 sizeof(cp), &cp);
5289 } else {
5290 struct hci_cp_io_capability_neg_reply cp;
5291
5292 bacpy(&cp.bdaddr, &ev->bdaddr);
5293 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5294
5295 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5296 sizeof(cp), &cp);
5297 }
5298
5299 unlock:
5300 hci_dev_unlock(hdev);
5301 }
5302
5303 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5304 struct sk_buff *skb)
5305 {
5306 struct hci_ev_io_capa_reply *ev = data;
5307 struct hci_conn *conn;
5308
5309 bt_dev_dbg(hdev, "");
5310
5311 hci_dev_lock(hdev);
5312
5313 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5314 if (!conn)
5315 goto unlock;
5316
5317 conn->remote_cap = ev->capability;
5318 conn->remote_auth = ev->authentication;
5319
5320 unlock:
5321 hci_dev_unlock(hdev);
5322 }
5323
5324 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5325 struct sk_buff *skb)
5326 {
5327 struct hci_ev_user_confirm_req *ev = data;
5328 int loc_mitm, rem_mitm, confirm_hint = 0;
5329 struct hci_conn *conn;
5330
5331 bt_dev_dbg(hdev, "");
5332
5333 hci_dev_lock(hdev);
5334
5335 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5336 goto unlock;
5337
5338 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5339 if (!conn)
5340 goto unlock;
5341
5342 loc_mitm = (conn->auth_type & 0x01);
5343 rem_mitm = (conn->remote_auth & 0x01);
5344
5345 /* If we require MITM but the remote device can't provide that
5346 * (it has NoInputNoOutput) then reject the confirmation
5347 * request. We check the security level here since it doesn't
5348 * necessarily match conn->auth_type.
5349 */
5350 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5351 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5352 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5353 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5354 sizeof(ev->bdaddr), &ev->bdaddr);
5355 goto unlock;
5356 }
5357
5358 	/* If neither side requires MITM protection, use the JUST_CFM method */
5359 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5360 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5361
5362 		/* If we're not the initiator of the authentication request
5363 		 * and the local IO capability is not NoInputNoOutput, use the
5364 		 * JUST_WORKS method (mgmt_user_confirm with confirm_hint set to 1).
5365 */
5366 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5367 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
5368 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5369 confirm_hint = 1;
5370 goto confirm;
5371 }
5372
5373 		/* If a link key already exists on the local host, leave the
5374 		 * decision to user space, since the remote device could be
5375 		 * either legitimate or malicious.
5376 */
5377 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5378 bt_dev_dbg(hdev, "Local host already has link key");
5379 confirm_hint = 1;
5380 goto confirm;
5381 }
5382
5383 BT_DBG("Auto-accept of user confirmation with %ums delay",
5384 hdev->auto_accept_delay);
5385
5386 if (hdev->auto_accept_delay > 0) {
5387 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5388 queue_delayed_work(conn->hdev->workqueue,
5389 &conn->auto_accept_work, delay);
5390 goto unlock;
5391 }
5392
5393 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5394 sizeof(ev->bdaddr), &ev->bdaddr);
5395 goto unlock;
5396 }
5397
5398 confirm:
5399 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5400 le32_to_cpu(ev->passkey), confirm_hint);
5401
5402 unlock:
5403 hci_dev_unlock(hdev);
5404 }
5405
5406 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5407 struct sk_buff *skb)
5408 {
5409 struct hci_ev_user_passkey_req *ev = data;
5410
5411 bt_dev_dbg(hdev, "");
5412
5413 if (hci_dev_test_flag(hdev, HCI_MGMT))
5414 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5415 }
5416
5417 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5418 struct sk_buff *skb)
5419 {
5420 struct hci_ev_user_passkey_notify *ev = data;
5421 struct hci_conn *conn;
5422
5423 bt_dev_dbg(hdev, "");
5424
5425 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5426 if (!conn)
5427 return;
5428
5429 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5430 conn->passkey_entered = 0;
5431
5432 if (hci_dev_test_flag(hdev, HCI_MGMT))
5433 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5434 conn->dst_type, conn->passkey_notify,
5435 conn->passkey_entered);
5436 }
5437
5438 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5439 struct sk_buff *skb)
5440 {
5441 struct hci_ev_keypress_notify *ev = data;
5442 struct hci_conn *conn;
5443
5444 bt_dev_dbg(hdev, "");
5445
5446 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5447 if (!conn)
5448 return;
5449
5450 switch (ev->type) {
5451 case HCI_KEYPRESS_STARTED:
5452 conn->passkey_entered = 0;
5453 return;
5454
5455 case HCI_KEYPRESS_ENTERED:
5456 conn->passkey_entered++;
5457 break;
5458
5459 case HCI_KEYPRESS_ERASED:
5460 conn->passkey_entered--;
5461 break;
5462
5463 case HCI_KEYPRESS_CLEARED:
5464 conn->passkey_entered = 0;
5465 break;
5466
5467 case HCI_KEYPRESS_COMPLETED:
5468 return;
5469 }
5470
5471 if (hci_dev_test_flag(hdev, HCI_MGMT))
5472 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5473 conn->dst_type, conn->passkey_notify,
5474 conn->passkey_entered);
5475 }
5476
5477 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5478 struct sk_buff *skb)
5479 {
5480 struct hci_ev_simple_pair_complete *ev = data;
5481 struct hci_conn *conn;
5482
5483 bt_dev_dbg(hdev, "");
5484
5485 hci_dev_lock(hdev);
5486
5487 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5488 if (!conn || !hci_conn_ssp_enabled(conn))
5489 goto unlock;
5490
5491 /* Reset the authentication requirement to unknown */
5492 conn->remote_auth = 0xff;
5493
5494 /* To avoid duplicate auth_failed events to user space we check
5495 * the HCI_CONN_AUTH_PEND flag which will be set if we
5496 * initiated the authentication. A traditional auth_complete
5497 	 * event is always produced as initiator and is also mapped to
5498 * the mgmt_auth_failed event */
5499 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5500 mgmt_auth_failed(conn, ev->status);
5501
5502 hci_conn_drop(conn);
5503
5504 unlock:
5505 hci_dev_unlock(hdev);
5506 }
5507
5508 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5509 struct sk_buff *skb)
5510 {
5511 struct hci_ev_remote_host_features *ev = data;
5512 struct inquiry_entry *ie;
5513 struct hci_conn *conn;
5514
5515 bt_dev_dbg(hdev, "");
5516
5517 hci_dev_lock(hdev);
5518
5519 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5520 if (conn)
5521 memcpy(conn->features[1], ev->features, 8);
5522
5523 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5524 if (ie)
5525 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5526
5527 hci_dev_unlock(hdev);
5528 }
5529
5530 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5531 struct sk_buff *skb)
5532 {
5533 struct hci_ev_remote_oob_data_request *ev = edata;
5534 struct oob_data *data;
5535
5536 bt_dev_dbg(hdev, "");
5537
5538 hci_dev_lock(hdev);
5539
5540 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5541 goto unlock;
5542
5543 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5544 if (!data) {
5545 struct hci_cp_remote_oob_data_neg_reply cp;
5546
5547 bacpy(&cp.bdaddr, &ev->bdaddr);
5548 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5549 sizeof(cp), &cp);
5550 goto unlock;
5551 }
5552
5553 if (bredr_sc_enabled(hdev)) {
5554 struct hci_cp_remote_oob_ext_data_reply cp;
5555
5556 bacpy(&cp.bdaddr, &ev->bdaddr);
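		/* In Secure Connections Only mode the legacy P-192 values
		 * must not be used, so zero them and rely on the P-256
		 * values alone.
		 */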
5557 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5558 memset(cp.hash192, 0, sizeof(cp.hash192));
5559 memset(cp.rand192, 0, sizeof(cp.rand192));
5560 } else {
5561 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5562 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5563 }
5564 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5565 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5566
5567 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5568 sizeof(cp), &cp);
5569 } else {
5570 struct hci_cp_remote_oob_data_reply cp;
5571
5572 bacpy(&cp.bdaddr, &ev->bdaddr);
5573 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5574 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5575
5576 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5577 sizeof(cp), &cp);
5578 }
5579
5580 unlock:
5581 hci_dev_unlock(hdev);
5582 }
5583
5584 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5585 u8 bdaddr_type, bdaddr_t *local_rpa)
5586 {
5587 if (conn->out) {
5588 conn->dst_type = bdaddr_type;
5589 conn->resp_addr_type = bdaddr_type;
5590 bacpy(&conn->resp_addr, bdaddr);
5591
5592 /* If the controller has set a Local RPA then it must be
5593 * used instead of hdev->rpa.
5594 */
5595 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5596 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5597 bacpy(&conn->init_addr, local_rpa);
5598 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5599 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5600 bacpy(&conn->init_addr, &conn->hdev->rpa);
5601 } else {
5602 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5603 &conn->init_addr_type);
5604 }
5605 } else {
5606 conn->resp_addr_type = conn->hdev->adv_addr_type;
5607 /* If the controller has set a Local RPA then it must be
5608 * used instead of hdev->rpa.
5609 */
5610 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5611 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5612 bacpy(&conn->resp_addr, local_rpa);
5613 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5614 /* In case of ext adv, resp_addr will be updated in
5615 * Adv Terminated event.
5616 */
5617 if (!ext_adv_capable(conn->hdev))
5618 bacpy(&conn->resp_addr,
5619 &conn->hdev->random_addr);
5620 } else {
5621 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5622 }
5623
5624 conn->init_addr_type = bdaddr_type;
5625 bacpy(&conn->init_addr, bdaddr);
5626
5627 /* For incoming connections, set the default minimum
5628 * and maximum connection interval. They will be used
5629 * to check if the parameters are in range and if not
5630 * trigger the connection update procedure.
5631 */
5632 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5633 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5634 }
5635 }
5636
5637 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5638 bdaddr_t *bdaddr, u8 bdaddr_type,
5639 bdaddr_t *local_rpa, u8 role, u16 handle,
5640 u16 interval, u16 latency,
5641 u16 supervision_timeout)
5642 {
5643 struct hci_conn_params *params;
5644 struct hci_conn *conn;
5645 struct smp_irk *irk;
5646 u8 addr_type;
5647
5648 hci_dev_lock(hdev);
5649
5650 /* All controllers implicitly stop advertising in the event of a
5651 * connection, so ensure that the state bit is cleared.
5652 */
5653 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5654
5655 /* Check for existing connection:
5656 *
5657 * 1. If it doesn't exist then use the role to create a new object.
5658 * 2. If it does exist confirm that it is connecting/BT_CONNECT in case
5659 * of initiator/master role since there could be a collision where
5660 * either side is attempting to connect or something like fuzz
5661 * testing is trying to play tricks to destroy the hcon object before
5662 * it even attempts to connect (e.g. hcon->state == BT_OPEN).
5663 */
5664 conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr);
5665 if (!conn ||
5666 (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
5667 /* In case of error status and there is no connection pending
5668 * just unlock as there is nothing to cleanup.
5669 */
5670 if (status)
5671 goto unlock;
5672
5673 conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5674 if (IS_ERR(conn)) {
5675 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
5676 goto unlock;
5677 }
5678
5679 conn->dst_type = bdaddr_type;
5680
5681 /* If we didn't have a hci_conn object previously
5682 * but we're in central role this must be something
5683 * initiated using an accept list. Since accept list based
5684 * connections are not "first class citizens" we don't
5685 * have full tracking of them. Therefore, we go ahead
5686 * with a "best effort" approach of determining the
5687 * initiator address based on the HCI_PRIVACY flag.
5688 */
5689 if (conn->out) {
5690 conn->resp_addr_type = bdaddr_type;
5691 bacpy(&conn->resp_addr, bdaddr);
5692 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5693 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5694 bacpy(&conn->init_addr, &hdev->rpa);
5695 } else {
5696 hci_copy_identity_address(hdev,
5697 &conn->init_addr,
5698 &conn->init_addr_type);
5699 }
5700 }
5701 } else {
5702 cancel_delayed_work(&conn->le_conn_timeout);
5703 }
5704
5705 /* The HCI_LE_Connection_Complete event is only sent once per connection.
5706 * Processing it more than once per connection can corrupt kernel memory.
5707 *
5708 * As the connection handle is set here for the first time, it indicates
5709 * whether the connection is already set up.
5710 */
5711 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5712 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5713 goto unlock;
5714 }
5715
5716 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5717
5718 /* Lookup the identity address from the stored connection
5719 * address and address type.
5720 *
5721 * When establishing connections to an identity address, the
5722 * connection procedure will store the resolvable random
5723 * address first. Now if it can be converted back into the
5724 * identity address, start using the identity address from
5725 * now on.
5726 */
5727 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5728 if (irk) {
5729 bacpy(&conn->dst, &irk->bdaddr);
5730 conn->dst_type = irk->addr_type;
5731 }
5732
5733 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5734
5735 /* All connection failure handling is taken care of by the
5736 * hci_conn_failed function which is triggered by the HCI
5737 * request completion callbacks used for connecting.
5738 */
5739 if (status || hci_conn_set_handle(conn, handle))
5740 goto unlock;
5741
5742 /* Drop the connection if it has been aborted */
5743 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5744 hci_conn_drop(conn);
5745 goto unlock;
5746 }
5747
5748 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5749 addr_type = BDADDR_LE_PUBLIC;
5750 else
5751 addr_type = BDADDR_LE_RANDOM;
5752
5753 /* Drop the connection if the device is blocked */
5754 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5755 hci_conn_drop(conn);
5756 goto unlock;
5757 }
5758
5759 mgmt_device_connected(hdev, conn, NULL, 0);
5760
5761 conn->sec_level = BT_SECURITY_LOW;
5762 conn->state = BT_CONFIG;
5763
5764 /* Store current advertising instance as connection advertising instance
5765 * when software rotation is in use so it can be re-enabled when
5766 * disconnected.
5767 */
5768 if (!ext_adv_capable(hdev))
5769 conn->adv_instance = hdev->cur_adv_instance;
5770
5771 conn->le_conn_interval = interval;
5772 conn->le_conn_latency = latency;
5773 conn->le_supv_timeout = supervision_timeout;
5774
5775 hci_debugfs_create_conn(conn);
5776 hci_conn_add_sysfs(conn);
5777
5778 /* The remote features procedure is defined for central
5779 * role only. So only in case of an initiated connection
5780 * request the remote features.
5781 *
5782 * If the local controller supports peripheral-initiated features
5783 * exchange, then requesting the remote features in peripheral
5784 * role is possible. Otherwise just transition into the
5785 * connected state without requesting the remote features.
5786 */
5787 if (conn->out ||
5788 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5789 struct hci_cp_le_read_remote_features cp;
5790
5791 cp.handle = __cpu_to_le16(conn->handle);
5792
5793 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5794 sizeof(cp), &cp);
5795
5796 hci_conn_hold(conn);
5797 } else {
5798 conn->state = BT_CONNECTED;
5799 hci_connect_cfm(conn, status);
5800 }
5801
5802 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5803 conn->dst_type);
5804 if (params) {
5805 hci_pend_le_list_del_init(params);
5806 if (params->conn) {
5807 hci_conn_drop(params->conn);
5808 hci_conn_put(params->conn);
5809 params->conn = NULL;
5810 }
5811 }
5812
5813 unlock:
5814 hci_update_passive_scan(hdev);
5815 hci_dev_unlock(hdev);
5816 }
5817
5818 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5819 struct sk_buff *skb)
5820 {
5821 struct hci_ev_le_conn_complete *ev = data;
5822
5823 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5824
5825 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5826 NULL, ev->role, le16_to_cpu(ev->handle),
5827 le16_to_cpu(ev->interval),
5828 le16_to_cpu(ev->latency),
5829 le16_to_cpu(ev->supervision_timeout));
5830 }
5831
5832 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5833 struct sk_buff *skb)
5834 {
5835 struct hci_ev_le_enh_conn_complete *ev = data;
5836
5837 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5838
5839 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5840 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5841 le16_to_cpu(ev->interval),
5842 le16_to_cpu(ev->latency),
5843 le16_to_cpu(ev->supervision_timeout));
5844 }
5845
5846 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5847 struct sk_buff *skb)
5848 {
5849 struct hci_evt_le_ext_adv_set_term *ev = data;
5850 struct hci_conn *conn;
5851 struct adv_info *adv, *n;
5852
5853 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5854
5855 /* The Bluetooth Core 5.3 specification clearly states that this event
5856 * shall not be sent when the Host disables the advertising set. So in
5857 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5858 *
5859 * When the Host disables an advertising set, all cleanup is done via
5860 * its command callback and does not need to be duplicated here.
5861 */
5862 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5863 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5864 return;
5865 }
5866
5867 hci_dev_lock(hdev);
5868
5869 adv = hci_find_adv_instance(hdev, ev->handle);
5870
5871 if (ev->status) {
5872 if (!adv)
5873 goto unlock;
5874
5875 /* Remove advertising as it has been terminated */
5876 hci_remove_adv_instance(hdev, ev->handle);
5877 mgmt_advertising_removed(NULL, hdev, ev->handle);
5878
5879 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5880 if (adv->enabled)
5881 goto unlock;
5882 }
5883
5884 /* We are no longer advertising, clear HCI_LE_ADV */
5885 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5886 goto unlock;
5887 }
5888
5889 if (adv)
5890 adv->enabled = false;
5891
5892 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5893 if (conn) {
5894 /* Store handle in the connection so the correct advertising
5895 * instance can be re-enabled when disconnected.
5896 */
5897 conn->adv_instance = ev->handle;
5898
5899 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5900 bacmp(&conn->resp_addr, BDADDR_ANY))
5901 goto unlock;
5902
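/* Advertising set 0x00 uses the controller-wide random address;
 * any other set uses the per-instance random address copied below
 * from the matching adv_info.
 */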
5903 if (!ev->handle) {
5904 bacpy(&conn->resp_addr, &hdev->random_addr);
5905 goto unlock;
5906 }
5907
5908 if (adv)
5909 bacpy(&conn->resp_addr, &adv->random_addr);
5910 }
5911
5912 unlock:
5913 hci_dev_unlock(hdev);
5914 }
5915
5916 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5917 struct sk_buff *skb)
5918 {
5919 struct hci_ev_le_conn_update_complete *ev = data;
5920 struct hci_conn *conn;
5921
5922 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5923
5924 if (ev->status)
5925 return;
5926
5927 hci_dev_lock(hdev);
5928
5929 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5930 if (conn) {
5931 conn->le_conn_interval = le16_to_cpu(ev->interval);
5932 conn->le_conn_latency = le16_to_cpu(ev->latency);
5933 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5934 }
5935
5936 hci_dev_unlock(hdev);
5937 }
5938
5939 /* This function requires the caller holds hdev->lock */
5940 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5941 bdaddr_t *addr,
5942 u8 addr_type, bool addr_resolved,
5943 u8 adv_type, u8 phy, u8 sec_phy)
5944 {
5945 struct hci_conn *conn;
5946 struct hci_conn_params *params;
5947
5948 /* If the event is not connectable don't proceed further */
5949 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5950 return NULL;
5951
5952 /* Ignore if the device is blocked or hdev is suspended */
5953 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5954 hdev->suspended)
5955 return NULL;
5956
5957 /* Most controllers will fail if we try to create new connections
5958 * while we have an existing one in peripheral role.
5959 */
5960 if (hdev->conn_hash.le_num_peripheral > 0 &&
5961 (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) ||
5962 !(hdev->le_states[3] & 0x10)))
5963 return NULL;
5964
5965 /* If we're not connectable, only connect devices that we have in
5966 * our pend_le_conns list.
5967 */
5968 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5969 addr_type);
5970 if (!params)
5971 return NULL;
5972
5973 if (!params->explicit_connect) {
5974 switch (params->auto_connect) {
5975 case HCI_AUTO_CONN_DIRECT:
5976 /* Only devices advertising with ADV_DIRECT_IND trigger
5977 * a connection attempt. This allows incoming
5978 * connections from peripheral devices.
5979 */
5980 if (adv_type != LE_ADV_DIRECT_IND)
5981 return NULL;
5982 break;
5983 case HCI_AUTO_CONN_ALWAYS:
5984 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5985 * trigger a connection attempt. This means that
5986 * incoming connections from peripheral devices are
5987 * accepted and outgoing connections to peripheral
5988 * devices are established when found.
5989 */
5990 break;
5991 default:
5992 return NULL;
5993 }
5994 }
5995
5996 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5997 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5998 HCI_ROLE_MASTER, phy, sec_phy);
5999 if (!IS_ERR(conn)) {
6000 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6001 * by the higher layer that tried to connect; if not, then
6002 * store the pointer since we don't really have any
6003 * other owner of the object besides the params that
6004 * triggered it. This way we can abort the connection if
6005 * the parameters get removed and keep the reference
6006 * count consistent once the connection is established.
6007 */
6008
6009 if (!params->explicit_connect)
6010 params->conn = hci_conn_get(conn);
6011
6012 return conn;
6013 }
6014
6015 switch (PTR_ERR(conn)) {
6016 case -EBUSY:
6017 /* If hci_connect() returns -EBUSY it means there is already
6018 * an LE connection attempt going on. Since controllers don't
6019 * support more than one connection attempt at a time, we
6020 * don't consider this an error case.
6021 */
6022 break;
6023 default:
6024 bt_dev_dbg(hdev, "Failed to connect: err %ld", PTR_ERR(conn));
6025 return NULL;
6026 }
6027
6028 return NULL;
6029 }
6030
6031 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6032 u8 bdaddr_type, bdaddr_t *direct_addr,
6033 u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
6034 u8 *data, u8 len, bool ext_adv, bool ctl_time,
6035 u64 instant)
6036 {
6037 struct discovery_state *d = &hdev->discovery;
6038 struct smp_irk *irk;
6039 struct hci_conn *conn;
6040 bool match, bdaddr_resolved;
6041 u32 flags;
6042 u8 *ptr;
6043
6044 switch (type) {
6045 case LE_ADV_IND:
6046 case LE_ADV_DIRECT_IND:
6047 case LE_ADV_SCAN_IND:
6048 case LE_ADV_NONCONN_IND:
6049 case LE_ADV_SCAN_RSP:
6050 break;
6051 default:
6052 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6053 "type: 0x%02x", type);
6054 return;
6055 }
6056
6057 if (len > max_adv_len(hdev)) {
6058 bt_dev_err_ratelimited(hdev,
6059 "adv larger than maximum supported");
6060 return;
6061 }
6062
6063 /* Find the end of the data in case the report contains padded zero
6064 * bytes at the end causing an invalid length value.
6065 *
6066 * When data is NULL, len is 0 so there is no need for extra ptr
6067 * check as 'ptr < data + 0' is already false in that case.
6068 */
6069 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6070 if (ptr + 1 + *ptr > data + len)
6071 break;
6072 }
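/* Each AD structure is a length octet followed by that many bytes of
 * type and payload. For example, the flags element 02 01 06 occupies
 * three bytes, so a padded report of 02 01 06 00 00 is trimmed to a
 * length of 3 by the loop above.
 */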
6073
6074 /* Adjust for actual length. This handles the case when remote
6075 * device is advertising with incorrect data length.
6076 */
6077 len = ptr - data;
6078
6079 /* If the direct address is present, then this report is from
6080 * a LE Direct Advertising Report event. In that case it is
6081 * important to see if the address is matching the local
6082 * controller address.
6083 *
6084 * If local privacy is not enabled the controller shall not generate
6085 * such an event, since according to its documentation it is only
6086 * valid for filter_policy 0x02 and 0x03. The fact that it did
6087 * generate an LE Direct Advertising Report means it is probably
6088 * broken and won't generate any other event, which could break the
6089 * auto-connect logic. So when local privacy is not enabled, ignore
6090 * the direct_addr and treat this as a regular report.
6091 */
6092 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr &&
6093 hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6094 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6095 &bdaddr_resolved);
6096
6097 /* Only resolvable random addresses are valid for this
6098 * kind of report; others can be ignored.
6099 */
6100 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6101 return;
6102
6103 /* If the local IRK of the controller does not match
6104 * with the resolvable random address provided, then
6105 * this report can be ignored.
6106 */
6107 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6108 return;
6109 }
6110
6111 /* Check if we need to convert to identity address */
6112 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6113 if (irk) {
6114 bdaddr = &irk->bdaddr;
6115 bdaddr_type = irk->addr_type;
6116 }
6117
6118 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6119
6120 /* Check if we have been requested to connect to this device.
6121 *
6122 * direct_addr is set only for directed advertising reports (it is NULL
6123 * for advertising reports) and is already verified to be RPA above.
6124 */
6125 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6126 type, phy, sec_phy);
6127 if (!ext_adv && conn && type == LE_ADV_IND &&
6128 len <= max_adv_len(hdev)) {
6129 /* Store report for later inclusion by
6130 * mgmt_device_connected
6131 */
6132 memcpy(conn->le_adv_data, data, len);
6133 conn->le_adv_data_len = len;
6134 }
6135
6136 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6137 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6138 else
6139 flags = 0;
6140
6141 /* All scan results should be sent up for Mesh systems */
6142 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6143 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6144 rssi, flags, data, len, NULL, 0, instant);
6145 return;
6146 }
6147
6148 /* Passive scanning shouldn't trigger any device found events,
6149 * except for devices marked as CONN_REPORT for which we do send
6150 * device found events, or when advertisement monitoring was requested.
6151 */
6152 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6153 if (type == LE_ADV_DIRECT_IND)
6154 return;
6155
6156 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6157 bdaddr, bdaddr_type) &&
6158 idr_is_empty(&hdev->adv_monitors_idr))
6159 return;
6160
6161 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6162 rssi, flags, data, len, NULL, 0, 0);
6163 return;
6164 }
6165
6166 /* When receiving a scan response, then there is no way to
6167 * know if the remote device is connectable or not. However
6168 * since scan responses are merged with a previously seen
6169 * advertising report, the flags field from that report
6170 * will be used.
6171 *
6172 * In the unlikely case that a controller just sends a scan
6173 * response event that doesn't match the pending report, then
6174 * it is marked as a standalone SCAN_RSP.
6175 */
6176 if (type == LE_ADV_SCAN_RSP)
6177 flags = MGMT_DEV_FOUND_SCAN_RSP;
6178
6179 /* If there's nothing pending either store the data from this
6180 * event or send an immediate device found event if the data
6181 * should not be stored for later.
6182 */
6183 if (!has_pending_adv_report(hdev)) {
6184 /* If the report will trigger a SCAN_REQ store it for
6185 * later merging.
6186 */
6187 if (!ext_adv && (type == LE_ADV_IND ||
6188 type == LE_ADV_SCAN_IND)) {
6189 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6190 rssi, flags, data, len);
6191 return;
6192 }
6193
6194 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6195 rssi, flags, data, len, NULL, 0, 0);
6196 return;
6197 }
6198
6199 /* Check if the pending report is for the same device as the new one */
6200 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6201 bdaddr_type == d->last_adv_addr_type);
6202
6203 /* If the pending data doesn't match this report or this isn't a
6204 * scan response (e.g. we got a duplicate ADV_IND) then force
6205 * sending of the pending data.
6206 */
6207 if (type != LE_ADV_SCAN_RSP || !match) {
6208 /* Send out whatever is in the cache, but skip duplicates */
6209 if (!match)
6210 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6211 d->last_adv_addr_type, NULL,
6212 d->last_adv_rssi, d->last_adv_flags,
6213 d->last_adv_data,
6214 d->last_adv_data_len, NULL, 0, 0);
6215
6216 /* If the new report will trigger a SCAN_REQ store it for
6217 * later merging.
6218 */
6219 if (!ext_adv && (type == LE_ADV_IND ||
6220 type == LE_ADV_SCAN_IND)) {
6221 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6222 rssi, flags, data, len);
6223 return;
6224 }
6225
6226 /* The advertising reports cannot be merged, so clear
6227 * the pending report and send out a device found event.
6228 */
6229 clear_pending_adv_report(hdev);
6230 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6231 rssi, flags, data, len, NULL, 0, 0);
6232 return;
6233 }
6234
6235 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6236 * the new event is a SCAN_RSP. We can therefore proceed with
6237 * sending a merged device found event.
6238 */
6239 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6240 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6241 d->last_adv_data, d->last_adv_data_len, data, len, 0);
6242 clear_pending_adv_report(hdev);
6243 }
6244
6245 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6246 struct sk_buff *skb)
6247 {
6248 struct hci_ev_le_advertising_report *ev = data;
6249 u64 instant = jiffies;
6250
6251 if (!ev->num)
6252 return;
6253
6254 hci_dev_lock(hdev);
6255
6256 while (ev->num--) {
6257 struct hci_ev_le_advertising_info *info;
6258 s8 rssi;
6259
6260 info = hci_le_ev_skb_pull(hdev, skb,
6261 HCI_EV_LE_ADVERTISING_REPORT,
6262 sizeof(*info));
6263 if (!info)
6264 break;
6265
6266 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6267 info->length + 1))
6268 break;
6269
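/* The RSSI octet immediately follows the advertising data, which is
 * why info->length + 1 bytes were pulled above.
 */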
6270 if (info->length <= max_adv_len(hdev)) {
6271 rssi = info->data[info->length];
6272 process_adv_report(hdev, info->type, &info->bdaddr,
6273 info->bdaddr_type, NULL, 0,
6274 HCI_ADV_PHY_1M, 0, rssi,
6275 info->data, info->length, false,
6276 false, instant);
6277 } else {
6278 bt_dev_err(hdev, "Dropping invalid advertising data");
6279 }
6280 }
6281
6282 hci_dev_unlock(hdev);
6283 }
6284
6285 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6286 {
6287 u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;
6288
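/* Legacy PDUs are identified by their full event type value, while
 * extended PDUs are decoded from the individual property bits
 * checked below.
 */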
6289 if (!pdu_type)
6290 return LE_ADV_NONCONN_IND;
6291
6292 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6293 switch (evt_type) {
6294 case LE_LEGACY_ADV_IND:
6295 return LE_ADV_IND;
6296 case LE_LEGACY_ADV_DIRECT_IND:
6297 return LE_ADV_DIRECT_IND;
6298 case LE_LEGACY_ADV_SCAN_IND:
6299 return LE_ADV_SCAN_IND;
6300 case LE_LEGACY_NONCONN_IND:
6301 return LE_ADV_NONCONN_IND;
6302 case LE_LEGACY_SCAN_RSP_ADV:
6303 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6304 return LE_ADV_SCAN_RSP;
6305 }
6306
6307 goto invalid;
6308 }
6309
6310 if (evt_type & LE_EXT_ADV_CONN_IND) {
6311 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6312 return LE_ADV_DIRECT_IND;
6313
6314 return LE_ADV_IND;
6315 }
6316
6317 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6318 return LE_ADV_SCAN_RSP;
6319
6320 if (evt_type & LE_EXT_ADV_SCAN_IND)
6321 return LE_ADV_SCAN_IND;
6322
6323 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6324 return LE_ADV_NONCONN_IND;
6325
6326 invalid:
6327 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6328 evt_type);
6329
6330 return LE_ADV_INVALID;
6331 }
6332
6333 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6334 struct sk_buff *skb)
6335 {
6336 struct hci_ev_le_ext_adv_report *ev = data;
6337 u64 instant = jiffies;
6338
6339 if (!ev->num)
6340 return;
6341
6342 hci_dev_lock(hdev);
6343
6344 while (ev->num--) {
6345 struct hci_ev_le_ext_adv_info *info;
6346 u8 legacy_evt_type;
6347 u16 evt_type;
6348
6349 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6350 sizeof(*info));
6351 if (!info)
6352 break;
6353
6354 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6355 info->length))
6356 break;
6357
6358 evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6359 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6360
6361 if (hci_test_quirk(hdev,
6362 HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) {
6363 info->primary_phy &= 0x1f;
6364 info->secondary_phy &= 0x1f;
6365 }
6366
6367 /* Check if PA Sync is pending and, if the hci_conn SID has not
6368 * been set, update it.
6369 */
6370 if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
6371 struct hci_conn *conn;
6372
6373 conn = hci_conn_hash_lookup_create_pa_sync(hdev);
6374 if (conn && conn->sid == HCI_SID_INVALID)
6375 conn->sid = info->sid;
6376 }
6377
6378 if (legacy_evt_type != LE_ADV_INVALID) {
6379 process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6380 info->bdaddr_type, NULL, 0,
6381 info->primary_phy,
6382 info->secondary_phy,
6383 info->rssi, info->data, info->length,
6384 !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6385 false, instant);
6386 }
6387 }
6388
6389 hci_dev_unlock(hdev);
6390 }
6391
6392 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6393 {
6394 struct hci_cp_le_pa_term_sync cp;
6395
6396 memset(&cp, 0, sizeof(cp));
6397 cp.handle = handle;
6398
6399 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6400 }
6401
6402 static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
6403 struct sk_buff *skb)
6404 {
6405 struct hci_ev_le_pa_sync_established *ev = data;
6406 int mask = hdev->link_mode;
6407 __u8 flags = 0;
6408 struct hci_conn *pa_sync, *conn;
6409
6410 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6411
6412 hci_dev_lock(hdev);
6413
6414 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6415
6416 conn = hci_conn_hash_lookup_create_pa_sync(hdev);
6417 if (!conn) {
6418 bt_dev_err(hdev,
6419 "Unable to find connection for dst %pMR sid 0x%2.2x",
6420 &ev->bdaddr, ev->sid);
6421 goto unlock;
6422 }
6423
6424 clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
6425
6426 conn->sync_handle = le16_to_cpu(ev->handle);
6427 conn->sid = HCI_SID_INVALID;
6428
6429 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
6430 &flags);
6431 if (!(mask & HCI_LM_ACCEPT)) {
6432 hci_le_pa_term_sync(hdev, ev->handle);
6433 goto unlock;
6434 }
6435
6436 if (!(flags & HCI_PROTO_DEFER))
6437 goto unlock;
6438
6439 /* Add connection to indicate PA sync event */
6440 pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
6441 HCI_ROLE_SLAVE);
6442
6443 if (IS_ERR(pa_sync))
6444 goto unlock;
6445
6446 pa_sync->sync_handle = le16_to_cpu(ev->handle);
6447
6448 if (ev->status) {
6449 set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6450
6451 /* Notify iso layer */
6452 hci_connect_cfm(pa_sync, ev->status);
6453 }
6454
6455 unlock:
6456 hci_dev_unlock(hdev);
6457 }
6458
6459 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6460 struct sk_buff *skb)
6461 {
6462 struct hci_ev_le_per_adv_report *ev = data;
6463 int mask = hdev->link_mode;
6464 __u8 flags = 0;
6465 struct hci_conn *pa_sync;
6466
6467 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6468
6469 hci_dev_lock(hdev);
6470
6471 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
6472 if (!(mask & HCI_LM_ACCEPT))
6473 goto unlock;
6474
6475 if (!(flags & HCI_PROTO_DEFER))
6476 goto unlock;
6477
6478 pa_sync =
6479 hci_conn_hash_lookup_pa_sync_handle(hdev,
6480 le16_to_cpu(ev->sync_handle));
6481
6482 if (!pa_sync)
6483 goto unlock;
6484
6485 if (ev->data_status == LE_PA_DATA_COMPLETE &&
6486 !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
6487 /* Notify iso layer */
6488 hci_connect_cfm(pa_sync, 0);
6489
6490 /* Notify MGMT layer */
6491 mgmt_device_connected(hdev, pa_sync, NULL, 0);
6492 }
6493
6494 unlock:
6495 hci_dev_unlock(hdev);
6496 }
6497
6498 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6499 struct sk_buff *skb)
6500 {
6501 struct hci_ev_le_remote_feat_complete *ev = data;
6502 struct hci_conn *conn;
6503
6504 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6505
6506 hci_dev_lock(hdev);
6507
6508 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6509 if (conn) {
6510 if (!ev->status)
6511 memcpy(conn->features[0], ev->features, 8);
6512
6513 if (conn->state == BT_CONFIG) {
6514 __u8 status;
6515
6516 /* If the local controller supports peripheral-initiated
6517 * features exchange, but the remote controller does
6518 * not, then it is possible that the error code 0x1a
6519 * for unsupported remote feature gets returned.
6520 *
6521 * In this specific case, allow the connection to
6522 * transition into connected state and mark it as
6523 * successful.
6524 */
6525 if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6526 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6527 status = 0x00;
6528 else
6529 status = ev->status;
6530
6531 conn->state = BT_CONNECTED;
6532 hci_connect_cfm(conn, status);
6533 hci_conn_drop(conn);
6534 }
6535 }
6536
6537 hci_dev_unlock(hdev);
6538 }
6539
6540 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6541 struct sk_buff *skb)
6542 {
6543 struct hci_ev_le_ltk_req *ev = data;
6544 struct hci_cp_le_ltk_reply cp;
6545 struct hci_cp_le_ltk_neg_reply neg;
6546 struct hci_conn *conn;
6547 struct smp_ltk *ltk;
6548
6549 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6550
6551 hci_dev_lock(hdev);
6552
6553 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6554 if (!conn)
6555 goto not_found;
6556
6557 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6558 if (!ltk)
6559 goto not_found;
6560
6561 if (smp_ltk_is_sc(ltk)) {
6562 /* With SC both EDiv and Rand are set to zero */
6563 if (ev->ediv || ev->rand)
6564 goto not_found;
6565 } else {
6566 /* For non-SC keys check that EDiv and Rand match */
6567 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6568 goto not_found;
6569 }
6570
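/* Copy the key and zero-pad it up to the full 16-byte LTK size in
 * case a shorter encryption key size was negotiated.
 */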
6571 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6572 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6573 cp.handle = cpu_to_le16(conn->handle);
6574
6575 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6576
6577 conn->enc_key_size = ltk->enc_size;
6578
6579 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6580
6581 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6582 * temporary key used to encrypt a connection following
6583 * pairing. It is used during the Encrypted Session Setup to
6584 * distribute the keys. Later, security can be re-established
6585 * using a distributed LTK.
6586 */
6587 if (ltk->type == SMP_STK) {
6588 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6589 list_del_rcu(<k->list);
6590 kfree_rcu(ltk, rcu);
6591 } else {
6592 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6593 }
6594
6595 hci_dev_unlock(hdev);
6596
6597 return;
6598
6599 not_found:
6600 neg.handle = ev->handle;
6601 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6602 hci_dev_unlock(hdev);
6603 }
6604
6605 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6606 u8 reason)
6607 {
6608 struct hci_cp_le_conn_param_req_neg_reply cp;
6609
6610 cp.handle = cpu_to_le16(handle);
6611 cp.reason = reason;
6612
6613 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6614 &cp);
6615 }
6616
6617 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6618 struct sk_buff *skb)
6619 {
6620 struct hci_ev_le_remote_conn_param_req *ev = data;
6621 struct hci_cp_le_conn_param_req_reply cp;
6622 struct hci_conn *hcon;
6623 u16 handle, min, max, latency, timeout;
6624
6625 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6626
6627 handle = le16_to_cpu(ev->handle);
6628 min = le16_to_cpu(ev->interval_min);
6629 max = le16_to_cpu(ev->interval_max);
6630 latency = le16_to_cpu(ev->latency);
6631 timeout = le16_to_cpu(ev->timeout);
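/* Per the Core spec, the interval values are in units of 1.25 ms,
 * the latency is a number of connection events and the timeout is
 * in units of 10 ms.
 */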
6632
6633 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6634 if (!hcon || hcon->state != BT_CONNECTED)
6635 return send_conn_param_neg_reply(hdev, handle,
6636 HCI_ERROR_UNKNOWN_CONN_ID);
6637
6638 if (max > hcon->le_conn_max_interval)
6639 return send_conn_param_neg_reply(hdev, handle,
6640 HCI_ERROR_INVALID_LL_PARAMS);
6641
6642 if (hci_check_conn_params(min, max, latency, timeout))
6643 return send_conn_param_neg_reply(hdev, handle,
6644 HCI_ERROR_INVALID_LL_PARAMS);
6645
6646 if (hcon->role == HCI_ROLE_MASTER) {
6647 struct hci_conn_params *params;
6648 u8 store_hint;
6649
6650 hci_dev_lock(hdev);
6651
6652 params = hci_conn_params_lookup(hdev, &hcon->dst,
6653 hcon->dst_type);
6654 if (params) {
6655 params->conn_min_interval = min;
6656 params->conn_max_interval = max;
6657 params->conn_latency = latency;
6658 params->supervision_timeout = timeout;
6659 store_hint = 0x01;
6660 } else {
6661 store_hint = 0x00;
6662 }
6663
6664 hci_dev_unlock(hdev);
6665
6666 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6667 store_hint, min, max, latency, timeout);
6668 }
6669
6670 cp.handle = ev->handle;
6671 cp.interval_min = ev->interval_min;
6672 cp.interval_max = ev->interval_max;
6673 cp.latency = ev->latency;
6674 cp.timeout = ev->timeout;
6675 cp.min_ce_len = 0;
6676 cp.max_ce_len = 0;
6677
6678 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6679 }
6680
6681 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6682 struct sk_buff *skb)
6683 {
6684 struct hci_ev_le_direct_adv_report *ev = data;
6685 u64 instant = jiffies;
6686 int i;
6687
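/* flex_array_size() computes the size of the trailing info[] array
 * for ev->num reports, so truncated events are rejected before the
 * reports are walked.
 */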
6688 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6689 flex_array_size(ev, info, ev->num)))
6690 return;
6691
6692 if (!ev->num)
6693 return;
6694
6695 hci_dev_lock(hdev);
6696
6697 for (i = 0; i < ev->num; i++) {
6698 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6699
6700 process_adv_report(hdev, info->type, &info->bdaddr,
6701 info->bdaddr_type, &info->direct_addr,
6702 info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6703 info->rssi, NULL, 0, false, false, instant);
6704 }
6705
6706 hci_dev_unlock(hdev);
6707 }
6708
6709 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6710 struct sk_buff *skb)
6711 {
6712 struct hci_ev_le_phy_update_complete *ev = data;
6713 struct hci_conn *conn;
6714
6715 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6716
6717 if (ev->status)
6718 return;
6719
6720 hci_dev_lock(hdev);
6721
6722 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6723 if (!conn)
6724 goto unlock;
6725
6726 conn->le_tx_phy = ev->tx_phy;
6727 conn->le_rx_phy = ev->rx_phy;
6728
6729 unlock:
6730 hci_dev_unlock(hdev);
6731 }
6732
6733 static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
6734 struct sk_buff *skb)
6735 {
6736 struct hci_evt_le_cis_established *ev = data;
6737 struct hci_conn *conn;
6738 struct bt_iso_qos *qos;
6739 bool pending = false;
6740 u16 handle = __le16_to_cpu(ev->handle);
6741 u32 c_sdu_interval, p_sdu_interval;
6742
6743 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6744
6745 hci_dev_lock(hdev);
6746
6747 conn = hci_conn_hash_lookup_handle(hdev, handle);
6748 if (!conn) {
6749 bt_dev_err(hdev,
6750 "Unable to find connection with handle 0x%4.4x",
6751 handle);
6752 goto unlock;
6753 }
6754
6755 if (conn->type != CIS_LINK) {
6756 bt_dev_err(hdev,
6757 "Invalid connection link type handle 0x%4.4x",
6758 handle);
6759 goto unlock;
6760 }
6761
6762 qos = &conn->iso_qos;
6763
6764 pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6765
6766 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
6767 * page 3075:
6768 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
6769 * ISO_Interval + SDU_Interval_C_To_P
6770 * ...
6771 * SDU_Interval = (CIG_Sync_Delay + (FT) × ISO_Interval) -
6772 * Transport_Latency
6773 */
6774 c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6775 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
6776 get_unaligned_le24(ev->c_latency);
6777 p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6778 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
6779 get_unaligned_le24(ev->p_latency);
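/* Worked example with illustrative values: a CIG_Sync_Delay of
 * 5000 us, an FT of 1, an ISO_Interval of 8 units (8 * 1250 =
 * 10000 us) and a Transport_Latency of 10000 us give an
 * SDU_Interval of 5000 + 10000 - 10000 = 5000 us.
 */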
6780
6781 switch (conn->role) {
6782 case HCI_ROLE_SLAVE:
6783 qos->ucast.in.interval = c_sdu_interval;
6784 qos->ucast.out.interval = p_sdu_interval;
6785 /* Convert Transport Latency (us) to Latency (msec) */
6786 qos->ucast.in.latency =
6787 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6788 1000);
6789 qos->ucast.out.latency =
6790 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6791 1000);
6792 qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
6793 qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
6794 qos->ucast.in.phy = ev->c_phy;
6795 qos->ucast.out.phy = ev->p_phy;
6796 break;
6797 case HCI_ROLE_MASTER:
6798 qos->ucast.in.interval = p_sdu_interval;
6799 qos->ucast.out.interval = c_sdu_interval;
6800 /* Convert Transport Latency (us) to Latency (msec) */
6801 qos->ucast.out.latency =
6802 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6803 1000);
6804 qos->ucast.in.latency =
6805 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6806 1000);
6807 qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
6808 qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
6809 qos->ucast.out.phy = ev->c_phy;
6810 qos->ucast.in.phy = ev->p_phy;
6811 break;
6812 }
6813
6814 if (!ev->status) {
6815 conn->state = BT_CONNECTED;
6816 hci_debugfs_create_conn(conn);
6817 hci_conn_add_sysfs(conn);
6818 hci_iso_setup_path(conn);
6819 goto unlock;
6820 }
6821
6822 conn->state = BT_CLOSED;
6823 hci_connect_cfm(conn, ev->status);
6824 hci_conn_del(conn);
6825
6826 unlock:
6827 if (pending)
6828 hci_le_create_cis_pending(hdev);
6829
6830 hci_dev_unlock(hdev);
6831 }
6832
6833 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6834 {
6835 struct hci_cp_le_reject_cis cp;
6836
6837 memset(&cp, 0, sizeof(cp));
6838 cp.handle = handle;
6839 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6840 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6841 }
6842
6843 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6844 {
6845 struct hci_cp_le_accept_cis cp;
6846
6847 memset(&cp, 0, sizeof(cp));
6848 cp.handle = handle;
6849 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6850 }
6851
6852 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6853 struct sk_buff *skb)
6854 {
6855 struct hci_evt_le_cis_req *ev = data;
6856 u16 acl_handle, cis_handle;
6857 struct hci_conn *acl, *cis;
6858 int mask;
6859 __u8 flags = 0;
6860
6861 acl_handle = __le16_to_cpu(ev->acl_handle);
6862 cis_handle = __le16_to_cpu(ev->cis_handle);
6863
6864 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6865 acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6866
6867 hci_dev_lock(hdev);
6868
6869 acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6870 if (!acl)
6871 goto unlock;
6872
6873 mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags);
6874 if (!(mask & HCI_LM_ACCEPT)) {
6875 hci_le_reject_cis(hdev, ev->cis_handle);
6876 goto unlock;
6877 }
6878
6879 cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6880 if (!cis) {
6881 cis = hci_conn_add(hdev, CIS_LINK, &acl->dst,
6882 HCI_ROLE_SLAVE, cis_handle);
6883 if (IS_ERR(cis)) {
6884 hci_le_reject_cis(hdev, ev->cis_handle);
6885 goto unlock;
6886 }
6887 }
6888
6889 cis->iso_qos.ucast.cig = ev->cig_id;
6890 cis->iso_qos.ucast.cis = ev->cis_id;
6891
6892 if (!(flags & HCI_PROTO_DEFER)) {
6893 hci_le_accept_cis(hdev, ev->cis_handle);
6894 } else {
6895 cis->state = BT_CONNECT2;
6896 hci_connect_cfm(cis, 0);
6897 }
6898
6899 unlock:
6900 hci_dev_unlock(hdev);
6901 }
6902
6903 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6904 {
6905 u8 handle = PTR_UINT(data);
6906
6907 return hci_le_terminate_big_sync(hdev, handle,
6908 HCI_ERROR_LOCAL_HOST_TERM);
6909 }
6910
6911 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6912 struct sk_buff *skb)
6913 {
6914 struct hci_evt_le_create_big_complete *ev = data;
6915 struct hci_conn *conn;
6916 __u8 i = 0;
6917
6918 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6919
6920 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6921 flex_array_size(ev, bis_handle, ev->num_bis)))
6922 return;
6923
6924 hci_dev_lock(hdev);
6925
6926 /* Connect all BISes that are bound to the BIG */
6927 while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
6928 BT_BOUND,
6929 HCI_ROLE_MASTER))) {
6930 if (ev->status) {
6931 hci_connect_cfm(conn, ev->status);
6932 hci_conn_del(conn);
6933 continue;
6934 }
6935
6936 if (hci_conn_set_handle(conn,
6937 __le16_to_cpu(ev->bis_handle[i++])))
6938 continue;
6939
6940 conn->state = BT_CONNECTED;
6941 set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6942 hci_debugfs_create_conn(conn);
6943 hci_conn_add_sysfs(conn);
6944 hci_iso_setup_path(conn);
6945 }
6946
6947 if (!ev->status && !i)
6948 /* If no BISes have been connected for the BIG,
6949 * terminate. This is in case all bound connections
6950 * have been closed before the BIG creation
6951 * has completed.
6952 */
6953 hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6954 UINT_PTR(ev->handle), NULL);
6955
6956 hci_dev_unlock(hdev);
6957 }
6958
6959 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6960 struct sk_buff *skb)
6961 {
6962 struct hci_evt_le_big_sync_established *ev = data;
6963 struct hci_conn *bis, *conn;
6964 int i;
6965
6966 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6967
6968 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
6969 flex_array_size(ev, bis, ev->num_bis)))
6970 return;
6971
6972 hci_dev_lock(hdev);
6973
6974 conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
6975 ev->num_bis);
6976 if (!conn) {
6977 bt_dev_err(hdev,
6978 "Unable to find connection for big 0x%2.2x",
6979 ev->handle);
6980 goto unlock;
6981 }
6982
6983 clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
6984
6985 conn->num_bis = 0;
6986 memset(conn->bis, 0, sizeof(conn->bis));
6987
6988 for (i = 0; i < ev->num_bis; i++) {
6989 u16 handle = le16_to_cpu(ev->bis[i]);
6990 __le32 interval;
6991
6992 bis = hci_conn_hash_lookup_handle(hdev, handle);
6993 if (!bis) {
6994 if (handle > HCI_CONN_HANDLE_MAX) {
6995 bt_dev_dbg(hdev, "ignore too large handle %u", handle);
6996 continue;
6997 }
6998 bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY,
6999 HCI_ROLE_SLAVE, handle);
7000 if (IS_ERR(bis))
7001 continue;
7002 }
7003
7004 if (ev->status != 0x42) {
7005 /* Mark PA sync as established */
7006 set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7007 /* Reset cleanup callback of PA Sync so it doesn't
7008 * terminate the sync when deleting the connection.
7009 */
7010 conn->cleanup = NULL;
7011 }
7012
7013 bis->sync_handle = conn->sync_handle;
7014 bis->iso_qos.bcast.big = ev->handle;
7015 memset(&interval, 0, sizeof(interval));
7016 memcpy(&interval, ev->latency, sizeof(ev->latency));
7017 bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7018 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7019 bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
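/* e.g. an ISO_Interval of 8 slots converts to 8 * 125 / 100 = 10 ms */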
7020 bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7021
7022 if (!ev->status) {
7023 bis->state = BT_CONNECTED;
7024 set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7025 hci_debugfs_create_conn(bis);
7026 hci_conn_add_sysfs(bis);
7027 hci_iso_setup_path(bis);
7028 }
7029 }
7030
7031 /* In case BIG sync failed, notify each failed connection to
7032 * the user after all hci connections have been added
7033 */
7034 if (ev->status)
7035 for (i = 0; i < ev->num_bis; i++) {
7036 u16 handle = le16_to_cpu(ev->bis[i]);
7037
7038 bis = hci_conn_hash_lookup_handle(hdev, handle);
7039 if (!bis)
7040 continue;
7041
7042 set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7043 hci_connect_cfm(bis, ev->status);
7044 }
7045
7046 unlock:
7047 hci_dev_unlock(hdev);
7048 }
7049
7050 static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
7051 struct sk_buff *skb)
7052 {
7053 struct hci_evt_le_big_sync_lost *ev = data;
7054 struct hci_conn *bis, *conn;
7055 bool mgmt_conn;
7056
7057 bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);
7058
7059 hci_dev_lock(hdev);
7060
7061 /* Delete the pa sync connection */
7062 bis = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
7063 if (bis) {
7064 conn = hci_conn_hash_lookup_pa_sync_handle(hdev,
7065 bis->sync_handle);
7066 if (conn)
7067 hci_conn_del(conn);
7068 }
7069
7070 /* Delete each bis connection */
7071 while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
7072 BT_CONNECTED,
7073 HCI_ROLE_SLAVE))) {
7074 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &bis->flags);
7075 mgmt_device_disconnected(hdev, &bis->dst, bis->type, bis->dst_type,
7076 ev->reason, mgmt_conn);
7077
7078 clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7079 hci_disconn_cfm(bis, ev->reason);
7080 hci_conn_del(bis);
7081 }
7082
7083 hci_dev_unlock(hdev);
7084 }
7085
7086 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7087 struct sk_buff *skb)
7088 {
7089 struct hci_evt_le_big_info_adv_report *ev = data;
7090 int mask = hdev->link_mode;
7091 __u8 flags = 0;
7092 struct hci_conn *pa_sync;
7093
7094 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7095
7096 hci_dev_lock(hdev);
7097
7098 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
7099 if (!(mask & HCI_LM_ACCEPT))
7100 goto unlock;
7101
7102 if (!(flags & HCI_PROTO_DEFER))
7103 goto unlock;
7104
7105 pa_sync =
7106 hci_conn_hash_lookup_pa_sync_handle(hdev,
7107 le16_to_cpu(ev->sync_handle));
7108
7109 if (!pa_sync)
7110 goto unlock;
7111
7112 pa_sync->iso_qos.bcast.encryption = ev->encryption;
7113
7114 /* Notify iso layer */
7115 hci_connect_cfm(pa_sync, 0);
7116
7117 unlock:
7118 hci_dev_unlock(hdev);
7119 }
7120
7121 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7122 [_op] = { \
7123 .func = _func, \
7124 .min_len = _min_len, \
7125 .max_len = _max_len, \
7126 }
7127
7128 #define HCI_LE_EV(_op, _func, _len) \
7129 HCI_LE_EV_VL(_op, _func, _len, _len)
7130
7131 #define HCI_LE_EV_STATUS(_op, _func) \
7132 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7133
7134 /* Entries in this table shall be positioned according to the subevent
7135 * opcode they handle, so use of the macros above is recommended since they
7136 * initialize each entry at its proper index using designated initializers;
7137 * that way events without a callback function can be omitted.
7138 */
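/* For instance, the entry below for HCI_EV_LE_LTK_REQ expands to a
 * designated initializer at index 0x05:
 * [0x05] = { .func = hci_le_ltk_request_evt, .min_len = ..., .max_len = ... }
 */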
7139 static const struct hci_le_ev {
7140 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7141 u16 min_len;
7142 u16 max_len;
7143 } hci_le_ev_table[U8_MAX + 1] = {
7144 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7145 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7146 sizeof(struct hci_ev_le_conn_complete)),
7147 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7148 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7149 sizeof(struct hci_ev_le_advertising_report),
7150 HCI_MAX_EVENT_SIZE),
7151 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7152 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7153 hci_le_conn_update_complete_evt,
7154 sizeof(struct hci_ev_le_conn_update_complete)),
7155 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7156 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7157 hci_le_remote_feat_complete_evt,
7158 sizeof(struct hci_ev_le_remote_feat_complete)),
7159 /* [0x05 = HCI_EV_LE_LTK_REQ] */
7160 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7161 sizeof(struct hci_ev_le_ltk_req)),
7162 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7163 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7164 hci_le_remote_conn_param_req_evt,
7165 sizeof(struct hci_ev_le_remote_conn_param_req)),
7166 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7167 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7168 hci_le_enh_conn_complete_evt,
7169 sizeof(struct hci_ev_le_enh_conn_complete)),
7170 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7171 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7172 sizeof(struct hci_ev_le_direct_adv_report),
7173 HCI_MAX_EVENT_SIZE),
7174 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7175 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7176 sizeof(struct hci_ev_le_phy_update_complete)),
7177 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7178 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7179 sizeof(struct hci_ev_le_ext_adv_report),
7180 HCI_MAX_EVENT_SIZE),
7181 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7182 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7183 hci_le_pa_sync_established_evt,
7184 sizeof(struct hci_ev_le_pa_sync_established)),
7185 /* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7186 HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7187 hci_le_per_adv_report_evt,
7188 sizeof(struct hci_ev_le_per_adv_report),
7189 HCI_MAX_EVENT_SIZE),
7190 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7191 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7192 sizeof(struct hci_evt_le_ext_adv_set_term)),
7193 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7194 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
7195 sizeof(struct hci_evt_le_cis_established)),
7196 /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7197 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7198 sizeof(struct hci_evt_le_cis_req)),
7199 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7200 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7201 hci_le_create_big_complete_evt,
7202 sizeof(struct hci_evt_le_create_big_complete),
7203 HCI_MAX_EVENT_SIZE),
7204 /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */
7205 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
7206 hci_le_big_sync_established_evt,
7207 sizeof(struct hci_evt_le_big_sync_established),
7208 HCI_MAX_EVENT_SIZE),
7209 /* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
7210 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST,
7211 hci_le_big_sync_lost_evt,
7212 sizeof(struct hci_evt_le_big_sync_lost),
7213 HCI_MAX_EVENT_SIZE),
7214 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7215 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7216 hci_le_big_info_adv_report_evt,
7217 sizeof(struct hci_evt_le_big_info_adv_report),
7218 HCI_MAX_EVENT_SIZE),
7219 };
7220
7221 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7222 struct sk_buff *skb, u16 *opcode, u8 *status,
7223 hci_req_complete_t *req_complete,
7224 hci_req_complete_skb_t *req_complete_skb)
7225 {
7226 struct hci_ev_le_meta *ev = data;
7227 const struct hci_le_ev *subev;
7228
7229 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7230
7231 /* Only match the event if the pending command's OGF is for LE (0x08) */
7232 if (hdev->req_skb &&
7233 (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
7234 hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
7235 hci_skb_event(hdev->req_skb) == ev->subevent) {
7236 *opcode = hci_skb_opcode(hdev->req_skb);
7237 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7238 req_complete_skb);
7239 }
7240
7241 subev = &hci_le_ev_table[ev->subevent];
7242 if (!subev->func)
7243 return;
7244
7245 if (skb->len < subev->min_len) {
7246 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7247 ev->subevent, skb->len, subev->min_len);
7248 return;
7249 }
7250
7251 /* Just warn if the length is over max_len since it may still be
7252 * possible to partially parse the event, so leave it to the callback
7253 * to decide whether that is acceptable.
7254 */
7255 if (skb->len > subev->max_len)
7256 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7257 ev->subevent, skb->len, subev->max_len);
7258 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7259 if (!data)
7260 return;
7261
7262 subev->func(hdev, data, skb);
7263 }
7264
7265 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7266 u8 event, struct sk_buff *skb)
7267 {
7268 struct hci_ev_cmd_complete *ev;
7269 struct hci_event_hdr *hdr;
7270
7271 if (!skb)
7272 return false;
7273
7274 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7275 if (!hdr)
7276 return false;
7277
7278 if (event) {
7279 if (hdr->evt != event)
7280 return false;
7281 return true;
7282 }
7283
7284 /* Check if request ended in Command Status - no way to retrieve
7285 * any extra parameters in this case.
7286 */
7287 if (hdr->evt == HCI_EV_CMD_STATUS)
7288 return false;
7289
7290 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7291 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7292 hdr->evt);
7293 return false;
7294 }
7295
7296 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7297 if (!ev)
7298 return false;
7299
7300 if (opcode != __le16_to_cpu(ev->opcode)) {
7301 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7302 __le16_to_cpu(ev->opcode));
7303 return false;
7304 }
7305
7306 return true;
7307 }
7308
7309 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7310 struct sk_buff *skb)
7311 {
7312 struct hci_ev_le_advertising_info *adv;
7313 struct hci_ev_le_direct_adv_info *direct_adv;
7314 struct hci_ev_le_ext_adv_info *ext_adv;
7315 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7316 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7317
7318 hci_dev_lock(hdev);
7319
7320 /* If we are currently suspended and this is the first BT event seen,
7321 * save the wake reason associated with the event.
7322 */
7323 if (!hdev->suspended || hdev->wake_reason)
7324 goto unlock;
7325
        /* Default to remote wake. Values for wake_reason are documented in
         * the BlueZ mgmt API docs.
         */
        hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

        /* Once configured for remote wakeup, we should only wake up for
         * reconnections. It's useful to see which device is waking us up so
         * keep track of the bdaddr of the connection event that woke us up.
         */
        if (event == HCI_EV_CONN_REQUEST) {
                bacpy(&hdev->wake_addr, &conn_request->bdaddr);
                hdev->wake_addr_type = BDADDR_BREDR;
        } else if (event == HCI_EV_CONN_COMPLETE) {
                bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
                hdev->wake_addr_type = BDADDR_BREDR;
        } else if (event == HCI_EV_LE_META) {
                struct hci_ev_le_meta *le_ev = (void *)skb->data;
                u8 subevent = le_ev->subevent;
                u8 *ptr = &skb->data[sizeof(*le_ev)];
                u8 num_reports = *ptr;

                if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
                     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
                     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
                    num_reports) {
                        adv = (void *)(ptr + 1);
                        direct_adv = (void *)(ptr + 1);
                        ext_adv = (void *)(ptr + 1);

                        switch (subevent) {
                        case HCI_EV_LE_ADVERTISING_REPORT:
                                bacpy(&hdev->wake_addr, &adv->bdaddr);
                                hdev->wake_addr_type = adv->bdaddr_type;
                                break;
                        case HCI_EV_LE_DIRECT_ADV_REPORT:
                                bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
                                hdev->wake_addr_type = direct_adv->bdaddr_type;
                                break;
                        case HCI_EV_LE_EXT_ADV_REPORT:
                                bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
                                hdev->wake_addr_type = ext_adv->bdaddr_type;
                                break;
                        }
                }
        } else {
                hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
        }

unlock:
        hci_dev_unlock(hdev);
}

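/* Editor's sketch of the LE Advertising Report layout the HCI_EV_LE_META
 * branch above parses (the event header has already been pulled by the
 * caller, but the meta header has not):
 *
 *      skb->data[0]    subevent (0x02 = HCI_EV_LE_ADVERTISING_REPORT)
 *      skb->data[1]    num_reports (the *ptr dereference above)
 *      skb->data[2]    first struct hci_ev_le_advertising_info, whose
 *                      bdaddr/bdaddr_type end up in hdev->wake_addr.
 */
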
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
        .req = false, \
        .func = _func, \
        .min_len = _min_len, \
        .max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
        HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
        HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
        .req = true, \
        .func_req = _func, \
        .min_len = _min_len, \
        .max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
        HCI_EV_REQ_VL(_op, _func, _len, _len)

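/* For reference (editor's note), HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE,
 * hci_inquiry_complete_evt) expands to the designated initializer
 *
 *      [HCI_EV_INQUIRY_COMPLETE] = {
 *              .req = false,
 *              .func = hci_inquiry_complete_evt,
 *              .min_len = sizeof(struct hci_ev_status),
 *              .max_len = sizeof(struct hci_ev_status),
 *      },
 *
 * so any event code without an entry stays zero-initialized and is skipped
 * by the !ev->func check in hci_event_func() below.
 */
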
/* Entries in this table shall have their position according to the event
 * opcode they handle, so use of the macros above is recommended since they
 * initialize each entry at its proper index using designated initializers;
 * that way, events without a callback function don't get an entry.
 */
static const struct hci_ev {
        bool req;
        union {
                void (*func)(struct hci_dev *hdev, void *data,
                             struct sk_buff *skb);
                void (*func_req)(struct hci_dev *hdev, void *data,
                                 struct sk_buff *skb, u16 *opcode, u8 *status,
                                 hci_req_complete_t *req_complete,
                                 hci_req_complete_skb_t *req_complete_skb);
        };
        u16 min_len;
        u16 max_len;
} hci_ev_table[U8_MAX + 1] = {
        /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
        HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
        /* [0x02 = HCI_EV_INQUIRY_RESULT] */
        HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
                  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
        /* [0x03 = HCI_EV_CONN_COMPLETE] */
        HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
               sizeof(struct hci_ev_conn_complete)),
        /* [0x04 = HCI_EV_CONN_REQUEST] */
        HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
               sizeof(struct hci_ev_conn_request)),
        /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
        HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
               sizeof(struct hci_ev_disconn_complete)),
        /* [0x06 = HCI_EV_AUTH_COMPLETE] */
        HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
               sizeof(struct hci_ev_auth_complete)),
        /* [0x07 = HCI_EV_REMOTE_NAME] */
        HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
               sizeof(struct hci_ev_remote_name)),
        /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
        HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
               sizeof(struct hci_ev_encrypt_change)),
        /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
        HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
               hci_change_link_key_complete_evt,
               sizeof(struct hci_ev_change_link_key_complete)),
        /* [0x0b = HCI_EV_REMOTE_FEATURES] */
        HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
               sizeof(struct hci_ev_remote_features)),
        /* [0x0e = HCI_EV_CMD_COMPLETE] */
        HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
                      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
        /* [0x0f = HCI_EV_CMD_STATUS] */
        HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
                   sizeof(struct hci_ev_cmd_status)),
        /* [0x10 = HCI_EV_HARDWARE_ERROR] */
        HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
               sizeof(struct hci_ev_hardware_error)),
        /* [0x12 = HCI_EV_ROLE_CHANGE] */
        HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
               sizeof(struct hci_ev_role_change)),
        /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
        HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
                  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
        /* [0x14 = HCI_EV_MODE_CHANGE] */
        HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
               sizeof(struct hci_ev_mode_change)),
        /* [0x16 = HCI_EV_PIN_CODE_REQ] */
        HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
               sizeof(struct hci_ev_pin_code_req)),
        /* [0x17 = HCI_EV_LINK_KEY_REQ] */
        HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
               sizeof(struct hci_ev_link_key_req)),
        /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
        HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
               sizeof(struct hci_ev_link_key_notify)),
        /* [0x1c = HCI_EV_CLOCK_OFFSET] */
        HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
               sizeof(struct hci_ev_clock_offset)),
        /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
        HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
               sizeof(struct hci_ev_pkt_type_change)),
        /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
        HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
               sizeof(struct hci_ev_pscan_rep_mode)),
        /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
        HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
                  hci_inquiry_result_with_rssi_evt,
                  sizeof(struct hci_ev_inquiry_result_rssi),
                  HCI_MAX_EVENT_SIZE),
        /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
        HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
               sizeof(struct hci_ev_remote_ext_features)),
        /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
        HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
               sizeof(struct hci_ev_sync_conn_complete)),
        /* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
        HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
                  hci_extended_inquiry_result_evt,
                  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
        /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
        HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
               sizeof(struct hci_ev_key_refresh_complete)),
        /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
        HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
               sizeof(struct hci_ev_io_capa_request)),
        /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
        HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
               sizeof(struct hci_ev_io_capa_reply)),
        /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
        HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
               sizeof(struct hci_ev_user_confirm_req)),
        /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
        HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
               sizeof(struct hci_ev_user_passkey_req)),
        /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
        HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
               sizeof(struct hci_ev_remote_oob_data_request)),
        /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
        HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
               sizeof(struct hci_ev_simple_pair_complete)),
        /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
        HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
               sizeof(struct hci_ev_user_passkey_notify)),
        /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
        HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
               sizeof(struct hci_ev_keypress_notify)),
        /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
        HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
               sizeof(struct hci_ev_remote_host_features)),
        /* [0x3e = HCI_EV_LE_META] */
        HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
                      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
        /* [0xff = HCI_EV_VENDOR] */
        HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

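/* Editor's sketch: registering a handler for a hypothetical new event code
 * HCI_EV_FOO (name invented for illustration) would take one more
 * designated-initializer entry in the table above, e.g.
 *
 *      HCI_EV(HCI_EV_FOO, hci_foo_evt, sizeof(struct hci_ev_foo)),
 *
 * where hci_foo_evt() follows the void (*func)(hdev, data, skb) signature
 * of struct hci_ev.
 */
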
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
                           u16 *opcode, u8 *status,
                           hci_req_complete_t *req_complete,
                           hci_req_complete_skb_t *req_complete_skb)
{
        const struct hci_ev *ev = &hci_ev_table[event];
        void *data;

        if (!ev->func)
                return;

        if (skb->len < ev->min_len) {
                bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
                           event, skb->len, ev->min_len);
                return;
        }

        /* Just warn if the length is over max_len, since it may still be
         * possible to partially parse the event, so leave it to the callback
         * to decide whether that is acceptable.
         */
        if (skb->len > ev->max_len)
                bt_dev_warn_ratelimited(hdev,
                                        "unexpected event 0x%2.2x length: %u > %u",
                                        event, skb->len, ev->max_len);

        data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
        if (!data)
                return;

        if (ev->req)
                ev->func_req(hdev, data, skb, opcode, status, req_complete,
                             req_complete_skb);
        else
                ev->func(hdev, data, skb);
}

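/* Worked example (editor's note; the struct size is assumed from the event
 * definitions): a Connection Request event (0x04) arrives with
 * skb->len == 10 after the event header has been pulled.
 * hci_ev_table[0x04] has min_len == max_len ==
 * sizeof(struct hci_ev_conn_request) == 10, so both length checks pass, the
 * 10 parameter bytes are pulled, and hci_conn_request_evt() runs with data
 * pointing at them.
 */
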
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_event_hdr *hdr = (void *) skb->data;
        hci_req_complete_t req_complete = NULL;
        hci_req_complete_skb_t req_complete_skb = NULL;
        struct sk_buff *orig_skb = NULL;
        u8 status = 0, event, req_evt = 0;
        u16 opcode = HCI_OP_NOP;

        if (skb->len < sizeof(*hdr)) {
                bt_dev_err(hdev, "Malformed HCI Event");
                goto done;
        }

        hci_dev_lock(hdev);
        kfree_skb(hdev->recv_event);
        hdev->recv_event = skb_clone(skb, GFP_KERNEL);
        hci_dev_unlock(hdev);

        event = hdr->evt;
        if (!event) {
                bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
                            event);
                goto done;
        }

        /* Only match event if command OGF is not for LE */
        if (hdev->req_skb &&
            hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
            hci_skb_event(hdev->req_skb) == event) {
                hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
                                     status, &req_complete, &req_complete_skb);
                req_evt = event;
        }

        /* If it looks like we might end up having to call
         * req_complete_skb, store a pristine copy of the skb since the
         * various handlers may modify the original one through
         * skb_pull() calls, etc.
         */
        if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
            event == HCI_EV_CMD_COMPLETE)
                orig_skb = skb_clone(skb, GFP_KERNEL);

        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        /* Store wake reason if we're suspended */
        hci_store_wake_reason(hdev, event, skb);

        bt_dev_dbg(hdev, "event 0x%2.2x", event);

        hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
                       &req_complete_skb);

        if (req_complete) {
                req_complete(hdev, status, opcode);
        } else if (req_complete_skb) {
                if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
                        kfree_skb(orig_skb);
                        orig_skb = NULL;
                }
                req_complete_skb(hdev, status, opcode, orig_skb);
        }

done:
        kfree_skb(orig_skb);
        kfree_skb(skb);
        hdev->stat.evt_rx++;
}

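/* For context (editor's note): transport drivers do not call
 * hci_event_packet() directly. A driver hands a received event to the core
 * with something like
 *
 *      hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *      hci_recv_frame(hdev, skb);
 *
 * and the core's RX work then dispatches HCI_EVENT_PKT frames here.
 */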