/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

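/* Length-checked wrappers around skb_pull_data(): each consumes @len bytes
 * of the event payload and returns a pointer to them, or NULL (after
 * logging which event or opcode was malformed) when the packet is shorter
 * than the fixed-size structure the caller expects.
 */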
static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}

static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive an Inquiry Complete event right
	 * before the Inquiry Cancel Command Complete event, in which case
	 * the latter event has a status of Command Disallowed. This should
	 * not be treated as an error, since we actually achieve what
	 * Inquiry Cancel wants to achieve, which is to end the last
	 * Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_remote_name_req_cancel *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

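/* HCI_Reset completed: drop all cached volatile state so the host's view
 * matches the freshly reset controller.
 */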
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

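/* Validate the encryption key size negotiated for the connection: a size
 * below the configured minimum, or one smaller than the size stored with
 * the link key, is reported upward as an authentication failure.
 */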
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		u8 *key_enc_size = hci_conn_key_enc_size(conn);

		conn->enc_key_size = rp->key_size;
		status = 0;

		/* Attempt to check if the key size is too small or if it has
		 * been downgraded from the last time it was stored as part of
		 * the link_key.
		 */
		if (conn->enc_key_size < hdev->min_enc_key_size ||
		    (key_enc_size && conn->enc_key_size < *key_enc_size)) {
			/* In the slave role, conn->state has already been set
			 * to BT_CONNECTED and the l2cap conn req might not be
			 * received yet; at this moment the l2cap layer does
			 * almost nothing with the non-zero status.
			 * So we also clear the encrypt-related bits, and then
			 * the handler of the l2cap conn req will get the
			 * right secure state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}

		/* Update the key encryption size with the connection one */
		if (key_enc_size && *key_enc_size != conn->enc_key_size)
			*key_enc_size = conn->enc_key_size;
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

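/* Map the controller's LMP feature bits onto the ACL, SCO and eSCO packet
 * types the host is allowed to use on this device.
 */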
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by the device.
	 */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (hci_test_quirk(hdev,
				   HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

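/* Cache the controller's BR/EDR buffer geometry; acl_cnt and sco_cnt act as
 * the host flow-control credits and start out equal to the reported number
 * of buffers. A zero-sized ACL buffer configuration is rejected outright.
 */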
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	if (!read_voice_setting_capable(hdev))
		hdev->sco_pkts = 0;

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	if (!hdev->acl_mtu || !hdev->acl_pkts)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

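/* Read Clock completion: cp->which selects whether the reply carries the
 * local Bluetooth clock (0x00, cached in hdev) or the piconet clock of the
 * connection named by the handle.
 */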
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

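/* Cache the LE ACL buffer parameters used for host flow control; le_cnt is
 * the credit counter and starts out at the number of buffers reported by
 * the controller. A non-zero MTU below HCI_MIN_LE_MTU is rejected as
 * invalid (an MTU of 0 means the controller has no dedicated LE buffers).
 */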
static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

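/* A new random address was programmed. If it is the locally generated RPA,
 * (re)arm the delayed work that regenerates the address once rpa_timeout
 * expires.
 */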
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only if this is an adv instance, since handle 0x00 shall
	 * use HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as a peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

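/* Extended advertising enable/disable applies per advertising set. Handle
 * 0x00 is tracked via HCI_LE_ADV_0 instead of an adv_info instance, and
 * HCI_LE_ADV is only cleared once no set remains enabled.
 */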
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;
		else if (!set->handle)
			hci_dev_set_flag(hdev, HCI_LE_ADV_0);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;
			else if (!set->handle)
				hci_dev_clear_flag(hdev, HCI_LE_ADV_0);

			/* If just one instance was disabled, check if any
			 * other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

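/* During active LE scanning the most recent advertising report is buffered
 * in hdev->discovery so it can be merged with the matching scan response
 * before being forwarded to the management interface. These helpers manage
 * that single-entry cache.
 */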
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
			clear_pending_adv_report(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		}
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

1975 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1976 struct sk_buff *skb)
1977 {
1978 struct hci_cp_le_add_to_resolv_list *sent;
1979 struct hci_ev_status *rp = data;
1980
1981 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1982
1983 if (rp->status)
1984 return rp->status;
1985
1986 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1987 if (!sent)
1988 return rp->status;
1989
1990 hci_dev_lock(hdev);
1991 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1992 sent->bdaddr_type, sent->peer_irk,
1993 sent->local_irk);
1994 hci_dev_unlock(hdev);
1995
1996 return rp->status;
1997 }
1998
1999 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2000 struct sk_buff *skb)
2001 {
2002 struct hci_cp_le_del_from_resolv_list *sent;
2003 struct hci_ev_status *rp = data;
2004
2005 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2006
2007 if (rp->status)
2008 return rp->status;
2009
2010 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2011 if (!sent)
2012 return rp->status;
2013
2014 hci_dev_lock(hdev);
2015 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2016 sent->bdaddr_type);
2017 hci_dev_unlock(hdev);
2018
2019 return rp->status;
2020 }
2021
2022 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2023 struct sk_buff *skb)
2024 {
2025 struct hci_ev_status *rp = data;
2026
2027 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2028
2029 if (rp->status)
2030 return rp->status;
2031
2032 hci_dev_lock(hdev);
2033 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2034 hci_dev_unlock(hdev);
2035
2036 return rp->status;
2037 }
2038
2039 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2040 struct sk_buff *skb)
2041 {
2042 struct hci_rp_le_read_resolv_list_size *rp = data;
2043
2044 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2045
2046 if (rp->status)
2047 return rp->status;
2048
2049 hdev->le_resolv_list_size = rp->size;
2050
2051 return rp->status;
2052 }
2053
2054 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2055 struct sk_buff *skb)
2056 {
2057 struct hci_ev_status *rp = data;
2058 __u8 *sent;
2059
2060 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2061
2062 if (rp->status)
2063 return rp->status;
2064
2065 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2066 if (!sent)
2067 return rp->status;
2068
2069 hci_dev_lock(hdev);
2070
2071 if (*sent)
2072 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2073 else
2074 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2075
2076 hci_dev_unlock(hdev);
2077
2078 return rp->status;
2079 }
2080
2081 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2082 struct sk_buff *skb)
2083 {
2084 struct hci_rp_le_read_max_data_len *rp = data;
2085
2086 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2087
2088 if (rp->status)
2089 return rp->status;
2090
2091 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2092 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2093 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2094 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2095
2096 return rp->status;
2097 }
2098
2099 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2100 struct sk_buff *skb)
2101 {
2102 struct hci_cp_write_le_host_supported *sent;
2103 struct hci_ev_status *rp = data;
2104
2105 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2106
2107 if (rp->status)
2108 return rp->status;
2109
2110 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2111 if (!sent)
2112 return rp->status;
2113
2114 hci_dev_lock(hdev);
2115
2116 if (sent->le) {
2117 hdev->features[1][0] |= LMP_HOST_LE;
2118 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2119 } else {
2120 hdev->features[1][0] &= ~LMP_HOST_LE;
2121 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2122 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2123 }
2124
2125 if (sent->simul)
2126 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2127 else
2128 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2129
2130 hci_dev_unlock(hdev);
2131
2132 return rp->status;
2133 }
2134
2135 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2136 struct sk_buff *skb)
2137 {
2138 struct hci_cp_le_set_adv_param *cp;
2139 struct hci_ev_status *rp = data;
2140
2141 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2142
2143 if (rp->status)
2144 return rp->status;
2145
2146 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2147 if (!cp)
2148 return rp->status;
2149
2150 hci_dev_lock(hdev);
2151 hdev->adv_addr_type = cp->own_address_type;
2152 hci_dev_unlock(hdev);
2153
2154 return rp->status;
2155 }
2156
2157 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2158 struct sk_buff *skb)
2159 {
2160 struct hci_rp_read_rssi *rp = data;
2161 struct hci_conn *conn;
2162
2163 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2164
2165 if (rp->status)
2166 return rp->status;
2167
2168 hci_dev_lock(hdev);
2169
2170 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2171 if (conn)
2172 conn->rssi = rp->rssi;
2173
2174 hci_dev_unlock(hdev);
2175
2176 return rp->status;
2177 }
2178
2179 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2180 struct sk_buff *skb)
2181 {
2182 struct hci_cp_read_tx_power *sent;
2183 struct hci_rp_read_tx_power *rp = data;
2184 struct hci_conn *conn;
2185
2186 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2187
2188 if (rp->status)
2189 return rp->status;
2190
2191 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2192 if (!sent)
2193 return rp->status;
2194
2195 hci_dev_lock(hdev);
2196
2197 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2198 if (!conn)
2199 goto unlock;
2200
2201 switch (sent->type) {
2202 case 0x00:
2203 conn->tx_power = rp->tx_power;
2204 break;
2205 case 0x01:
2206 conn->max_tx_power = rp->tx_power;
2207 break;
2208 }
2209
2210 unlock:
2211 hci_dev_unlock(hdev);
2212 return rp->status;
2213 }
2214
2215 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2216 struct sk_buff *skb)
2217 {
2218 struct hci_ev_status *rp = data;
2219 u8 *mode;
2220
2221 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2222
2223 if (rp->status)
2224 return rp->status;
2225
2226 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2227 if (mode)
2228 hdev->ssp_debug_mode = *mode;
2229
2230 return rp->status;
2231 }
2232
2233 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2234 {
2235 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2236
2237 if (status)
2238 return;
2239
2240 if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2241 set_bit(HCI_INQUIRY, &hdev->flags);
2242 }
2243
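/* Handle the Command Status for HCI_Create_Connection: on failure, close
 * and delete any conn still in BT_CONNECT state; on success, make sure a
 * conn object exists for the pending link, creating one in master role if
 * the lookup fails.
 */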
2244 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2245 {
2246 struct hci_cp_create_conn *cp;
2247 struct hci_conn *conn;
2248
2249 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2250
2251 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2252 if (!cp)
2253 return;
2254
2255 hci_dev_lock(hdev);
2256
2257 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2258
2259 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2260
2261 if (status) {
2262 if (conn && conn->state == BT_CONNECT) {
2263 conn->state = BT_CLOSED;
2264 hci_connect_cfm(conn, status);
2265 hci_conn_del(conn);
2266 }
2267 } else {
2268 if (!conn) {
2269 conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2270 HCI_ROLE_MASTER);
2271 if (IS_ERR(conn))
2272 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
2273 }
2274 }
2275
2276 hci_dev_unlock(hdev);
2277 }
2278
2279 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2280 {
2281 struct hci_cp_add_sco *cp;
2282 struct hci_conn *acl;
2283 struct hci_link *link;
2284 __u16 handle;
2285
2286 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2287
2288 if (!status)
2289 return;
2290
2291 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2292 if (!cp)
2293 return;
2294
2295 handle = __le16_to_cpu(cp->handle);
2296
2297 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2298
2299 hci_dev_lock(hdev);
2300
2301 acl = hci_conn_hash_lookup_handle(hdev, handle);
2302 if (acl) {
2303 link = list_first_entry_or_null(&acl->link_list,
2304 struct hci_link, list);
2305 if (link && link->conn) {
2306 link->conn->state = BT_CLOSED;
2307
2308 hci_connect_cfm(link->conn, status);
2309 hci_conn_del(link->conn);
2310 }
2311 }
2312
2313 hci_dev_unlock(hdev);
2314 }
2315
2316 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2317 {
2318 struct hci_cp_auth_requested *cp;
2319 struct hci_conn *conn;
2320
2321 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2322
2323 if (!status)
2324 return;
2325
2326 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2327 if (!cp)
2328 return;
2329
2330 hci_dev_lock(hdev);
2331
2332 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2333 if (conn) {
2334 if (conn->state == BT_CONFIG) {
2335 hci_connect_cfm(conn, status);
2336 hci_conn_drop(conn);
2337 }
2338 }
2339
2340 hci_dev_unlock(hdev);
2341 }
2342
2343 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2344 {
2345 struct hci_cp_set_conn_encrypt *cp;
2346 struct hci_conn *conn;
2347
2348 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2349
2350 if (!status)
2351 return;
2352
2353 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2354 if (!cp)
2355 return;
2356
2357 hci_dev_lock(hdev);
2358
2359 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2360 if (conn) {
2361 if (conn->state == BT_CONFIG) {
2362 hci_connect_cfm(conn, status);
2363 hci_conn_drop(conn);
2364 }
2365 }
2366
2367 hci_dev_unlock(hdev);
2368 }
2369
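/* Decide whether an outgoing connection that is still in BT_CONFIG needs
 * an authentication request before it can be reported as connected.
 */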
2370 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2371 struct hci_conn *conn)
2372 {
2373 if (conn->state != BT_CONFIG || !conn->out)
2374 return 0;
2375
2376 if (conn->pending_sec_level == BT_SECURITY_SDP)
2377 return 0;
2378
2379 /* Only request authentication for SSP connections or non-SSP
2380 * devices with sec_level MEDIUM or HIGH or if MITM protection
2381 * is requested.
2382 */
2383 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2384 conn->pending_sec_level != BT_SECURITY_FIPS &&
2385 conn->pending_sec_level != BT_SECURITY_HIGH &&
2386 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2387 return 0;
2388
2389 return 1;
2390 }
2391
2392 static int hci_resolve_name(struct hci_dev *hdev,
2393 struct inquiry_entry *e)
2394 {
2395 struct hci_cp_remote_name_req cp;
2396
2397 memset(&cp, 0, sizeof(cp));
2398
2399 bacpy(&cp.bdaddr, &e->data.bdaddr);
2400 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2401 cp.pscan_mode = e->data.pscan_mode;
2402 cp.clock_offset = e->data.clock_offset;
2403
2404 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2405 }
2406
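/* Pick the next inquiry cache entry that still needs its name resolved
 * and send a Remote Name Request for it. Returns true if a request was
 * sent; gives up once the resolve list is empty or the name resolution
 * window has elapsed.
 */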
2407 static bool hci_resolve_next_name(struct hci_dev *hdev)
2408 {
2409 struct discovery_state *discov = &hdev->discovery;
2410 struct inquiry_entry *e;
2411
2412 if (list_empty(&discov->resolve))
2413 return false;
2414
2415 /* We should stop if we already spent too much time resolving names. */
2416 if (time_after(jiffies, discov->name_resolve_timeout)) {
2417 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2418 return false;
2419 }
2420
2421 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2422 if (!e)
2423 return false;
2424
2425 if (hci_resolve_name(hdev, e) == 0) {
2426 e->name_state = NAME_PENDING;
2427 return true;
2428 }
2429
2430 return false;
2431 }
2432
2433 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2434 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2435 {
2436 struct discovery_state *discov = &hdev->discovery;
2437 struct inquiry_entry *e;
2438
2439 /* Update the mgmt connected state if necessary. Be careful,
2440 * however, with conn objects that exist but are not (yet)
2441 * connected: only those in the BT_CONFIG or BT_CONNECTED states
2442 * can be considered connected.
2443 */
2444 if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2445 mgmt_device_connected(hdev, conn, name, name_len);
2446
2447 if (discov->state == DISCOVERY_STOPPED)
2448 return;
2449
2450 if (discov->state == DISCOVERY_STOPPING)
2451 goto discov_complete;
2452
2453 if (discov->state != DISCOVERY_RESOLVING)
2454 return;
2455
2456 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2457 /* If the device was not found in the list of devices whose names
2458 * are pending resolution, there is no need to resolve the next name
2459 * now, as that will be done upon receiving another Remote Name
2460 * Request Complete event. */
2461 if (!e)
2462 return;
2463
2464 list_del(&e->list);
2465
2466 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2467 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2468 name, name_len);
2469
2470 if (hci_resolve_next_name(hdev))
2471 return;
2472
2473 discov_complete:
2474 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2475 }
2476
2477 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2478 {
2479 struct hci_cp_remote_name_req *cp;
2480 struct hci_conn *conn;
2481
2482 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2483
2484 /* If successful, wait for the name request complete event before
2485 * checking whether authentication is needed. */
2486 if (!status)
2487 return;
2488
2489 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2490 if (!cp)
2491 return;
2492
2493 hci_dev_lock(hdev);
2494
2495 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2496
2497 if (hci_dev_test_flag(hdev, HCI_MGMT))
2498 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2499
2500 if (!conn)
2501 goto unlock;
2502
2503 if (!hci_outgoing_auth_needed(hdev, conn))
2504 goto unlock;
2505
2506 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2507 struct hci_cp_auth_requested auth_cp;
2508
2509 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2510
2511 auth_cp.handle = __cpu_to_le16(conn->handle);
2512 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2513 sizeof(auth_cp), &auth_cp);
2514 }
2515
2516 unlock:
2517 hci_dev_unlock(hdev);
2518 }
2519
2520 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2521 {
2522 struct hci_cp_read_remote_features *cp;
2523 struct hci_conn *conn;
2524
2525 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2526
2527 if (!status)
2528 return;
2529
2530 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2531 if (!cp)
2532 return;
2533
2534 hci_dev_lock(hdev);
2535
2536 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2537 if (conn) {
2538 if (conn->state == BT_CONFIG) {
2539 hci_connect_cfm(conn, status);
2540 hci_conn_drop(conn);
2541 }
2542 }
2543
2544 hci_dev_unlock(hdev);
2545 }
2546
2547 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2548 {
2549 struct hci_cp_read_remote_ext_features *cp;
2550 struct hci_conn *conn;
2551
2552 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2553
2554 if (!status)
2555 return;
2556
2557 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2558 if (!cp)
2559 return;
2560
2561 hci_dev_lock(hdev);
2562
2563 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2564 if (conn) {
2565 if (conn->state == BT_CONFIG) {
2566 hci_connect_cfm(conn, status);
2567 hci_conn_drop(conn);
2568 }
2569 }
2570
2571 hci_dev_unlock(hdev);
2572 }
2573
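/* Common failure path for the (Enhanced) Setup Synchronous Connection
 * commands: look up the SCO/eSCO link hanging off the ACL connection with
 * the given handle, mark it closed and delete it after notifying the
 * upper layers.
 */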
2574 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2575 __u8 status)
2576 {
2577 struct hci_conn *acl;
2578 struct hci_link *link;
2579
2580 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2581
2582 hci_dev_lock(hdev);
2583
2584 acl = hci_conn_hash_lookup_handle(hdev, handle);
2585 if (acl) {
2586 link = list_first_entry_or_null(&acl->link_list,
2587 struct hci_link, list);
2588 if (link && link->conn) {
2589 link->conn->state = BT_CLOSED;
2590
2591 hci_connect_cfm(link->conn, status);
2592 hci_conn_del(link->conn);
2593 }
2594 }
2595
2596 hci_dev_unlock(hdev);
2597 }
2598
2599 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2600 {
2601 struct hci_cp_setup_sync_conn *cp;
2602
2603 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2604
2605 if (!status)
2606 return;
2607
2608 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2609 if (!cp)
2610 return;
2611
2612 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2613 }
2614
2615 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2616 {
2617 struct hci_cp_enhanced_setup_sync_conn *cp;
2618
2619 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2620
2621 if (!status)
2622 return;
2623
2624 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2625 if (!cp)
2626 return;
2627
2628 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2629 }
2630
2631 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2632 {
2633 struct hci_cp_sniff_mode *cp;
2634 struct hci_conn *conn;
2635
2636 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2637
2638 if (!status)
2639 return;
2640
2641 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2642 if (!cp)
2643 return;
2644
2645 hci_dev_lock(hdev);
2646
2647 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2648 if (conn) {
2649 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2650
2651 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2652 hci_sco_setup(conn, status);
2653 }
2654
2655 hci_dev_unlock(hdev);
2656 }
2657
2658 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2659 {
2660 struct hci_cp_exit_sniff_mode *cp;
2661 struct hci_conn *conn;
2662
2663 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2664
2665 if (!status)
2666 return;
2667
2668 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2669 if (!cp)
2670 return;
2671
2672 hci_dev_lock(hdev);
2673
2674 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2675 if (conn) {
2676 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2677
2678 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2679 hci_sco_setup(conn, status);
2680 }
2681
2682 hci_dev_unlock(hdev);
2683 }
2684
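/* Handle the Command Status for HCI_Disconnect. Normally the cleanup is
 * deferred to the Disconnection Complete event; this handler only acts
 * when the command failed or while suspended, since in the latter case
 * the completion event may never arrive.
 */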
2685 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2686 {
2687 struct hci_cp_disconnect *cp;
2688 struct hci_conn_params *params;
2689 struct hci_conn *conn;
2690 bool mgmt_conn;
2691
2692 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2693
2694 /* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and the device is
2695 * not suspended; otherwise clean up the connection immediately.
2696 */
2697 if (!status && !hdev->suspended)
2698 return;
2699
2700 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2701 if (!cp)
2702 return;
2703
2704 hci_dev_lock(hdev);
2705
2706 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2707 if (!conn)
2708 goto unlock;
2709
2710 if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
2711 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2712 conn->dst_type, status);
2713
2714 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2715 hdev->cur_adv_instance = conn->adv_instance;
2716 hci_enable_advertising(hdev);
2717 }
2718
2719 /* Inform sockets conn is gone before we delete it */
2720 hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2721
2722 goto done;
2723 }
2724
2725 /* During suspend, mark connection as closed immediately
2726 * since we might not receive HCI_EV_DISCONN_COMPLETE
2727 */
2728 if (hdev->suspended)
2729 conn->state = BT_CLOSED;
2730
2731 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2732
2733 if (conn->type == ACL_LINK) {
2734 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2735 hci_remove_link_key(hdev, &conn->dst);
2736 }
2737
2738 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2739 if (params) {
2740 switch (params->auto_connect) {
2741 case HCI_AUTO_CONN_LINK_LOSS:
2742 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2743 break;
2744 fallthrough;
2745
2746 case HCI_AUTO_CONN_DIRECT:
2747 case HCI_AUTO_CONN_ALWAYS:
2748 hci_pend_le_list_del_init(params);
2749 hci_pend_le_list_add(params, &hdev->pend_le_conns);
2750 break;
2751
2752 default:
2753 break;
2754 }
2755 }
2756
2757 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2758 cp->reason, mgmt_conn);
2759
2760 hci_disconn_cfm(conn, cp->reason);
2761
2762 done:
2763 /* If the disconnection failed for any reason, the upper layer
2764 * does not retry the disconnection in the current implementation.
2765 * Hence, we need to do some basic cleanup here and re-enable
2766 * advertising if necessary.
2767 */
2768 hci_conn_del(conn);
2769 unlock:
2770 hci_dev_unlock(hdev);
2771 }
2772
2773 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2774 {
2775 /* When controller-based address resolution is in use, the new
2776 * address types 0x02 and 0x03 are used. These types need to be
2777 * converted back into either the public or the random address type.
2778 */
2779 switch (type) {
2780 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2781 if (resolved)
2782 *resolved = true;
2783 return ADDR_LE_DEV_PUBLIC;
2784 case ADDR_LE_DEV_RANDOM_RESOLVED:
2785 if (resolved)
2786 *resolved = true;
2787 return ADDR_LE_DEV_RANDOM;
2788 }
2789
2790 if (resolved)
2791 *resolved = false;
2792 return type;
2793 }
2794
2795 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2796 u8 peer_addr_type, u8 own_address_type,
2797 u8 filter_policy)
2798 {
2799 struct hci_conn *conn;
2800
2801 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2802 peer_addr_type);
2803 if (!conn)
2804 return;
2805
2806 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2807
2808 /* Store the initiator and responder address information which
2809 * is needed for SMP. These values will not change during the
2810 * lifetime of the connection.
2811 */
2812 conn->init_addr_type = own_address_type;
2813 if (own_address_type == ADDR_LE_DEV_RANDOM)
2814 bacpy(&conn->init_addr, &hdev->random_addr);
2815 else
2816 bacpy(&conn->init_addr, &hdev->bdaddr);
2817
2818 conn->resp_addr_type = peer_addr_type;
2819 bacpy(&conn->resp_addr, peer_addr);
2820 }
2821
2822 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2823 {
2824 struct hci_cp_le_create_conn *cp;
2825
2826 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2827
2828 /* All connection failure handling is taken care of by the
2829 * hci_conn_failed function which is triggered by the HCI
2830 * request completion callbacks used for connecting.
2831 */
2832 if (status)
2833 return;
2834
2835 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2836 if (!cp)
2837 return;
2838
2839 hci_dev_lock(hdev);
2840
2841 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2842 cp->own_address_type, cp->filter_policy);
2843
2844 hci_dev_unlock(hdev);
2845 }
2846
2847 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2848 {
2849 struct hci_cp_le_ext_create_conn *cp;
2850
2851 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2852
2853 /* All connection failure handling is taken care of by the
2854 * hci_conn_failed function which is triggered by the HCI
2855 * request completion callbacks used for connecting.
2856 */
2857 if (status)
2858 return;
2859
2860 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2861 if (!cp)
2862 return;
2863
2864 hci_dev_lock(hdev);
2865
2866 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2867 cp->own_addr_type, cp->filter_policy);
2868
2869 hci_dev_unlock(hdev);
2870 }
2871
2872 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2873 {
2874 struct hci_cp_le_read_remote_features *cp;
2875 struct hci_conn *conn;
2876
2877 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2878
2879 if (!status)
2880 return;
2881
2882 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2883 if (!cp)
2884 return;
2885
2886 hci_dev_lock(hdev);
2887
2888 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2889 if (conn) {
2890 if (conn->state == BT_CONFIG) {
2891 hci_connect_cfm(conn, status);
2892 hci_conn_drop(conn);
2893 }
2894 }
2895
2896 hci_dev_unlock(hdev);
2897 }
2898
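/* Handle the Command Status for HCI_LE_Start_Encryption. A failure here
 * means encryption could not even be started, so disconnect with an
 * authentication failure rather than leaving the link connected but
 * unencrypted.
 */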
2899 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2900 {
2901 struct hci_cp_le_start_enc *cp;
2902 struct hci_conn *conn;
2903
2904 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2905
2906 if (!status)
2907 return;
2908
2909 hci_dev_lock(hdev);
2910
2911 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2912 if (!cp)
2913 goto unlock;
2914
2915 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2916 if (!conn)
2917 goto unlock;
2918
2919 if (conn->state != BT_CONNECTED)
2920 goto unlock;
2921
2922 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2923 hci_conn_drop(conn);
2924
2925 unlock:
2926 hci_dev_unlock(hdev);
2927 }
2928
2929 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2930 {
2931 struct hci_cp_switch_role *cp;
2932 struct hci_conn *conn;
2933
2934 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2935
2936 if (!status)
2937 return;
2938
2939 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2940 if (!cp)
2941 return;
2942
2943 hci_dev_lock(hdev);
2944
2945 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2946 if (conn)
2947 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2948
2949 hci_dev_unlock(hdev);
2950 }
2951
2952 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2953 struct sk_buff *skb)
2954 {
2955 struct hci_ev_status *ev = data;
2956 struct discovery_state *discov = &hdev->discovery;
2957 struct inquiry_entry *e;
2958
2959 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2960
2961 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2962 return;
2963
2964 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2965 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2966
2967 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2968 return;
2969
2970 hci_dev_lock(hdev);
2971
2972 if (discov->state != DISCOVERY_FINDING)
2973 goto unlock;
2974
2975 if (list_empty(&discov->resolve)) {
2976 /* When BR/EDR inquiry is active and no LE scanning is in
2977 * progress, then change discovery state to indicate completion.
2978 *
2979 * When running LE scanning and BR/EDR inquiry simultaneously
2980 * and the LE scan already finished, then change the discovery
2981 * state to indicate completion.
2982 */
2983 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2984 !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
2985 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2986 goto unlock;
2987 }
2988
2989 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2990 if (e && hci_resolve_name(hdev, e) == 0) {
2991 e->name_state = NAME_PENDING;
2992 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2993 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
2994 } else {
2995 /* When BR/EDR inquiry is active and no LE scanning is in
2996 * progress, then change discovery state to indicate completion.
2997 *
2998 * When running LE scanning and BR/EDR inquiry simultaneously
2999 * and the LE scan already finished, then change the discovery
3000 * state to indicate completion.
3001 */
3002 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3003 !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
3004 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3005 }
3006
3007 unlock:
3008 hci_dev_unlock(hdev);
3009 }
3010
3011 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3012 struct sk_buff *skb)
3013 {
3014 struct hci_ev_inquiry_result *ev = edata;
3015 struct inquiry_data data;
3016 int i;
3017
3018 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3019 flex_array_size(ev, info, ev->num)))
3020 return;
3021
3022 bt_dev_dbg(hdev, "num %d", ev->num);
3023
3024 if (!ev->num)
3025 return;
3026
3027 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3028 return;
3029
3030 hci_dev_lock(hdev);
3031
3032 for (i = 0; i < ev->num; i++) {
3033 struct inquiry_info *info = &ev->info[i];
3034 u32 flags;
3035
3036 bacpy(&data.bdaddr, &info->bdaddr);
3037 data.pscan_rep_mode = info->pscan_rep_mode;
3038 data.pscan_period_mode = info->pscan_period_mode;
3039 data.pscan_mode = info->pscan_mode;
3040 memcpy(data.dev_class, info->dev_class, 3);
3041 data.clock_offset = info->clock_offset;
3042 data.rssi = HCI_RSSI_INVALID;
3043 data.ssp_mode = 0x00;
3044
3045 flags = hci_inquiry_cache_update(hdev, &data, false);
3046
3047 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3048 info->dev_class, HCI_RSSI_INVALID,
3049 flags, NULL, 0, NULL, 0, 0);
3050 }
3051
3052 hci_dev_unlock(hdev);
3053 }
3054
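/* Request the encryption key size for a newly encrypted connection. If
 * the controller cannot report it, fall back to assuming the maximum
 * HCI_LINK_KEY_SIZE.
 */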
3055 static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn)
3056 {
3057 struct hci_cp_read_enc_key_size cp;
3058 u8 *key_enc_size = hci_conn_key_enc_size(conn);
3059
3060 if (!read_key_size_capable(hdev)) {
3061 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3062 return -EOPNOTSUPP;
3063 }
3064
3065 bt_dev_dbg(hdev, "hcon %p", conn);
3066
3067 memset(&cp, 0, sizeof(cp));
3068 cp.handle = cpu_to_le16(conn->handle);
3069
3070 /* If the key enc_size is already known, use it as conn->enc_key_size,
3071 * otherwise use hdev->min_enc_key_size so the likes of
3072 * l2cap_check_enc_key_size don't fail while waiting for
3073 * HCI_OP_READ_ENC_KEY_SIZE response.
3074 */
3075 if (key_enc_size && *key_enc_size)
3076 conn->enc_key_size = *key_enc_size;
3077 else
3078 conn->enc_key_size = hdev->min_enc_key_size;
3079
3080 return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3081 }
3082
3083 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3084 struct sk_buff *skb)
3085 {
3086 struct hci_ev_conn_complete *ev = data;
3087 struct hci_conn *conn;
3088 u8 status = ev->status;
3089
3090 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3091
3092 hci_dev_lock(hdev);
3093
3094 /* Check for existing connection:
3095 *
3096 * 1. If it doesn't exist then it must be receiver/slave role.
3097 * 2. If it does exist confirm that it is connecting/BT_CONNECT in case
3098 * of initiator/master role since there could be a collision where
3099 * either side is attempting to connect, or something like fuzz
3100 * testing is trying to play tricks to destroy the hcon object before
3101 * it even attempts to connect (e.g. hcon->state == BT_OPEN).
3102 */
3103 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3104 if (!conn ||
3105 (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
3106 /* In case of error status and there is no connection pending
3107 * just unlock as there is nothing to cleanup.
3108 */
3109 if (ev->status)
3110 goto unlock;
3111
3112 /* Connection may not exist if auto-connected. Check the bredr
3113 * allowlist to see if this device is allowed to auto connect.
3114 * If link is an ACL type, create a connection class
3115 * automatically.
3116 *
3117 * Auto-connect will only occur if the event filter is
3118 * programmed with a given address. Right now, event filter is
3119 * only used during suspend.
3120 */
3121 if (ev->link_type == ACL_LINK &&
3122 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3123 &ev->bdaddr,
3124 BDADDR_BREDR)) {
3125 conn = hci_conn_add_unset(hdev, ev->link_type,
3126 &ev->bdaddr, HCI_ROLE_SLAVE);
3127 if (IS_ERR(conn)) {
3128 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3129 goto unlock;
3130 }
3131 } else {
3132 if (ev->link_type != SCO_LINK)
3133 goto unlock;
3134
3135 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3136 &ev->bdaddr);
3137 if (!conn)
3138 goto unlock;
3139
3140 conn->type = SCO_LINK;
3141 }
3142 }
3143
3144 /* The HCI_Connection_Complete event is only sent once per connection.
3145 * Processing it more than once per connection can corrupt kernel memory.
3146 *
3147 * As the connection handle is set here for the first time, it indicates
3148 * whether the connection is already set up.
3149 */
3150 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3151 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3152 goto unlock;
3153 }
3154
3155 if (!status) {
3156 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3157 if (status)
3158 goto done;
3159
3160 if (conn->type == ACL_LINK) {
3161 conn->state = BT_CONFIG;
3162 hci_conn_hold(conn);
3163
3164 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3165 !hci_find_link_key(hdev, &ev->bdaddr))
3166 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3167 else
3168 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3169 } else
3170 conn->state = BT_CONNECTED;
3171
3172 hci_debugfs_create_conn(conn);
3173 hci_conn_add_sysfs(conn);
3174
3175 if (test_bit(HCI_AUTH, &hdev->flags))
3176 set_bit(HCI_CONN_AUTH, &conn->flags);
3177
3178 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3179 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3180
3181 /* "Link key request" completed ahead of "connect request" completes */
3182 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3183 ev->link_type == ACL_LINK) {
3184 struct link_key *key;
3185
3186 key = hci_find_link_key(hdev, &ev->bdaddr);
3187 if (key) {
3188 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3189 hci_read_enc_key_size(hdev, conn);
3190 hci_encrypt_cfm(conn, ev->status);
3191 }
3192 }
3193
3194 /* Get remote features */
3195 if (conn->type == ACL_LINK) {
3196 struct hci_cp_read_remote_features cp;
3197 cp.handle = ev->handle;
3198 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3199 sizeof(cp), &cp);
3200
3201 hci_update_scan(hdev);
3202 }
3203
3204 /* Set packet type for incoming connection */
3205 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3206 struct hci_cp_change_conn_ptype cp;
3207 cp.handle = ev->handle;
3208 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3209 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3210 &cp);
3211 }
3212 }
3213
3214 if (conn->type == ACL_LINK)
3215 hci_sco_setup(conn, ev->status);
3216
3217 done:
3218 if (status) {
3219 hci_conn_failed(conn, status);
3220 } else if (ev->link_type == SCO_LINK) {
3221 switch (conn->setting & SCO_AIRMODE_MASK) {
3222 case SCO_AIRMODE_CVSD:
3223 if (hdev->notify)
3224 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3225 break;
3226 }
3227
3228 hci_connect_cfm(conn, status);
3229 }
3230
3231 unlock:
3232 hci_dev_unlock(hdev);
3233 }
3234
3235 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3236 {
3237 struct hci_cp_reject_conn_req cp;
3238
3239 bacpy(&cp.bdaddr, bdaddr);
3240 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3241 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3242 }
3243
3244 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3245 struct sk_buff *skb)
3246 {
3247 struct hci_ev_conn_request *ev = data;
3248 int mask = hdev->link_mode;
3249 struct inquiry_entry *ie;
3250 struct hci_conn *conn;
3251 __u8 flags = 0;
3252
3253 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3254
3255 /* Reject incoming connection from device with same BD ADDR against
3256 * CVE-2020-26555
3257 */
3258 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3259 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
3260 &ev->bdaddr);
3261 hci_reject_conn(hdev, &ev->bdaddr);
3262 return;
3263 }
3264
3265 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3266 &flags);
3267
3268 if (!(mask & HCI_LM_ACCEPT)) {
3269 hci_reject_conn(hdev, &ev->bdaddr);
3270 return;
3271 }
3272
3273 hci_dev_lock(hdev);
3274
3275 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3276 BDADDR_BREDR)) {
3277 hci_reject_conn(hdev, &ev->bdaddr);
3278 goto unlock;
3279 }
3280
3281 /* Require HCI_CONNECTABLE or an accept list entry to accept the
3282 * connection. These features are only touched through mgmt so
3283 * only do the checks if HCI_MGMT is set.
3284 */
3285 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3286 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3287 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3288 BDADDR_BREDR)) {
3289 hci_reject_conn(hdev, &ev->bdaddr);
3290 goto unlock;
3291 }
3292
3293 /* Connection accepted */
3294
3295 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3296 if (ie)
3297 memcpy(ie->data.dev_class, ev->dev_class, 3);
3298
3299 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3300 &ev->bdaddr);
3301 if (!conn) {
3302 conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3303 HCI_ROLE_SLAVE);
3304 if (IS_ERR(conn)) {
3305 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
3306 goto unlock;
3307 }
3308 }
3309
3310 memcpy(conn->dev_class, ev->dev_class, 3);
3311
3312 hci_dev_unlock(hdev);
3313
3314 if (ev->link_type == ACL_LINK ||
3315 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3316 struct hci_cp_accept_conn_req cp;
3317 conn->state = BT_CONNECT;
3318
3319 bacpy(&cp.bdaddr, &ev->bdaddr);
3320
3321 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3322 cp.role = 0x00; /* Become central */
3323 else
3324 cp.role = 0x01; /* Remain peripheral */
3325
3326 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3327 } else if (!(flags & HCI_PROTO_DEFER)) {
3328 struct hci_cp_accept_sync_conn_req cp;
3329 conn->state = BT_CONNECT;
3330
3331 bacpy(&cp.bdaddr, &ev->bdaddr);
3332 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3333
3334 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
3335 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
3336 cp.max_latency = cpu_to_le16(0xffff);
3337 cp.content_format = cpu_to_le16(hdev->voice_setting);
3338 cp.retrans_effort = 0xff;
3339
3340 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3341 &cp);
3342 } else {
3343 conn->state = BT_CONNECT2;
3344 hci_connect_cfm(conn, 0);
3345 }
3346
3347 return;
3348 unlock:
3349 hci_dev_unlock(hdev);
3350 }
3351
3352 static u8 hci_to_mgmt_reason(u8 err)
3353 {
3354 switch (err) {
3355 case HCI_ERROR_CONNECTION_TIMEOUT:
3356 return MGMT_DEV_DISCONN_TIMEOUT;
3357 case HCI_ERROR_REMOTE_USER_TERM:
3358 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3359 case HCI_ERROR_REMOTE_POWER_OFF:
3360 return MGMT_DEV_DISCONN_REMOTE;
3361 case HCI_ERROR_LOCAL_HOST_TERM:
3362 return MGMT_DEV_DISCONN_LOCAL_HOST;
3363 default:
3364 return MGMT_DEV_DISCONN_UNKNOWN;
3365 }
3366 }
3367
3368 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3369 struct sk_buff *skb)
3370 {
3371 struct hci_ev_disconn_complete *ev = data;
3372 u8 reason;
3373 struct hci_conn_params *params;
3374 struct hci_conn *conn;
3375 bool mgmt_connected;
3376
3377 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3378
3379 hci_dev_lock(hdev);
3380
3381 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3382 if (!conn)
3383 goto unlock;
3384
3385 if (ev->status) {
3386 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3387 conn->dst_type, ev->status);
3388 goto unlock;
3389 }
3390
3391 conn->state = BT_CLOSED;
3392
3393 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3394
3395 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3396 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3397 else
3398 reason = hci_to_mgmt_reason(ev->reason);
3399
3400 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3401 reason, mgmt_connected);
3402
3403 if (conn->type == ACL_LINK) {
3404 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3405 hci_remove_link_key(hdev, &conn->dst);
3406
3407 hci_update_scan(hdev);
3408 }
3409
3410 /* Re-enable passive scanning if disconnected device is marked
3411 * as auto-connectable.
3412 */
3413 if (conn->type == LE_LINK) {
3414 params = hci_conn_params_lookup(hdev, &conn->dst,
3415 conn->dst_type);
3416 if (params) {
3417 switch (params->auto_connect) {
3418 case HCI_AUTO_CONN_LINK_LOSS:
3419 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3420 break;
3421 fallthrough;
3422
3423 case HCI_AUTO_CONN_DIRECT:
3424 case HCI_AUTO_CONN_ALWAYS:
3425 hci_pend_le_list_del_init(params);
3426 hci_pend_le_list_add(params,
3427 &hdev->pend_le_conns);
3428 hci_update_passive_scan(hdev);
3429 break;
3430
3431 default:
3432 break;
3433 }
3434 }
3435 }
3436
3437 hci_disconn_cfm(conn, ev->reason);
3438
3439 /* Re-enable advertising if necessary, since it might
3440 * have been disabled by the connection. From the
3441 * HCI_LE_Set_Advertise_Enable command description in
3442 * the core specification (v4.0):
3443 * "The Controller shall continue advertising until the Host
3444 * issues an LE_Set_Advertise_Enable command with
3445 * Advertising_Enable set to 0x00 (Advertising is disabled)
3446 * or until a connection is created or until the Advertising
3447 * is timed out due to Directed Advertising."
3448 */
3449 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3450 hdev->cur_adv_instance = conn->adv_instance;
3451 hci_enable_advertising(hdev);
3452 }
3453
3454 hci_conn_del(conn);
3455
3456 unlock:
3457 hci_dev_unlock(hdev);
3458 }
3459
3460 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3461 struct sk_buff *skb)
3462 {
3463 struct hci_ev_auth_complete *ev = data;
3464 struct hci_conn *conn;
3465
3466 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3467
3468 hci_dev_lock(hdev);
3469
3470 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3471 if (!conn)
3472 goto unlock;
3473
3474 if (!ev->status) {
3475 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3476 set_bit(HCI_CONN_AUTH, &conn->flags);
3477 conn->sec_level = conn->pending_sec_level;
3478 } else {
3479 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3480 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3481
3482 mgmt_auth_failed(conn, ev->status);
3483 }
3484
3485 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3486
3487 if (conn->state == BT_CONFIG) {
3488 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3489 struct hci_cp_set_conn_encrypt cp;
3490 cp.handle = ev->handle;
3491 cp.encrypt = 0x01;
3492 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3493 &cp);
3494 } else {
3495 conn->state = BT_CONNECTED;
3496 hci_connect_cfm(conn, ev->status);
3497 hci_conn_drop(conn);
3498 }
3499 } else {
3500 hci_auth_cfm(conn, ev->status);
3501
3502 hci_conn_hold(conn);
3503 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3504 hci_conn_drop(conn);
3505 }
3506
3507 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3508 if (!ev->status) {
3509 struct hci_cp_set_conn_encrypt cp;
3510 cp.handle = ev->handle;
3511 cp.encrypt = 0x01;
3512 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3513 &cp);
3514 } else {
3515 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3516 hci_encrypt_cfm(conn, ev->status);
3517 }
3518 }
3519
3520 unlock:
3521 hci_dev_unlock(hdev);
3522 }
3523
3524 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3525 struct sk_buff *skb)
3526 {
3527 struct hci_ev_remote_name *ev = data;
3528 struct hci_conn *conn;
3529
3530 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3531
3532 hci_dev_lock(hdev);
3533
3534 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3535
3536 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3537 goto check_auth;
3538
3539 if (ev->status == 0)
3540 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3541 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3542 else
3543 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3544
3545 check_auth:
3546 if (!conn)
3547 goto unlock;
3548
3549 if (!hci_outgoing_auth_needed(hdev, conn))
3550 goto unlock;
3551
3552 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3553 struct hci_cp_auth_requested cp;
3554
3555 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3556
3557 cp.handle = __cpu_to_le16(conn->handle);
3558 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3559 }
3560
3561 unlock:
3562 hci_dev_unlock(hdev);
3563 }
3564
3565 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3566 struct sk_buff *skb)
3567 {
3568 struct hci_ev_encrypt_change *ev = data;
3569 struct hci_conn *conn;
3570
3571 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3572
3573 hci_dev_lock(hdev);
3574
3575 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3576 if (!conn)
3577 goto unlock;
3578
3579 if (!ev->status) {
3580 if (ev->encrypt) {
3581 /* Encryption implies authentication */
3582 set_bit(HCI_CONN_AUTH, &conn->flags);
3583 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3584 conn->sec_level = conn->pending_sec_level;
3585
3586 /* P-256 authentication key implies FIPS */
3587 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3588 set_bit(HCI_CONN_FIPS, &conn->flags);
3589
3590 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3591 conn->type == LE_LINK)
3592 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3593 } else {
3594 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3595 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3596 }
3597 }
3598
3599 /* We should disregard the current RPA and generate a new one
3600 * whenever the encryption procedure fails.
3601 */
3602 if (ev->status && conn->type == LE_LINK) {
3603 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3604 hci_adv_instances_set_rpa_expired(hdev, true);
3605 }
3606
3607 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3608
3609 /* Check link security requirements are met */
3610 if (!hci_conn_check_link_mode(conn))
3611 ev->status = HCI_ERROR_AUTH_FAILURE;
3612
3613 if (ev->status && conn->state == BT_CONNECTED) {
3614 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3615 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3616
3617 /* Notify upper layers so they can cleanup before
3618 * disconnecting.
3619 */
3620 hci_encrypt_cfm(conn, ev->status);
3621 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3622 hci_conn_drop(conn);
3623 goto unlock;
3624 }
3625
3626 /* Try reading the encryption key size for encrypted ACL links */
3627 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3628 if (hci_read_enc_key_size(hdev, conn))
3629 goto notify;
3630
3631 goto unlock;
3632 }
3633
3634 /* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
3635 * to avoid unexpected SMP command errors when pairing.
3636 */
3637 if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT))
3638 goto notify;
3639
3640 /* Set the default Authenticated Payload Timeout after
3641 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3642 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3643 * sent when the link is active and encryption is enabled. The conn
3644 * type can be either LE or ACL, and the controller must support LMP
3645 * Ping. AES-CCM encryption must be in use as well.
3646 */
3647 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3648 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3649 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3650 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3651 struct hci_cp_write_auth_payload_to cp;
3652
3653 cp.handle = cpu_to_le16(conn->handle);
3654 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3655 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3656 sizeof(cp), &cp))
3657 bt_dev_err(hdev, "write auth payload timeout failed");
3658 }
3659
3660 notify:
3661 hci_encrypt_cfm(conn, ev->status);
3662
3663 unlock:
3664 hci_dev_unlock(hdev);
3665 }
3666
3667 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3668 struct sk_buff *skb)
3669 {
3670 struct hci_ev_change_link_key_complete *ev = data;
3671 struct hci_conn *conn;
3672
3673 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3674
3675 hci_dev_lock(hdev);
3676
3677 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3678 if (conn) {
3679 if (!ev->status)
3680 set_bit(HCI_CONN_SECURE, &conn->flags);
3681
3682 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3683
3684 hci_key_change_cfm(conn, ev->status);
3685 }
3686
3687 hci_dev_unlock(hdev);
3688 }
3689
3690 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3691 struct sk_buff *skb)
3692 {
3693 struct hci_ev_remote_features *ev = data;
3694 struct hci_conn *conn;
3695
3696 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3697
3698 hci_dev_lock(hdev);
3699
3700 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3701 if (!conn)
3702 goto unlock;
3703
3704 if (!ev->status)
3705 memcpy(conn->features[0], ev->features, 8);
3706
3707 if (conn->state != BT_CONFIG)
3708 goto unlock;
3709
3710 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3711 lmp_ext_feat_capable(conn)) {
3712 struct hci_cp_read_remote_ext_features cp;
3713 cp.handle = ev->handle;
3714 cp.page = 0x01;
3715 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3716 sizeof(cp), &cp);
3717 goto unlock;
3718 }
3719
3720 if (!ev->status) {
3721 struct hci_cp_remote_name_req cp;
3722 memset(&cp, 0, sizeof(cp));
3723 bacpy(&cp.bdaddr, &conn->dst);
3724 cp.pscan_rep_mode = 0x02;
3725 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3726 } else {
3727 mgmt_device_connected(hdev, conn, NULL, 0);
3728 }
3729
3730 if (!hci_outgoing_auth_needed(hdev, conn)) {
3731 conn->state = BT_CONNECTED;
3732 hci_connect_cfm(conn, ev->status);
3733 hci_conn_drop(conn);
3734 }
3735
3736 unlock:
3737 hci_dev_unlock(hdev);
3738 }
3739
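/* Process the Num_HCI_Command_Packets value carried by an event: a
 * non-zero count means the controller can accept another command, so
 * restore the command credit and cancel the ncmd watchdog; a zero count
 * keeps the queue stalled and arms the watchdog so a controller that
 * never replenishes its credits is eventually detected.
 */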
3740 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3741 {
3742 cancel_delayed_work(&hdev->cmd_timer);
3743
3744 rcu_read_lock();
3745 if (!test_bit(HCI_RESET, &hdev->flags)) {
3746 if (ncmd) {
3747 cancel_delayed_work(&hdev->ncmd_timer);
3748 atomic_set(&hdev->cmd_cnt, 1);
3749 } else {
3750 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3751 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3752 HCI_NCMD_TIMEOUT);
3753 }
3754 }
3755 rcu_read_unlock();
3756 }
3757
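/* The v2 variant of LE Read Buffer Size reports separate buffer counts
 * for LE ACL and ISO traffic; cache both so that packet scheduling can
 * account for them independently.
 */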
3758 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3759 struct sk_buff *skb)
3760 {
3761 struct hci_rp_le_read_buffer_size_v2 *rp = data;
3762
3763 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3764
3765 if (rp->status)
3766 return rp->status;
3767
3768 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
3769 hdev->le_pkts = rp->acl_max_pkt;
3770 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
3771 hdev->iso_pkts = rp->iso_max_pkt;
3772
3773 hdev->le_cnt = hdev->le_pkts;
3774 hdev->iso_cnt = hdev->iso_pkts;
3775
3776 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3777 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3778
3779 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3780 return HCI_ERROR_INVALID_PARAMETERS;
3781
3782 return rp->status;
3783 }
3784
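/* Fail every CIS of the given CIG that never received a connection
 * handle; CIS that already have a valid handle are left untouched.
 */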
3785 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3786 {
3787 struct hci_conn *conn, *tmp;
3788
3789 lockdep_assert_held(&hdev->lock);
3790
3791 list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3792 if (conn->type != CIS_LINK ||
3793 conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3794 continue;
3795
3796 if (HCI_CONN_HANDLE_UNSET(conn->handle))
3797 hci_conn_failed(conn, status);
3798 }
3799 }
3800
3801 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3802 struct sk_buff *skb)
3803 {
3804 struct hci_rp_le_set_cig_params *rp = data;
3805 struct hci_cp_le_set_cig_params *cp;
3806 struct hci_conn *conn;
3807 u8 status = rp->status;
3808 bool pending = false;
3809 int i;
3810
3811 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3812
3813 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3814 if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3815 rp->cig_id != cp->cig_id)) {
3816 bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3817 status = HCI_ERROR_UNSPECIFIED;
3818 }
3819
3820 hci_dev_lock(hdev);
3821
3822 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3823 *
3824 * If the Status return parameter is non-zero, then the state of the CIG
3825 * and its CIS configurations shall not be changed by the command. If
3826 * the CIG did not already exist, it shall not be created.
3827 */
3828 if (status) {
3829 /* Keep current configuration, fail only the unbound CIS */
3830 hci_unbound_cis_failed(hdev, rp->cig_id, status);
3831 goto unlock;
3832 }
3833
3834 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3835 *
3836 * If the Status return parameter is zero, then the Controller shall
3837 * set the Connection_Handle arrayed return parameter to the connection
3838 * handle(s) corresponding to the CIS configurations specified in
3839 * the CIS_IDs command parameter, in the same order.
3840 */
3841 for (i = 0; i < rp->num_handles; ++i) {
3842 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3843 cp->cis[i].cis_id);
3844 if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3845 continue;
3846
3847 if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3848 continue;
3849
3850 if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3851 continue;
3852
3853 if (conn->state == BT_CONNECT)
3854 pending = true;
3855 }
3856
3857 unlock:
3858 if (pending)
3859 hci_le_create_cis_pending(hdev);
3860
3861 hci_dev_unlock(hdev);
3862
3863 return rp->status;
3864 }
3865
3866 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3867 struct sk_buff *skb)
3868 {
3869 struct hci_rp_le_setup_iso_path *rp = data;
3870 struct hci_cp_le_setup_iso_path *cp;
3871 struct hci_conn *conn;
3872
3873 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3874
3875 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3876 if (!cp)
3877 return rp->status;
3878
3879 hci_dev_lock(hdev);
3880
3881 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3882 if (!conn)
3883 goto unlock;
3884
3885 if (rp->status) {
3886 hci_connect_cfm(conn, rp->status);
3887 hci_conn_del(conn);
3888 goto unlock;
3889 }
3890
3891 switch (cp->direction) {
3892 /* Input (Host to Controller) */
3893 case 0x00:
3894 /* Only confirm the connection if the stream is output only */
3895 if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3896 hci_connect_cfm(conn, rp->status);
3897 break;
3898 /* Output (Controller to Host) */
3899 case 0x01:
3900 /* Confirm connection since conn->iso_qos is always configured
3901 * last.
3902 */
3903 hci_connect_cfm(conn, rp->status);
3904
3905 /* Notify device connected in case it is a BIG Sync */
3906 if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3907 mgmt_device_connected(hdev, conn, NULL, 0);
3908
3909 break;
3910 }
3911
3912 unlock:
3913 hci_dev_unlock(hdev);
3914 return rp->status;
3915 }
3916
3917 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3918 {
3919 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3920 }
3921
3922 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3923 struct sk_buff *skb)
3924 {
3925 struct hci_ev_status *rp = data;
3926 struct hci_cp_le_set_per_adv_params *cp;
3927
3928 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3929
3930 if (rp->status)
3931 return rp->status;
3932
3933 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3934 if (!cp)
3935 return rp->status;
3936
3937 /* TODO: set the conn state */
3938 return rp->status;
3939 }
3940
3941 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3942 struct sk_buff *skb)
3943 {
3944 struct hci_ev_status *rp = data;
3945 struct hci_cp_le_set_per_adv_enable *cp;
3946 struct adv_info *adv = NULL, *n;
3947 u8 per_adv_cnt = 0;
3948
3949 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3950
3951 if (rp->status)
3952 return rp->status;
3953
3954 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3955 if (!cp)
3956 return rp->status;
3957
3958 hci_dev_lock(hdev);
3959
3960 adv = hci_find_adv_instance(hdev, cp->handle);
3961
3962 if (cp->enable) {
3963 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3964
3965 if (adv)
3966 adv->periodic_enabled = true;
3967 } else {
3968 if (adv)
3969 adv->periodic_enabled = false;
3970
3971 /* If just one instance was disabled, check if any other
3972 * instances are still enabled before clearing HCI_LE_PER_ADV.
3973 * The current periodic adv instance will be marked as
3974 * disabled once extended advertising is also disabled.
3975 */
3976 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
3977 list) {
3978 if (adv->periodic && adv->enabled)
3979 per_adv_cnt++;
3980 }
3981
3982 if (per_adv_cnt > 1)
3983 goto unlock;
3984
3985 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3986 }
3987
3988 unlock:
3989 hci_dev_unlock(hdev);
3990
3991 return rp->status;
3992 }
3993
3994 #define HCI_CC_VL(_op, _func, _min, _max) \
3995 { \
3996 .op = _op, \
3997 .func = _func, \
3998 .min_len = _min, \
3999 .max_len = _max, \
4000 }
4001
4002 #define HCI_CC(_op, _func, _len) \
4003 HCI_CC_VL(_op, _func, _len, _len)
4004
4005 #define HCI_CC_STATUS(_op, _func) \
4006 HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4007
4008 static const struct hci_cc {
4009 u16 op;
4010 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4011 u16 min_len;
4012 u16 max_len;
4013 } hci_cc_table[] = {
4014 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4015 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4016 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4017 HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel,
4018 sizeof(struct hci_rp_remote_name_req_cancel)),
4019 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4020 sizeof(struct hci_rp_role_discovery)),
4021 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4022 sizeof(struct hci_rp_read_link_policy)),
4023 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4024 sizeof(struct hci_rp_write_link_policy)),
4025 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4026 sizeof(struct hci_rp_read_def_link_policy)),
4027 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4028 hci_cc_write_def_link_policy),
4029 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4030 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4031 sizeof(struct hci_rp_read_stored_link_key)),
4032 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4033 sizeof(struct hci_rp_delete_stored_link_key)),
4034 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4035 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4036 sizeof(struct hci_rp_read_local_name)),
4037 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4038 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4039 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4040 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4041 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4042 sizeof(struct hci_rp_read_class_of_dev)),
4043 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4044 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4045 sizeof(struct hci_rp_read_voice_setting)),
4046 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4047 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4048 sizeof(struct hci_rp_read_num_supported_iac)),
4049 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4050 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4051 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4052 sizeof(struct hci_rp_read_auth_payload_to)),
4053 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4054 sizeof(struct hci_rp_write_auth_payload_to)),
4055 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4056 sizeof(struct hci_rp_read_local_version)),
4057 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4058 sizeof(struct hci_rp_read_local_commands)),
4059 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4060 sizeof(struct hci_rp_read_local_features)),
4061 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4062 sizeof(struct hci_rp_read_local_ext_features)),
4063 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4064 sizeof(struct hci_rp_read_buffer_size)),
4065 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4066 sizeof(struct hci_rp_read_bd_addr)),
4067 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4068 sizeof(struct hci_rp_read_local_pairing_opts)),
4069 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4070 sizeof(struct hci_rp_read_page_scan_activity)),
4071 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4072 hci_cc_write_page_scan_activity),
4073 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4074 sizeof(struct hci_rp_read_page_scan_type)),
4075 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4076 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4077 sizeof(struct hci_rp_read_clock)),
4078 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4079 sizeof(struct hci_rp_read_enc_key_size)),
4080 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4081 sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4082 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4083 hci_cc_read_def_err_data_reporting,
4084 sizeof(struct hci_rp_read_def_err_data_reporting)),
4085 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4086 hci_cc_write_def_err_data_reporting),
4087 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4088 sizeof(struct hci_rp_pin_code_reply)),
4089 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4090 sizeof(struct hci_rp_pin_code_neg_reply)),
4091 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4092 sizeof(struct hci_rp_read_local_oob_data)),
4093 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4094 sizeof(struct hci_rp_read_local_oob_ext_data)),
4095 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4096 sizeof(struct hci_rp_le_read_buffer_size)),
4097 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4098 sizeof(struct hci_rp_le_read_local_features)),
4099 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4100 sizeof(struct hci_rp_le_read_adv_tx_power)),
4101 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4102 sizeof(struct hci_rp_user_confirm_reply)),
4103 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4104 sizeof(struct hci_rp_user_confirm_reply)),
4105 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4106 sizeof(struct hci_rp_user_confirm_reply)),
4107 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4108 sizeof(struct hci_rp_user_confirm_reply)),
4109 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4110 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4111 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4112 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4113 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4114 hci_cc_le_read_accept_list_size,
4115 sizeof(struct hci_rp_le_read_accept_list_size)),
4116 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4117 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4118 hci_cc_le_add_to_accept_list),
4119 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4120 hci_cc_le_del_from_accept_list),
4121 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4122 sizeof(struct hci_rp_le_read_supported_states)),
4123 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4124 sizeof(struct hci_rp_le_read_def_data_len)),
4125 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4126 hci_cc_le_write_def_data_len),
4127 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4128 hci_cc_le_add_to_resolv_list),
4129 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4130 hci_cc_le_del_from_resolv_list),
4131 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4132 hci_cc_le_clear_resolv_list),
4133 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4134 sizeof(struct hci_rp_le_read_resolv_list_size)),
4135 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4136 hci_cc_le_set_addr_resolution_enable),
4137 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4138 sizeof(struct hci_rp_le_read_max_data_len)),
4139 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4140 hci_cc_write_le_host_supported),
4141 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4142 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4143 sizeof(struct hci_rp_read_rssi)),
4144 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4145 sizeof(struct hci_rp_read_tx_power)),
4146 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4147 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4148 hci_cc_le_set_ext_scan_param),
4149 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4150 hci_cc_le_set_ext_scan_enable),
4151 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4152 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4153 hci_cc_le_read_num_adv_sets,
4154 sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4155 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4156 hci_cc_le_set_ext_adv_enable),
4157 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4158 hci_cc_le_set_adv_set_random_addr),
4159 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4160 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4161 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4162 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4163 hci_cc_le_set_per_adv_enable),
4164 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4165 sizeof(struct hci_rp_le_read_transmit_power)),
4166 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4167 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4168 sizeof(struct hci_rp_le_read_buffer_size_v2)),
4169 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4170 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4171 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4172 sizeof(struct hci_rp_le_setup_iso_path)),
4173 };
4174
4175 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4176 struct sk_buff *skb)
4177 {
4178 void *data;
4179
4180 if (skb->len < cc->min_len) {
4181 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4182 cc->op, skb->len, cc->min_len);
4183 return HCI_ERROR_UNSPECIFIED;
4184 }
4185
4186 /* Just warn if the length is over max_len, since it may still be
4187 * possible to partially parse the cc; leave it to the callback to
4188 * decide whether that is acceptable.
4189 */
4190 if (skb->len > cc->max_len)
4191 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4192 cc->op, skb->len, cc->max_len);
4193
4194 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4195 if (!data)
4196 return HCI_ERROR_UNSPECIFIED;
4197
4198 return cc->func(hdev, data, skb);
4199 }
4200
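/* Editor's note: hci_cc_table/hci_cc_func above form a table-driven
 * parser: each opcode entry carries a handler plus the minimum/maximum
 * payload length it accepts, so the bounds check happens once in generic
 * code.  A self-contained sketch of the same pattern (the opcode value is
 * the standard HCI_Reset encoding, but the table is otherwise
 * hypothetical; compiled out with #if 0):
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cc_handler {
	uint16_t op;
	size_t min_len;
	size_t max_len;
	int (*func)(const uint8_t *data, size_t len);
};

static int handle_reset(const uint8_t *data, size_t len)
{
	return data[0];			/* byte 0 is the status */
}

static const struct cc_handler table[] = {
	{ 0x0c03 /* HCI_Reset */, 1, 1, handle_reset },
};

static int dispatch(uint16_t op, const uint8_t *data, size_t len)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].op != op)
			continue;
		if (len < table[i].min_len)
			return -1;	/* too short: reject outright */
		if (len > table[i].max_len)
			fprintf(stderr, "warn: oversized cc 0x%04x\n",
				(unsigned)op);
		return table[i].func(data, len);
	}
	/* Unknown opcode: assume byte 0 holds the status, as above. */
	return len ? data[0] : -1;
}
#endif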
4201 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4202 struct sk_buff *skb, u16 *opcode, u8 *status,
4203 hci_req_complete_t *req_complete,
4204 hci_req_complete_skb_t *req_complete_skb)
4205 {
4206 struct hci_ev_cmd_complete *ev = data;
4207 int i;
4208
4209 *opcode = __le16_to_cpu(ev->opcode);
4210
4211 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4212
4213 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4214 if (hci_cc_table[i].op == *opcode) {
4215 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4216 break;
4217 }
4218 }
4219
4220 if (i == ARRAY_SIZE(hci_cc_table)) {
4221 /* Unknown opcode, assume byte 0 contains the status, so
4222 * that e.g. __hci_cmd_sync() properly returns errors
4223 * for vendor specific commands sent by HCI drivers.
4224 * If a vendor doesn't actually follow this convention we may
4225 * need to introduce a vendor CC table in order to properly set
4226 * the status.
4227 */
4228 *status = skb->data[0];
4229 }
4230
4231 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4232
4233 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4234 req_complete_skb);
4235
4236 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4237 bt_dev_err(hdev,
4238 "unexpected event for opcode 0x%4.4x", *opcode);
4239 return;
4240 }
4241
4242 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4243 queue_work(hdev->workqueue, &hdev->cmd_work);
4244 }
4245
4246 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4247 {
4248 struct hci_cp_le_create_cis *cp;
4249 bool pending = false;
4250 int i;
4251
4252 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4253
4254 if (!status)
4255 return;
4256
4257 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4258 if (!cp)
4259 return;
4260
4261 hci_dev_lock(hdev);
4262
4263 /* Remove connection if command failed */
4264 for (i = 0; i < cp->num_cis; i++) {
4265 struct hci_conn *conn;
4266 u16 handle;
4267
4268 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4269
4270 conn = hci_conn_hash_lookup_handle(hdev, handle);
4271 if (conn) {
4272 if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4273 &conn->flags))
4274 pending = true;
4275 conn->state = BT_CLOSED;
4276 hci_connect_cfm(conn, status);
4277 hci_conn_del(conn);
4278 }
4279 }
4280 cp->num_cis = 0;
4281
4282 if (pending)
4283 hci_le_create_cis_pending(hdev);
4284
4285 hci_dev_unlock(hdev);
4286 }
4287
4288 #define HCI_CS(_op, _func) \
4289 { \
4290 .op = _op, \
4291 .func = _func, \
4292 }
4293
4294 static const struct hci_cs {
4295 u16 op;
4296 void (*func)(struct hci_dev *hdev, __u8 status);
4297 } hci_cs_table[] = {
4298 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4299 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4300 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4301 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4302 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4303 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4304 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4305 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4306 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4307 hci_cs_read_remote_ext_features),
4308 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4309 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4310 hci_cs_enhanced_setup_sync_conn),
4311 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4312 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4313 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4314 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4315 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4316 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4317 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4318 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4319 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4320 };
4321
4322 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4323 struct sk_buff *skb, u16 *opcode, u8 *status,
4324 hci_req_complete_t *req_complete,
4325 hci_req_complete_skb_t *req_complete_skb)
4326 {
4327 struct hci_ev_cmd_status *ev = data;
4328 int i;
4329
4330 *opcode = __le16_to_cpu(ev->opcode);
4331 *status = ev->status;
4332
4333 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4334
4335 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4336 if (hci_cs_table[i].op == *opcode) {
4337 hci_cs_table[i].func(hdev, ev->status);
4338 break;
4339 }
4340 }
4341
4342 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4343
4344 /* Indicate request completion if the command failed. Also, if
4345 * we're not waiting for a special event and we get a success
4346 * command status we should try to flag the request as completed
4347 * (since for this kind of command there will not be a command
4348 * complete event).
4349 */
4350 if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4351 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4352 req_complete_skb);
4353 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4354 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4355 *opcode);
4356 return;
4357 }
4358 }
4359
4360 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4361 queue_work(hdev->workqueue, &hdev->cmd_work);
4362 }
4363
4364 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4365 struct sk_buff *skb)
4366 {
4367 struct hci_ev_hardware_error *ev = data;
4368
4369 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4370
4371 hdev->hw_error_code = ev->code;
4372
4373 queue_work(hdev->req_workqueue, &hdev->error_reset);
4374 }
4375
4376 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4377 struct sk_buff *skb)
4378 {
4379 struct hci_ev_role_change *ev = data;
4380 struct hci_conn *conn;
4381
4382 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4383
4384 hci_dev_lock(hdev);
4385
4386 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4387 if (conn) {
4388 if (!ev->status)
4389 conn->role = ev->role;
4390
4391 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4392
4393 hci_role_switch_cfm(conn, ev->status, ev->role);
4394 }
4395
4396 hci_dev_unlock(hdev);
4397 }
4398
4399 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4400 struct sk_buff *skb)
4401 {
4402 struct hci_ev_num_comp_pkts *ev = data;
4403 int i;
4404
4405 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4406 flex_array_size(ev, handles, ev->num)))
4407 return;
4408
4409 bt_dev_dbg(hdev, "num %d", ev->num);
4410
4411 hci_dev_lock(hdev);
4412
4413 for (i = 0; i < ev->num; i++) {
4414 struct hci_comp_pkts_info *info = &ev->handles[i];
4415 struct hci_conn *conn;
4416 __u16 handle, count;
4417 unsigned int j;
4418
4419 handle = __le16_to_cpu(info->handle);
4420 count = __le16_to_cpu(info->count);
4421
4422 conn = hci_conn_hash_lookup_handle(hdev, handle);
4423 if (!conn)
4424 continue;
4425
4426 /* Check that there really are enough packets outstanding
4427 * before attempting to decrease the sent counter, otherwise
4428 * it could underflow.
4429 */
4430 if (conn->sent >= count) {
4431 conn->sent -= count;
4432 } else {
4433 bt_dev_warn(hdev, "hcon %p sent %u < count %u",
4434 conn, conn->sent, count);
4435 conn->sent = 0;
4436 }
4437
4438 for (j = 0; j < count; ++j)
4439 hci_conn_tx_dequeue(conn);
4440
4441 switch (conn->type) {
4442 case ACL_LINK:
4443 hdev->acl_cnt += count;
4444 if (hdev->acl_cnt > hdev->acl_pkts)
4445 hdev->acl_cnt = hdev->acl_pkts;
4446 break;
4447
4448 case LE_LINK:
4449 if (hdev->le_pkts) {
4450 hdev->le_cnt += count;
4451 if (hdev->le_cnt > hdev->le_pkts)
4452 hdev->le_cnt = hdev->le_pkts;
4453 } else {
4454 hdev->acl_cnt += count;
4455 if (hdev->acl_cnt > hdev->acl_pkts)
4456 hdev->acl_cnt = hdev->acl_pkts;
4457 }
4458 break;
4459
4460 case SCO_LINK:
4461 case ESCO_LINK:
4462 hdev->sco_cnt += count;
4463 if (hdev->sco_cnt > hdev->sco_pkts)
4464 hdev->sco_cnt = hdev->sco_pkts;
4465
4466 break;
4467
4468 case CIS_LINK:
4469 case BIS_LINK:
4470 case PA_LINK:
4471 hdev->iso_cnt += count;
4472 if (hdev->iso_cnt > hdev->iso_pkts)
4473 hdev->iso_cnt = hdev->iso_pkts;
4474 break;
4475
4476 default:
4477 bt_dev_err(hdev, "unknown type %d conn %p",
4478 conn->type, conn);
4479 break;
4480 }
4481 }
4482
4483 queue_work(hdev->workqueue, &hdev->tx_work);
4484
4485 hci_dev_unlock(hdev);
4486 }
4487
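/* Editor's note: the counters above implement controller flow-control
 * credits: each completed packet returns one credit, conn->sent is
 * decremented with an underflow guard, and the per-type pool is clamped
 * to the maximum the controller advertised.  A minimal sketch of that
 * saturating bookkeeping (illustrative only, compiled out with #if 0):
 */
#if 0
struct credit_pool {
	unsigned int cnt;	/* credits currently available */
	unsigned int pkts;	/* maximum advertised by the controller */
};

static void complete_packets(struct credit_pool *pool, unsigned int *sent,
			     unsigned int count)
{
	/* Guard against underflow if the controller over-reports. */
	if (*sent >= count)
		*sent -= count;
	else
		*sent = 0;

	/* Return the credits, clamped to the advertised maximum. */
	pool->cnt += count;
	if (pool->cnt > pool->pkts)
		pool->cnt = pool->pkts;
}
#endif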
4488 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4489 struct sk_buff *skb)
4490 {
4491 struct hci_ev_mode_change *ev = data;
4492 struct hci_conn *conn;
4493
4494 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4495
4496 hci_dev_lock(hdev);
4497
4498 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4499 if (conn) {
4500 conn->mode = ev->mode;
4501
4502 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4503 &conn->flags)) {
4504 if (conn->mode == HCI_CM_ACTIVE)
4505 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4506 else
4507 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4508 }
4509
4510 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4511 hci_sco_setup(conn, ev->status);
4512 }
4513
4514 hci_dev_unlock(hdev);
4515 }
4516
4517 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4518 struct sk_buff *skb)
4519 {
4520 struct hci_ev_pin_code_req *ev = data;
4521 struct hci_conn *conn;
4522
4523 bt_dev_dbg(hdev, "");
4524
4525 hci_dev_lock(hdev);
4526
4527 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4528 if (!conn)
4529 goto unlock;
4530
4531 if (conn->state == BT_CONNECTED) {
4532 hci_conn_hold(conn);
4533 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4534 hci_conn_drop(conn);
4535 }
4536
4537 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4538 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4539 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4540 sizeof(ev->bdaddr), &ev->bdaddr);
4541 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4542 u8 secure;
4543
4544 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4545 secure = 1;
4546 else
4547 secure = 0;
4548
4549 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4550 }
4551
4552 unlock:
4553 hci_dev_unlock(hdev);
4554 }
4555
4556 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4557 {
4558 if (key_type == HCI_LK_CHANGED_COMBINATION)
4559 return;
4560
4561 conn->pin_length = pin_len;
4562 conn->key_type = key_type;
4563
4564 switch (key_type) {
4565 case HCI_LK_LOCAL_UNIT:
4566 case HCI_LK_REMOTE_UNIT:
4567 case HCI_LK_DEBUG_COMBINATION:
4568 return;
4569 case HCI_LK_COMBINATION:
4570 if (pin_len == 16)
4571 conn->pending_sec_level = BT_SECURITY_HIGH;
4572 else
4573 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4574 break;
4575 case HCI_LK_UNAUTH_COMBINATION_P192:
4576 case HCI_LK_UNAUTH_COMBINATION_P256:
4577 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4578 break;
4579 case HCI_LK_AUTH_COMBINATION_P192:
4580 conn->pending_sec_level = BT_SECURITY_HIGH;
4581 break;
4582 case HCI_LK_AUTH_COMBINATION_P256:
4583 conn->pending_sec_level = BT_SECURITY_FIPS;
4584 break;
4585 }
4586 }
4587
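/* Editor's note: conn_set_key() above maps the BR/EDR link-key type to a
 * pending security level: authenticated P-256 keys rate FIPS, other
 * authenticated keys HIGH, unauthenticated keys MEDIUM, and a legacy
 * combination key only reaches HIGH with a full 16-digit PIN.  A
 * condensed sketch of that mapping (illustrative only, compiled out):
 */
#if 0
enum sketch_sec { SEC_MEDIUM, SEC_HIGH, SEC_FIPS };

static enum sketch_sec sec_for_key(int auth_p256, int authenticated,
				   int legacy, int pin_len)
{
	if (auth_p256)
		return SEC_FIPS;
	if (authenticated || (legacy && pin_len == 16))
		return SEC_HIGH;
	return SEC_MEDIUM;
}
#endif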
4588 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4589 struct sk_buff *skb)
4590 {
4591 struct hci_ev_link_key_req *ev = data;
4592 struct hci_cp_link_key_reply cp;
4593 struct hci_conn *conn;
4594 struct link_key *key;
4595
4596 bt_dev_dbg(hdev, "");
4597
4598 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4599 return;
4600
4601 hci_dev_lock(hdev);
4602
4603 key = hci_find_link_key(hdev, &ev->bdaddr);
4604 if (!key) {
4605 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4606 goto not_found;
4607 }
4608
4609 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4610
4611 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4612 if (conn) {
4613 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4614
4615 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4616 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4617 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4618 bt_dev_dbg(hdev, "ignoring unauthenticated key");
4619 goto not_found;
4620 }
4621
4622 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4623 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4624 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4625 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4626 goto not_found;
4627 }
4628
4629 conn_set_key(conn, key->type, key->pin_len);
4630 }
4631
4632 bacpy(&cp.bdaddr, &ev->bdaddr);
4633 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4634
4635 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4636
4637 hci_dev_unlock(hdev);
4638
4639 return;
4640
4641 not_found:
4642 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4643 hci_dev_unlock(hdev);
4644 }
4645
4646 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4647 struct sk_buff *skb)
4648 {
4649 struct hci_ev_link_key_notify *ev = data;
4650 struct hci_conn *conn;
4651 struct link_key *key;
4652 bool persistent;
4653 u8 pin_len = 0;
4654
4655 bt_dev_dbg(hdev, "");
4656
4657 hci_dev_lock(hdev);
4658
4659 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4660 if (!conn)
4661 goto unlock;
4662
4663 /* Ignore a NULL link key to mitigate CVE-2020-26555 */
4664 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4665 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4666 &ev->bdaddr);
4667 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4668 hci_conn_drop(conn);
4669 goto unlock;
4670 }
4671
4672 hci_conn_hold(conn);
4673 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4674 hci_conn_drop(conn);
4675
4676 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4677 conn_set_key(conn, ev->key_type, conn->pin_length);
4678
4679 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4680 goto unlock;
4681
4682 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4683 ev->key_type, pin_len, &persistent);
4684 if (!key)
4685 goto unlock;
4686
4687 /* Update connection information since adding the key will have
4688 * fixed up the type in the case of changed combination keys.
4689 */
4690 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4691 conn_set_key(conn, key->type, key->pin_len);
4692
4693 mgmt_new_link_key(hdev, key, persistent);
4694
4695 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4696 * is set. If it's not set, simply remove the key from the kernel
4697 * list (we've still notified user space about it but with
4698 * store_hint being 0).
4699 */
4700 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4701 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4702 list_del_rcu(&key->list);
4703 kfree_rcu(key, rcu);
4704 goto unlock;
4705 }
4706
4707 if (persistent)
4708 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4709 else
4710 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4711
4712 unlock:
4713 hci_dev_unlock(hdev);
4714 }
4715
4716 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4717 struct sk_buff *skb)
4718 {
4719 struct hci_ev_clock_offset *ev = data;
4720 struct hci_conn *conn;
4721
4722 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4723
4724 hci_dev_lock(hdev);
4725
4726 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4727 if (conn && !ev->status) {
4728 struct inquiry_entry *ie;
4729
4730 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4731 if (ie) {
4732 ie->data.clock_offset = ev->clock_offset;
4733 ie->timestamp = jiffies;
4734 }
4735 }
4736
4737 hci_dev_unlock(hdev);
4738 }
4739
4740 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4741 struct sk_buff *skb)
4742 {
4743 struct hci_ev_pkt_type_change *ev = data;
4744 struct hci_conn *conn;
4745
4746 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4747
4748 hci_dev_lock(hdev);
4749
4750 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4751 if (conn && !ev->status)
4752 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4753
4754 hci_dev_unlock(hdev);
4755 }
4756
4757 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4758 struct sk_buff *skb)
4759 {
4760 struct hci_ev_pscan_rep_mode *ev = data;
4761 struct inquiry_entry *ie;
4762
4763 bt_dev_dbg(hdev, "");
4764
4765 hci_dev_lock(hdev);
4766
4767 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4768 if (ie) {
4769 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4770 ie->timestamp = jiffies;
4771 }
4772
4773 hci_dev_unlock(hdev);
4774 }
4775
4776 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4777 struct sk_buff *skb)
4778 {
4779 struct hci_ev_inquiry_result_rssi *ev = edata;
4780 struct inquiry_data data;
4781 int i;
4782
4783 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4784
4785 if (!ev->num)
4786 return;
4787
4788 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4789 return;
4790
4791 hci_dev_lock(hdev);
4792
4793 if (skb->len == array_size(ev->num,
4794 sizeof(struct inquiry_info_rssi_pscan))) {
4795 struct inquiry_info_rssi_pscan *info;
4796
4797 for (i = 0; i < ev->num; i++) {
4798 u32 flags;
4799
4800 info = hci_ev_skb_pull(hdev, skb,
4801 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4802 sizeof(*info));
4803 if (!info) {
4804 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4805 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4806 goto unlock;
4807 }
4808
4809 bacpy(&data.bdaddr, &info->bdaddr);
4810 data.pscan_rep_mode = info->pscan_rep_mode;
4811 data.pscan_period_mode = info->pscan_period_mode;
4812 data.pscan_mode = info->pscan_mode;
4813 memcpy(data.dev_class, info->dev_class, 3);
4814 data.clock_offset = info->clock_offset;
4815 data.rssi = info->rssi;
4816 data.ssp_mode = 0x00;
4817
4818 flags = hci_inquiry_cache_update(hdev, &data, false);
4819
4820 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4821 info->dev_class, info->rssi,
4822 flags, NULL, 0, NULL, 0, 0);
4823 }
4824 } else if (skb->len == array_size(ev->num,
4825 sizeof(struct inquiry_info_rssi))) {
4826 struct inquiry_info_rssi *info;
4827
4828 for (i = 0; i < ev->num; i++) {
4829 u32 flags;
4830
4831 info = hci_ev_skb_pull(hdev, skb,
4832 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4833 sizeof(*info));
4834 if (!info) {
4835 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4836 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4837 goto unlock;
4838 }
4839
4840 bacpy(&data.bdaddr, &info->bdaddr);
4841 data.pscan_rep_mode = info->pscan_rep_mode;
4842 data.pscan_period_mode = info->pscan_period_mode;
4843 data.pscan_mode = 0x00;
4844 memcpy(data.dev_class, info->dev_class, 3);
4845 data.clock_offset = info->clock_offset;
4846 data.rssi = info->rssi;
4847 data.ssp_mode = 0x00;
4848
4849 flags = hci_inquiry_cache_update(hdev, &data, false);
4850
4851 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4852 info->dev_class, info->rssi,
4853 flags, NULL, 0, NULL, 0, 0);
4854 }
4855 } else {
4856 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4857 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4858 }
4859 unlock:
4860 hci_dev_unlock(hdev);
4861 }
4862
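/* Editor's note: this event arrives in two wire formats that share one
 * event code and are told apart purely by total payload length (num
 * entries times the per-entry size); the kernel compares against the
 * overflow-checked array_size() helper.  A sketch of the same dispatch
 * with stand-in entry sizes matching the two structs above (illustrative
 * only, compiled out):
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct fmt_pscan { uint8_t b[15]; };	/* stand-in: entry with pscan_mode */
struct fmt_plain { uint8_t b[14]; };	/* stand-in: entry without it */

static char classify(size_t payload_len, size_t num)
{
	if (payload_len == num * sizeof(struct fmt_pscan))
		return 'P';	/* inquiry_info_rssi_pscan layout */
	if (payload_len == num * sizeof(struct fmt_plain))
		return 'R';	/* inquiry_info_rssi layout */
	return '?';		/* malformed event */
}
#endif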
4863 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4864 struct sk_buff *skb)
4865 {
4866 struct hci_ev_remote_ext_features *ev = data;
4867 struct hci_conn *conn;
4868
4869 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4870
4871 hci_dev_lock(hdev);
4872
4873 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4874 if (!conn)
4875 goto unlock;
4876
4877 if (ev->page < HCI_MAX_PAGES)
4878 memcpy(conn->features[ev->page], ev->features, 8);
4879
4880 if (!ev->status && ev->page == 0x01) {
4881 struct inquiry_entry *ie;
4882
4883 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4884 if (ie)
4885 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4886
4887 if (ev->features[0] & LMP_HOST_SSP) {
4888 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4889 } else {
4890 /* The Bluetooth specification mandates that
4891 * Extended Inquiry Results are only used when Secure
4892 * Simple Pairing is enabled, but some devices violate
4893 * this.
4894 *
4895 * To make these devices work, the internal SSP
4896 * enabled flag needs to be cleared if the remote host
4897 * features do not indicate SSP support. */
4898 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4899 }
4900
4901 if (ev->features[0] & LMP_HOST_SC)
4902 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4903 }
4904
4905 if (conn->state != BT_CONFIG)
4906 goto unlock;
4907
4908 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4909 struct hci_cp_remote_name_req cp;
4910 memset(&cp, 0, sizeof(cp));
4911 bacpy(&cp.bdaddr, &conn->dst);
4912 cp.pscan_rep_mode = 0x02;
4913 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4914 } else {
4915 mgmt_device_connected(hdev, conn, NULL, 0);
4916 }
4917
4918 if (!hci_outgoing_auth_needed(hdev, conn)) {
4919 conn->state = BT_CONNECTED;
4920 hci_connect_cfm(conn, ev->status);
4921 hci_conn_drop(conn);
4922 }
4923
4924 unlock:
4925 hci_dev_unlock(hdev);
4926 }
4927
4928 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4929 struct sk_buff *skb)
4930 {
4931 struct hci_ev_sync_conn_complete *ev = data;
4932 struct hci_conn *conn;
4933 u8 status = ev->status;
4934
4935 switch (ev->link_type) {
4936 case SCO_LINK:
4937 case ESCO_LINK:
4938 break;
4939 default:
4940 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4941 * for HCI_Synchronous_Connection_Complete is limited to
4942 * either SCO or eSCO
4943 */
4944 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4945 return;
4946 }
4947
4948 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4949
4950 hci_dev_lock(hdev);
4951
4952 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4953 if (!conn) {
4954 if (ev->link_type == ESCO_LINK)
4955 goto unlock;
4956
4957 /* When the link type in the event indicates SCO connection
4958 * and lookup of the connection object fails, then check
4959 * if an eSCO connection object exists.
4960 *
4961 * The core limits the synchronous connections to either
4962 * SCO or eSCO. The eSCO connection is preferred and
4963 * attempted first; until it is successfully established,
4964 * the link type will be hinted as eSCO.
4965 */
4966 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4967 if (!conn)
4968 goto unlock;
4969 }
4970
4971 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4972 * Processing it more than once per connection can corrupt kernel memory.
4973 *
4974 * As the connection handle is set here for the first time, it indicates
4975 * whether the connection is already set up.
4976 */
4977 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
4978 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4979 goto unlock;
4980 }
4981
4982 switch (status) {
4983 case 0x00:
4984 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
4985 if (status) {
4986 conn->state = BT_CLOSED;
4987 break;
4988 }
4989
4990 conn->state = BT_CONNECTED;
4991 conn->type = ev->link_type;
4992
4993 hci_debugfs_create_conn(conn);
4994 hci_conn_add_sysfs(conn);
4995 break;
4996
4997 case 0x10: /* Connection Accept Timeout */
4998 case 0x0d: /* Connection Rejected due to Limited Resources */
4999 case 0x11: /* Unsupported Feature or Parameter Value */
5000 case 0x1c: /* SCO interval rejected */
5001 case 0x1a: /* Unsupported Remote Feature */
5002 case 0x1e: /* Invalid LMP Parameters */
5003 case 0x1f: /* Unspecified error */
5004 case 0x20: /* Unsupported LMP Parameter value */
5005 if (conn->out) {
5006 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5007 (hdev->esco_type & EDR_ESCO_MASK);
5008 if (hci_setup_sync(conn, conn->parent->handle))
5009 goto unlock;
5010 }
5011 fallthrough;
5012
5013 default:
5014 conn->state = BT_CLOSED;
5015 break;
5016 }
5017
5018 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5019 /* Notify only in case of SCO over the HCI transport data path, which
5020 * is indicated by zero; a non-zero value means a non-HCI transport path.
5021 */
5022 if (conn->codec.data_path == 0 && hdev->notify) {
5023 switch (ev->air_mode) {
5024 case 0x02:
5025 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5026 break;
5027 case 0x03:
5028 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5029 break;
5030 }
5031 }
5032
5033 hci_connect_cfm(conn, status);
5034 if (status)
5035 hci_conn_del(conn);
5036
5037 unlock:
5038 hci_dev_unlock(hdev);
5039 }
5040
5041 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5042 {
5043 size_t parsed = 0;
5044
5045 while (parsed < eir_len) {
5046 u8 field_len = eir[0];
5047
5048 if (field_len == 0)
5049 return parsed;
5050
5051 parsed += field_len + 1;
5052 eir += field_len + 1;
5053 }
5054
5055 return eir_len;
5056 }
5057
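/* Editor's note: EIR data is a sequence of { length, type, payload }
 * fields where 'length' counts the type byte plus the payload and a zero
 * length terminates the data early; that is what eir_get_length() walks.
 * A sketch that additionally extracts one field by type, similar in
 * spirit to eir_get_data() used below (illustrative only, compiled out):
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define SKETCH_EIR_NAME_COMPLETE 0x09

static const uint8_t *sketch_eir_find(const uint8_t *eir, size_t eir_len,
				      uint8_t type, size_t *data_len)
{
	size_t parsed = 0;

	while (parsed + 1 < eir_len) {
		uint8_t field_len = eir[0];

		if (field_len == 0)
			break;			/* early terminator */
		if (parsed + field_len + 1 > eir_len)
			break;			/* truncated field */
		if (eir[1] == type) {
			*data_len = field_len - 1;
			return &eir[2];		/* payload follows len+type */
		}
		parsed += field_len + 1;
		eir += field_len + 1;
	}
	return NULL;
}
#endif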
5058 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5059 struct sk_buff *skb)
5060 {
5061 struct hci_ev_ext_inquiry_result *ev = edata;
5062 struct inquiry_data data;
5063 size_t eir_len;
5064 int i;
5065
5066 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5067 flex_array_size(ev, info, ev->num)))
5068 return;
5069
5070 bt_dev_dbg(hdev, "num %d", ev->num);
5071
5072 if (!ev->num)
5073 return;
5074
5075 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5076 return;
5077
5078 hci_dev_lock(hdev);
5079
5080 for (i = 0; i < ev->num; i++) {
5081 struct extended_inquiry_info *info = &ev->info[i];
5082 u32 flags;
5083 bool name_known;
5084
5085 bacpy(&data.bdaddr, &info->bdaddr);
5086 data.pscan_rep_mode = info->pscan_rep_mode;
5087 data.pscan_period_mode = info->pscan_period_mode;
5088 data.pscan_mode = 0x00;
5089 memcpy(data.dev_class, info->dev_class, 3);
5090 data.clock_offset = info->clock_offset;
5091 data.rssi = info->rssi;
5092 data.ssp_mode = 0x01;
5093
5094 if (hci_dev_test_flag(hdev, HCI_MGMT))
5095 name_known = eir_get_data(info->data,
5096 sizeof(info->data),
5097 EIR_NAME_COMPLETE, NULL);
5098 else
5099 name_known = true;
5100
5101 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5102
5103 eir_len = eir_get_length(info->data, sizeof(info->data));
5104
5105 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5106 info->dev_class, info->rssi,
5107 flags, info->data, eir_len, NULL, 0, 0);
5108 }
5109
5110 hci_dev_unlock(hdev);
5111 }
5112
5113 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5114 struct sk_buff *skb)
5115 {
5116 struct hci_ev_key_refresh_complete *ev = data;
5117 struct hci_conn *conn;
5118
5119 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5120 __le16_to_cpu(ev->handle));
5121
5122 hci_dev_lock(hdev);
5123
5124 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5125 if (!conn)
5126 goto unlock;
5127
5128 /* For BR/EDR the necessary steps are taken through the
5129 * auth_complete event.
5130 */
5131 if (conn->type != LE_LINK)
5132 goto unlock;
5133
5134 if (!ev->status)
5135 conn->sec_level = conn->pending_sec_level;
5136
5137 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5138
5139 if (ev->status && conn->state == BT_CONNECTED) {
5140 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5141 hci_conn_drop(conn);
5142 goto unlock;
5143 }
5144
5145 if (conn->state == BT_CONFIG) {
5146 if (!ev->status)
5147 conn->state = BT_CONNECTED;
5148
5149 hci_connect_cfm(conn, ev->status);
5150 hci_conn_drop(conn);
5151 } else {
5152 hci_auth_cfm(conn, ev->status);
5153
5154 hci_conn_hold(conn);
5155 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5156 hci_conn_drop(conn);
5157 }
5158
5159 unlock:
5160 hci_dev_unlock(hdev);
5161 }
5162
5163 static u8 hci_get_auth_req(struct hci_conn *conn)
5164 {
5165 /* If remote requests no-bonding follow that lead */
5166 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5167 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5168 return conn->remote_auth | (conn->auth_type & 0x01);
5169
5170 /* If both remote and local have enough IO capabilities, require
5171 * MITM protection
5172 */
5173 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5174 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5175 return conn->remote_auth | 0x01;
5176
5177 /* No MITM protection possible so ignore remote requirement */
5178 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5179 }
5180
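/* Editor's note: in the HCI authentication-requirements encoding, bit 0
 * selects MITM protection (e.g. 0x00/0x01 is no bonding without/with
 * MITM).  hci_get_auth_req() above keeps the remote's bonding choice and
 * only raises the MITM bit when both sides have usable IO.  A compressed
 * sketch of just the bit manipulation, ignoring the no-bonding
 * short-circuit (illustrative only, compiled out):
 */
#if 0
#include <assert.h>

#define MITM 0x01

static unsigned char pick_auth(unsigned char remote_auth,
			       unsigned char local_auth, int both_have_io)
{
	if (both_have_io)
		return remote_auth | MITM;		/* force MITM on */
	return (remote_auth & ~MITM) | (local_auth & MITM);
}

int main(void)
{
	assert(pick_auth(0x02, 0x00, 1) == 0x03);	/* MITM forced */
	assert(pick_auth(0x02, 0x01, 0) == 0x03);	/* local MITM kept */
	assert(pick_auth(0x03, 0x00, 0) == 0x02);	/* MITM dropped */
	return 0;
}
#endif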
5181 static u8 bredr_oob_data_present(struct hci_conn *conn)
5182 {
5183 struct hci_dev *hdev = conn->hdev;
5184 struct oob_data *data;
5185
5186 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5187 if (!data)
5188 return 0x00;
5189
5190 if (bredr_sc_enabled(hdev)) {
5191 /* When Secure Connections is enabled, then just
5192 * return the present value stored with the OOB
5193 * data. The stored value contains the right present
5194 * information. However, it can only be trusted when
5195 * not in Secure Connections Only mode.
5196 */
5197 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5198 return data->present;
5199
5200 /* When Secure Connections Only mode is enabled, then
5201 * the P-256 values are required. If they are not
5202 * available, then do not declare that OOB data is
5203 * present.
5204 */
5205 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5206 !crypto_memneq(data->hash256, ZERO_KEY, 16))
5207 return 0x00;
5208
5209 return 0x02;
5210 }
5211
5212 /* When Secure Connections is not enabled or actually
5213 * not supported by the hardware, then check whether the
5214 * P-192 data values are present.
5215 */
5216 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5217 !crypto_memneq(data->hash192, ZERO_KEY, 16))
5218 return 0x00;
5219
5220 return 0x01;
5221 }
5222
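/* Editor's note: crypto_memneq() above is preferred over memcmp() because
 * its run time does not depend on where the buffers first differ, which
 * avoids a timing side channel when OOB hashes and randomizers are tested
 * against ZERO_KEY.  A minimal constant-time sketch of the idea; the real
 * kernel primitive adds hardening against compiler optimisation
 * (illustrative only, compiled out):
 */
#if 0
#include <stddef.h>

/* Returns nonzero if the buffers differ; always scans every byte. */
static int sketch_memneq(const void *a, const void *b, size_t n)
{
	const volatile unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= pa[i] ^ pb[i];

	return diff;
}
#endif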
5223 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5224 struct sk_buff *skb)
5225 {
5226 struct hci_ev_io_capa_request *ev = data;
5227 struct hci_conn *conn;
5228
5229 bt_dev_dbg(hdev, "");
5230
5231 hci_dev_lock(hdev);
5232
5233 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5234 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5235 goto unlock;
5236
5237 /* Assume remote supports SSP since it has triggered this event */
5238 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5239
5240 hci_conn_hold(conn);
5241
5242 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5243 goto unlock;
5244
5245 /* Allow pairing if we're bondable, if we are the initiator of
5246 * the pairing, or if the remote is not requesting bonding.
5247 */
5248 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5249 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5250 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5251 struct hci_cp_io_capability_reply cp;
5252
5253 bacpy(&cp.bdaddr, &ev->bdaddr);
5254 /* Change the IO capability from KeyboardDisplay to
5255 * DisplayYesNo, as KeyboardDisplay is not defined for BR/EDR. */
5256 cp.capability = (conn->io_capability == 0x04) ?
5257 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5258
5259 /* If we are initiators, there is no remote information yet */
5260 if (conn->remote_auth == 0xff) {
5261 /* Request MITM protection if our IO caps allow it
5262 * except for the no-bonding case.
5263 */
5264 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5265 conn->auth_type != HCI_AT_NO_BONDING)
5266 conn->auth_type |= 0x01;
5267 } else {
5268 conn->auth_type = hci_get_auth_req(conn);
5269 }
5270
5271 /* If we're not bondable, force one of the non-bondable
5272 * authentication requirement values.
5273 */
5274 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5275 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5276
5277 cp.authentication = conn->auth_type;
5278 cp.oob_data = bredr_oob_data_present(conn);
5279
5280 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5281 sizeof(cp), &cp);
5282 } else {
5283 struct hci_cp_io_capability_neg_reply cp;
5284
5285 bacpy(&cp.bdaddr, &ev->bdaddr);
5286 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5287
5288 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5289 sizeof(cp), &cp);
5290 }
5291
5292 unlock:
5293 hci_dev_unlock(hdev);
5294 }
5295
5296 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5297 struct sk_buff *skb)
5298 {
5299 struct hci_ev_io_capa_reply *ev = data;
5300 struct hci_conn *conn;
5301
5302 bt_dev_dbg(hdev, "");
5303
5304 hci_dev_lock(hdev);
5305
5306 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5307 if (!conn)
5308 goto unlock;
5309
5310 conn->remote_cap = ev->capability;
5311 conn->remote_auth = ev->authentication;
5312
5313 unlock:
5314 hci_dev_unlock(hdev);
5315 }
5316
5317 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5318 struct sk_buff *skb)
5319 {
5320 struct hci_ev_user_confirm_req *ev = data;
5321 int loc_mitm, rem_mitm, confirm_hint = 0;
5322 struct hci_conn *conn;
5323
5324 bt_dev_dbg(hdev, "");
5325
5326 hci_dev_lock(hdev);
5327
5328 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5329 goto unlock;
5330
5331 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5332 if (!conn)
5333 goto unlock;
5334
5335 loc_mitm = (conn->auth_type & 0x01);
5336 rem_mitm = (conn->remote_auth & 0x01);
5337
5338 /* If we require MITM but the remote device can't provide that
5339 * (it has NoInputNoOutput) then reject the confirmation
5340 * request. We check the security level here since it doesn't
5341 * necessarily match conn->auth_type.
5342 */
5343 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5344 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5345 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5346 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5347 sizeof(ev->bdaddr), &ev->bdaddr);
5348 goto unlock;
5349 }
5350
5351 /* If no side requires MITM protection, use the JUST_CFM method */
5352 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5353 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5354
5355 /* If we're not the initiator of the authorization request and the
5356 * local IO capability is not NoInputNoOutput, use JUST_WORKS
5357 * method (mgmt_user_confirm with confirm_hint set to 1).
5358 */
5359 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5360 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
5361 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5362 confirm_hint = 1;
5363 goto confirm;
5364 }
5365
5366 /* If a link key already exists on the local host, leave the
5367 * decision to user space since the remote device could be
5368 * legitimate or malicious.
5369 */
5370 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5371 bt_dev_dbg(hdev, "Local host already has link key");
5372 confirm_hint = 1;
5373 goto confirm;
5374 }
5375
5376 BT_DBG("Auto-accept of user confirmation with %ums delay",
5377 hdev->auto_accept_delay);
5378
5379 if (hdev->auto_accept_delay > 0) {
5380 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5381 queue_delayed_work(conn->hdev->workqueue,
5382 &conn->auto_accept_work, delay);
5383 goto unlock;
5384 }
5385
5386 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5387 sizeof(ev->bdaddr), &ev->bdaddr);
5388 goto unlock;
5389 }
5390
5391 confirm:
5392 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5393 le32_to_cpu(ev->passkey), confirm_hint);
5394
5395 unlock:
5396 hci_dev_unlock(hdev);
5397 }
5398
5399 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5400 struct sk_buff *skb)
5401 {
5402 struct hci_ev_user_passkey_req *ev = data;
5403
5404 bt_dev_dbg(hdev, "");
5405
5406 if (hci_dev_test_flag(hdev, HCI_MGMT))
5407 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5408 }
5409
5410 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5411 struct sk_buff *skb)
5412 {
5413 struct hci_ev_user_passkey_notify *ev = data;
5414 struct hci_conn *conn;
5415
5416 bt_dev_dbg(hdev, "");
5417
5418 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5419 if (!conn)
5420 return;
5421
5422 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5423 conn->passkey_entered = 0;
5424
5425 if (hci_dev_test_flag(hdev, HCI_MGMT))
5426 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5427 conn->dst_type, conn->passkey_notify,
5428 conn->passkey_entered);
5429 }
5430
5431 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5432 struct sk_buff *skb)
5433 {
5434 struct hci_ev_keypress_notify *ev = data;
5435 struct hci_conn *conn;
5436
5437 bt_dev_dbg(hdev, "");
5438
5439 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5440 if (!conn)
5441 return;
5442
5443 switch (ev->type) {
5444 case HCI_KEYPRESS_STARTED:
5445 conn->passkey_entered = 0;
5446 return;
5447
5448 case HCI_KEYPRESS_ENTERED:
5449 conn->passkey_entered++;
5450 break;
5451
5452 case HCI_KEYPRESS_ERASED:
5453 conn->passkey_entered--;
5454 break;
5455
5456 case HCI_KEYPRESS_CLEARED:
5457 conn->passkey_entered = 0;
5458 break;
5459
5460 case HCI_KEYPRESS_COMPLETED:
5461 return;
5462 }
5463
5464 if (hci_dev_test_flag(hdev, HCI_MGMT))
5465 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5466 conn->dst_type, conn->passkey_notify,
5467 conn->passkey_entered);
5468 }
5469
5470 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5471 struct sk_buff *skb)
5472 {
5473 struct hci_ev_simple_pair_complete *ev = data;
5474 struct hci_conn *conn;
5475
5476 bt_dev_dbg(hdev, "");
5477
5478 hci_dev_lock(hdev);
5479
5480 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5481 if (!conn || !hci_conn_ssp_enabled(conn))
5482 goto unlock;
5483
5484 /* Reset the authentication requirement to unknown */
5485 conn->remote_auth = 0xff;
5486
5487 /* To avoid duplicate auth_failed events to user space we check
5488 * the HCI_CONN_AUTH_PEND flag which will be set if we
5489 * initiated the authentication. A traditional auth_complete
5490 * event is always produced when we are the initiator and is also
5491 * mapped to the mgmt_auth_failed event. */
5492 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5493 mgmt_auth_failed(conn, ev->status);
5494
5495 hci_conn_drop(conn);
5496
5497 unlock:
5498 hci_dev_unlock(hdev);
5499 }
5500
5501 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5502 struct sk_buff *skb)
5503 {
5504 struct hci_ev_remote_host_features *ev = data;
5505 struct inquiry_entry *ie;
5506 struct hci_conn *conn;
5507
5508 bt_dev_dbg(hdev, "");
5509
5510 hci_dev_lock(hdev);
5511
5512 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5513 if (conn)
5514 memcpy(conn->features[1], ev->features, 8);
5515
5516 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5517 if (ie)
5518 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5519
5520 hci_dev_unlock(hdev);
5521 }
5522
5523 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5524 struct sk_buff *skb)
5525 {
5526 struct hci_ev_remote_oob_data_request *ev = edata;
5527 struct oob_data *data;
5528
5529 bt_dev_dbg(hdev, "");
5530
5531 hci_dev_lock(hdev);
5532
5533 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5534 goto unlock;
5535
5536 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5537 if (!data) {
5538 struct hci_cp_remote_oob_data_neg_reply cp;
5539
5540 bacpy(&cp.bdaddr, &ev->bdaddr);
5541 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5542 sizeof(cp), &cp);
5543 goto unlock;
5544 }
5545
5546 if (bredr_sc_enabled(hdev)) {
5547 struct hci_cp_remote_oob_ext_data_reply cp;
5548
5549 bacpy(&cp.bdaddr, &ev->bdaddr);
5550 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5551 memset(cp.hash192, 0, sizeof(cp.hash192));
5552 memset(cp.rand192, 0, sizeof(cp.rand192));
5553 } else {
5554 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5555 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5556 }
5557 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5558 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5559
5560 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5561 sizeof(cp), &cp);
5562 } else {
5563 struct hci_cp_remote_oob_data_reply cp;
5564
5565 bacpy(&cp.bdaddr, &ev->bdaddr);
5566 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5567 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5568
5569 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5570 sizeof(cp), &cp);
5571 }
5572
5573 unlock:
5574 hci_dev_unlock(hdev);
5575 }
5576
5577 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5578 u8 bdaddr_type, bdaddr_t *local_rpa)
5579 {
5580 if (conn->out) {
5581 conn->dst_type = bdaddr_type;
5582 conn->resp_addr_type = bdaddr_type;
5583 bacpy(&conn->resp_addr, bdaddr);
5584
5585 /* If the controller has set a Local RPA, then it must be
5586 * used instead of hdev->rpa.
5587 */
5588 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5589 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5590 bacpy(&conn->init_addr, local_rpa);
5591 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5592 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5593 bacpy(&conn->init_addr, &conn->hdev->rpa);
5594 } else {
5595 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5596 &conn->init_addr_type);
5597 }
5598 } else {
5599 conn->resp_addr_type = conn->hdev->adv_addr_type;
5600 /* If the controller has set a Local RPA then it must be
5601 * used instead of hdev->rpa.
5602 */
5603 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5604 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5605 bacpy(&conn->resp_addr, local_rpa);
5606 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5607 /* In case of ext adv, resp_addr will be updated in
5608 * Adv Terminated event.
5609 */
5610 if (!ext_adv_capable(conn->hdev))
5611 bacpy(&conn->resp_addr,
5612 &conn->hdev->random_addr);
5613 } else {
5614 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5615 }
5616
5617 conn->init_addr_type = bdaddr_type;
5618 bacpy(&conn->init_addr, bdaddr);
5619
5620 /* For incoming connections, set the default minimum
5621 * and maximum connection interval. They will be used
5622 * to check if the parameters are in range and if not
5623 * trigger the connection update procedure.
5624 */
5625 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5626 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5627 }
5628 }
5629
5630 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5631 bdaddr_t *bdaddr, u8 bdaddr_type,
5632 bdaddr_t *local_rpa, u8 role, u16 handle,
5633 u16 interval, u16 latency,
5634 u16 supervision_timeout)
5635 {
5636 struct hci_conn_params *params;
5637 struct hci_conn *conn;
5638 struct smp_irk *irk;
5639 u8 addr_type;
5640
5641 hci_dev_lock(hdev);
5642
5643 /* All controllers implicitly stop advertising in the event of a
5644 * connection, so ensure that the state bit is cleared.
5645 */
5646 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5647
5648 /* Check for existing connection:
5649 *
5650 * 1. If it doesn't exist then use the role to create a new object.
5651 * 2. If it does exist, confirm that it is connecting/BT_CONNECT in case
5652 * of initiator/master role, since there could be a collision where
5653 * either side is attempting to connect, or something like fuzz
5654 * testing is trying to play tricks to destroy the hcon object before
5655 * it even attempts to connect (e.g. hcon->state == BT_OPEN).
5656 */
5657 conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr);
5658 if (!conn ||
5659 (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
5660 /* In case of error status and there is no connection pending
5661 * just unlock as there is nothing to cleanup.
5662 */
5663 if (status)
5664 goto unlock;
5665
5666 conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5667 if (IS_ERR(conn)) {
5668 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
5669 goto unlock;
5670 }
5671
5672 conn->dst_type = bdaddr_type;
5673
5674 /* If we didn't have a hci_conn object previously
5675 * but we're in central role this must be something
5676 * initiated using an accept list. Since accept list based
5677 * connections are not "first class citizens" we don't
5678 * have full tracking of them. Therefore, we go ahead
5679 * with a "best effort" approach of determining the
5680 * initiator address based on the HCI_PRIVACY flag.
5681 */
5682 if (conn->out) {
5683 conn->resp_addr_type = bdaddr_type;
5684 bacpy(&conn->resp_addr, bdaddr);
5685 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5686 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5687 bacpy(&conn->init_addr, &hdev->rpa);
5688 } else {
5689 hci_copy_identity_address(hdev,
5690 &conn->init_addr,
5691 &conn->init_addr_type);
5692 }
5693 }
5694 } else {
5695 cancel_delayed_work(&conn->le_conn_timeout);
5696 }
5697
5698 /* The HCI_LE_Connection_Complete event is only sent once per connection.
5699 * Processing it more than once per connection can corrupt kernel memory.
5700 *
5701 * As the connection handle is set here for the first time, it indicates
5702 * whether the connection is already set up.
5703 */
5704 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5705 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5706 goto unlock;
5707 }
5708
5709 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5710
5711 /* Lookup the identity address from the stored connection
5712 * address and address type.
5713 *
5714 * When establishing connections to an identity address, the
5715 * connection procedure will store the resolvable random
5716 * address first. Now if it can be converted back into the
5717 * identity address, start using the identity address from
5718 * now on.
5719 */
5720 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5721 if (irk) {
5722 bacpy(&conn->dst, &irk->bdaddr);
5723 conn->dst_type = irk->addr_type;
5724 }
5725
5726 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5727
5728 /* All connection failure handling is taken care of by the
5729 * hci_conn_failed function which is triggered by the HCI
5730 * request completion callbacks used for connecting.
5731 */
5732 if (status || hci_conn_set_handle(conn, handle))
5733 goto unlock;
5734
5735 /* Drop the connection if it has been aborted */
5736 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5737 hci_conn_drop(conn);
5738 goto unlock;
5739 }
5740
5741 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5742 addr_type = BDADDR_LE_PUBLIC;
5743 else
5744 addr_type = BDADDR_LE_RANDOM;
5745
5746 /* Drop the connection if the device is blocked */
5747 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5748 hci_conn_drop(conn);
5749 goto unlock;
5750 }
5751
5752 mgmt_device_connected(hdev, conn, NULL, 0);
5753
5754 conn->sec_level = BT_SECURITY_LOW;
5755 conn->state = BT_CONFIG;
5756
5757 /* Store current advertising instance as connection advertising instance
5758 * when software rotation is in use so it can be re-enabled when
5759 * disconnected.
5760 */
5761 if (!ext_adv_capable(hdev))
5762 conn->adv_instance = hdev->cur_adv_instance;
5763
5764 conn->le_conn_interval = interval;
5765 conn->le_conn_latency = latency;
5766 conn->le_supv_timeout = supervision_timeout;
5767
5768 hci_debugfs_create_conn(conn);
5769 hci_conn_add_sysfs(conn);
5770
5771 /* The remote features procedure is defined for the central
5772 * role only, so only request the remote features for a
5773 * connection that we initiated.
5774 *
5775 * If the local controller supports peripheral-initiated features
5776 * exchange, then requesting the remote features in peripheral
5777 * role is possible. Otherwise just transition into the
5778 * connected state without requesting the remote features.
5779 */
5780 if (conn->out ||
5781 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5782 struct hci_cp_le_read_remote_features cp;
5783
5784 cp.handle = __cpu_to_le16(conn->handle);
5785
5786 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5787 sizeof(cp), &cp);
5788
5789 hci_conn_hold(conn);
5790 } else {
5791 conn->state = BT_CONNECTED;
5792 hci_connect_cfm(conn, status);
5793 }
5794
5795 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5796 conn->dst_type);
5797 if (params) {
5798 hci_pend_le_list_del_init(params);
5799 if (params->conn) {
5800 hci_conn_drop(params->conn);
5801 hci_conn_put(params->conn);
5802 params->conn = NULL;
5803 }
5804 }
5805
5806 unlock:
5807 hci_update_passive_scan(hdev);
5808 hci_dev_unlock(hdev);
5809 }
5810
5811 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5812 struct sk_buff *skb)
5813 {
5814 struct hci_ev_le_conn_complete *ev = data;
5815
5816 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5817
5818 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5819 NULL, ev->role, le16_to_cpu(ev->handle),
5820 le16_to_cpu(ev->interval),
5821 le16_to_cpu(ev->latency),
5822 le16_to_cpu(ev->supervision_timeout));
5823 }
5824
5825 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5826 struct sk_buff *skb)
5827 {
5828 struct hci_ev_le_enh_conn_complete *ev = data;
5829
5830 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5831
5832 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5833 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5834 le16_to_cpu(ev->interval),
5835 le16_to_cpu(ev->latency),
5836 le16_to_cpu(ev->supervision_timeout));
5837 }
5838
5839 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5840 struct sk_buff *skb)
5841 {
5842 struct hci_evt_le_ext_adv_set_term *ev = data;
5843 struct hci_conn *conn;
5844 struct adv_info *adv, *n;
5845
5846 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5847
5848 /* The Bluetooth Core 5.3 specification clearly states that this event
5849 * shall not be sent when the Host disables the advertising set. So in
5850 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5851 *
5852 * When the Host disables an advertising set, all cleanup is done via
5853 * its command callback and does not need to be duplicated here.
5854 */
5855 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5856 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5857 return;
5858 }
5859
5860 hci_dev_lock(hdev);
5861
5862 adv = hci_find_adv_instance(hdev, ev->handle);
5863
5864 if (ev->status) {
5865 if (!adv)
5866 goto unlock;
5867
5868 /* Remove advertising as it has been terminated */
5869 hci_remove_adv_instance(hdev, ev->handle);
5870 mgmt_advertising_removed(NULL, hdev, ev->handle);
5871
5872 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5873 if (adv->enabled)
5874 goto unlock;
5875 }
5876
5877 /* We are no longer advertising, clear HCI_LE_ADV */
5878 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5879 goto unlock;
5880 }
5881
5882 if (adv)
5883 adv->enabled = false;
5884
5885 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5886 if (conn) {
5887 /* Store handle in the connection so the correct advertising
5888 * instance can be re-enabled when disconnected.
5889 */
5890 conn->adv_instance = ev->handle;
5891
5892 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5893 bacmp(&conn->resp_addr, BDADDR_ANY))
5894 goto unlock;
5895
5896 if (!ev->handle) {
5897 bacpy(&conn->resp_addr, &hdev->random_addr);
5898 goto unlock;
5899 }
5900
5901 if (adv)
5902 bacpy(&conn->resp_addr, &adv->random_addr);
5903 }
5904
5905 unlock:
5906 hci_dev_unlock(hdev);
5907 }
5908
5909 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5910 struct sk_buff *skb)
5911 {
5912 struct hci_ev_le_conn_update_complete *ev = data;
5913 struct hci_conn *conn;
5914
5915 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5916
5917 if (ev->status)
5918 return;
5919
5920 hci_dev_lock(hdev);
5921
5922 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5923 if (conn) {
5924 conn->le_conn_interval = le16_to_cpu(ev->interval);
5925 conn->le_conn_latency = le16_to_cpu(ev->latency);
5926 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5927 }
5928
5929 hci_dev_unlock(hdev);
5930 }
5931
5932 /* This function requires the caller holds hdev->lock */
5933 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5934 bdaddr_t *addr,
5935 u8 addr_type, bool addr_resolved,
5936 u8 adv_type, u8 phy, u8 sec_phy)
5937 {
5938 struct hci_conn *conn;
5939 struct hci_conn_params *params;
5940
5941 /* If the event is not connectable don't proceed further */
5942 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5943 return NULL;
5944
5945 /* Ignore if the device is blocked or hdev is suspended */
5946 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5947 hdev->suspended)
5948 return NULL;
5949
5950 /* Most controllers will fail if we try to create new connections
5951 * while we have an existing one in peripheral role.
5952 */
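/* Note (assumption based on the Core spec state-combination table):
 * le_states[3] & 0x10 tests bit 28 of the LE supported states mask,
 * which appears to indicate that the controller can initiate a new
 * connection while already connected in the peripheral role.
 */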
5953 if (hdev->conn_hash.le_num_peripheral > 0 &&
5954 (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) ||
5955 !(hdev->le_states[3] & 0x10)))
5956 return NULL;
5957
5958 /* If we're not connectable, only connect devices that we have in
5959 * our pend_le_conns list.
5960 */
5961 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5962 addr_type);
5963 if (!params)
5964 return NULL;
5965
5966 if (!params->explicit_connect) {
5967 switch (params->auto_connect) {
5968 case HCI_AUTO_CONN_DIRECT:
5969 /* Only devices advertising with ADV_DIRECT_IND
5970 * trigger a connection attempt. This allows
5971 * incoming connections from peripheral devices.
5972 */
5973 if (adv_type != LE_ADV_DIRECT_IND)
5974 return NULL;
5975 break;
5976 case HCI_AUTO_CONN_ALWAYS:
5977 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5978 * trigger a connection attempt. This means that
5979 * incoming connections from peripheral devices are
5980 * accepted and also outgoing connections to peripheral
5981 * devices are established when found.
5982 */
5983 break;
5984 default:
5985 return NULL;
5986 }
5987 }
5988
5989 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5990 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5991 HCI_ROLE_MASTER, phy, sec_phy);
5992 if (!IS_ERR(conn)) {
5993 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5994 * by the higher layer that tried to connect; if not, store
5995 * the pointer since we don't really have any other owner of
5996 * the object besides the params that triggered it. This way
5997 * we can abort the connection if the parameters get removed
5998 * and keep the reference count consistent once the connection
5999 * is established.
6000 */
6001
6002 if (!params->explicit_connect)
6003 params->conn = hci_conn_get(conn);
6004
6005 return conn;
6006 }
6007
6008 switch (PTR_ERR(conn)) {
6009 case -EBUSY:
6010 /* If hci_connect() returns -EBUSY it means there is already
6011 * an LE connection attempt going on. Since controllers don't
6012 * support more than one connection attempt at a time, we
6013 * don't consider this an error case.
6014 */
6015 break;
6016 default:
6017 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6018 return NULL;
6019 }
6020
6021 return NULL;
6022 }
6023
6024 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6025 u8 bdaddr_type, bdaddr_t *direct_addr,
6026 u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
6027 u8 *data, u8 len, bool ext_adv, bool ctl_time,
6028 u64 instant)
6029 {
6030 struct discovery_state *d = &hdev->discovery;
6031 struct smp_irk *irk;
6032 struct hci_conn *conn;
6033 bool match, bdaddr_resolved;
6034 u32 flags;
6035 u8 *ptr;
6036
6037 switch (type) {
6038 case LE_ADV_IND:
6039 case LE_ADV_DIRECT_IND:
6040 case LE_ADV_SCAN_IND:
6041 case LE_ADV_NONCONN_IND:
6042 case LE_ADV_SCAN_RSP:
6043 break;
6044 default:
6045 bt_dev_err_ratelimited(hdev,
6046 "unknown advertising packet type: 0x%02x", type);
6047 return;
6048 }
6049
6050 if (len > max_adv_len(hdev)) {
6051 bt_dev_err_ratelimited(hdev,
6052 "adv larger than maximum supported");
6053 return;
6054 }
6055
6056 /* Find the end of the data in case the report contains padded zero
6057 * bytes at the end causing an invalid length value.
6058 *
6059 * When data is NULL, len is 0 so there is no need for extra ptr
6060 * check as 'ptr < data + 0' is already false in that case.
6061 */
6062 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6063 if (ptr + 1 + *ptr > data + len)
6064 break;
6065 }
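/* Illustrative AD layout: each structure is a length octet followed
 * by 'length' octets of payload (AD type + data), e.g.
 * 02 01 06 05 09 42 6f 6f 70 encodes Flags (0x06) followed by the
 * complete local name "Boop". A zero length octet therefore marks
 * the end of the significant part.
 */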
6066
6067 /* Adjust for actual length. This handles the case when remote
6068 * device is advertising with incorrect data length.
6069 */
6070 len = ptr - data;
6071
6072 /* If the direct address is present, then this report is from
6073 * an LE Direct Advertising Report event. In that case it is
6074 * important to see if the address matches the local
6075 * controller address.
6076 *
6077 * If local privacy is not enabled, the controller shall not
6078 * generate this event since, according to its documentation, it is
6079 * only valid for filter_policy 0x02 and 0x03. The fact that it did
6080 * generate an LE Direct Advertising Report means it is probably
6081 * broken and won't generate any other event, which could break the
6082 * auto-connect logic. So if local privacy is not enabled, ignore
6083 * the direct_addr so the report is handled as a regular one.
6084 */
6085 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr &&
6086 hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6087 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6088 &bdaddr_resolved);
6089
6090 /* Only resolvable random addresses are valid for this
6091 * kind of report; others can be ignored.
6092 */
6093 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6094 return;
6095
6096 /* If the local IRK of the controller does not match
6097 * the resolvable random address provided, then
6098 * this report can be ignored.
6099 */
6100 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6101 return;
6102 }
6103
6104 /* Check if we need to convert to identity address */
6105 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6106 if (irk) {
6107 bdaddr = &irk->bdaddr;
6108 bdaddr_type = irk->addr_type;
6109 }
6110
6111 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6112
6113 /* Check if we have been requested to connect to this device.
6114 *
6115 * direct_addr is set only for directed advertising reports (it is NULL
6116 * for advertising reports) and is already verified to be RPA above.
6117 */
6118 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6119 type, phy, sec_phy);
6120 if (!ext_adv && conn && type == LE_ADV_IND &&
6121 len <= max_adv_len(hdev)) {
6122 /* Store report for later inclusion by
6123 * mgmt_device_connected
6124 */
6125 memcpy(conn->le_adv_data, data, len);
6126 conn->le_adv_data_len = len;
6127 }
6128
6129 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6130 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6131 else
6132 flags = 0;
6133
6134 /* All scan results should be sent up for Mesh systems */
6135 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6136 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6137 rssi, flags, data, len, NULL, 0, instant);
6138 return;
6139 }
6140
6141 /* Passive scanning shouldn't trigger any device found events,
6142 * except for devices marked as CONN_REPORT, for which we do send
6143 * device found events, or when advertisement monitoring has been requested.
6144 */
6145 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6146 if (type == LE_ADV_DIRECT_IND)
6147 return;
6148
6149 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6150 bdaddr, bdaddr_type) &&
6151 idr_is_empty(&hdev->adv_monitors_idr))
6152 return;
6153
6154 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6155 rssi, flags, data, len, NULL, 0, 0);
6156 return;
6157 }
6158
6159 /* When receiving a scan response, there is no way to
6160 * know if the remote device is connectable or not. However,
6161 * since scan responses are merged with a previously seen
6162 * advertising report, the flags field from that report
6163 * will be used.
6164 *
6165 * In the unlikely case that a controller just sends a scan
6166 * response event that doesn't match the pending report, then
6167 * it is marked as a standalone SCAN_RSP.
6168 */
6169 if (type == LE_ADV_SCAN_RSP)
6170 flags = MGMT_DEV_FOUND_SCAN_RSP;
6171
6172 /* If there's nothing pending either store the data from this
6173 * event or send an immediate device found event if the data
6174 * should not be stored for later.
6175 */
6176 if (!has_pending_adv_report(hdev)) {
6177 /* If the report will trigger a SCAN_REQ store it for
6178 * later merging.
6179 */
6180 if (!ext_adv && (type == LE_ADV_IND ||
6181 type == LE_ADV_SCAN_IND)) {
6182 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6183 rssi, flags, data, len);
6184 return;
6185 }
6186
6187 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6188 rssi, flags, data, len, NULL, 0, 0);
6189 return;
6190 }
6191
6192 /* Check if the pending report is for the same device as the new one */
6193 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6194 bdaddr_type == d->last_adv_addr_type);
6195
6196 /* If the pending data doesn't match this report or this isn't a
6197 * scan response (e.g. we got a duplicate ADV_IND) then force
6198 * sending of the pending data.
6199 */
6200 if (type != LE_ADV_SCAN_RSP || !match) {
6201 /* Send out whatever is in the cache, but skip duplicates */
6202 if (!match)
6203 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6204 d->last_adv_addr_type, NULL,
6205 d->last_adv_rssi, d->last_adv_flags,
6206 d->last_adv_data,
6207 d->last_adv_data_len, NULL, 0, 0);
6208
6209 /* If the new report will trigger a SCAN_REQ store it for
6210 * later merging.
6211 */
6212 if (!ext_adv && (type == LE_ADV_IND ||
6213 type == LE_ADV_SCAN_IND)) {
6214 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6215 rssi, flags, data, len);
6216 return;
6217 }
6218
6219 /* The advertising reports cannot be merged, so clear
6220 * the pending report and send out a device found event.
6221 */
6222 clear_pending_adv_report(hdev);
6223 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6224 rssi, flags, data, len, NULL, 0, 0);
6225 return;
6226 }
6227
6228 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6229 * the new event is a SCAN_RSP. We can therefore proceed with
6230 * sending a merged device found event.
6231 */
6232 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6233 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6234 d->last_adv_data, d->last_adv_data_len, data, len, 0);
6235 clear_pending_adv_report(hdev);
6236 }
6237
6238 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6239 struct sk_buff *skb)
6240 {
6241 struct hci_ev_le_advertising_report *ev = data;
6242 u64 instant = jiffies;
6243
6244 if (!ev->num)
6245 return;
6246
6247 hci_dev_lock(hdev);
6248
6249 while (ev->num--) {
6250 struct hci_ev_le_advertising_info *info;
6251 s8 rssi;
6252
6253 info = hci_le_ev_skb_pull(hdev, skb,
6254 HCI_EV_LE_ADVERTISING_REPORT,
6255 sizeof(*info));
6256 if (!info)
6257 break;
6258
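/* Pull info->length data octets plus one extra octet: in the legacy
 * advertising report each entry is followed by a single RSSI byte,
 * which is read below as info->data[info->length].
 */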
6259 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6260 info->length + 1))
6261 break;
6262
6263 if (info->length <= max_adv_len(hdev)) {
6264 rssi = info->data[info->length];
6265 process_adv_report(hdev, info->type, &info->bdaddr,
6266 info->bdaddr_type, NULL, 0,
6267 HCI_ADV_PHY_1M, 0, rssi,
6268 info->data, info->length, false,
6269 false, instant);
6270 } else {
6271 bt_dev_err(hdev, "Dropping invalid advertising data");
6272 }
6273 }
6274
6275 hci_dev_unlock(hdev);
6276 }
6277
6278 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6279 {
6280 u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;
6281
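/* Per the Core spec, the extended advertising report event type is a
 * bit field: bit 0 = connectable, bit 1 = scannable, bit 2 = directed,
 * bit 3 = scan response, bit 4 = legacy PDU used, bits 5-6 = data
 * status (masked out above). The checks below reconstruct the closest
 * legacy PDU type from those bits.
 */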
6282 if (!pdu_type)
6283 return LE_ADV_NONCONN_IND;
6284
6285 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6286 switch (evt_type) {
6287 case LE_LEGACY_ADV_IND:
6288 return LE_ADV_IND;
6289 case LE_LEGACY_ADV_DIRECT_IND:
6290 return LE_ADV_DIRECT_IND;
6291 case LE_LEGACY_ADV_SCAN_IND:
6292 return LE_ADV_SCAN_IND;
6293 case LE_LEGACY_NONCONN_IND:
6294 return LE_ADV_NONCONN_IND;
6295 case LE_LEGACY_SCAN_RSP_ADV:
6296 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6297 return LE_ADV_SCAN_RSP;
6298 }
6299
6300 goto invalid;
6301 }
6302
6303 if (evt_type & LE_EXT_ADV_CONN_IND) {
6304 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6305 return LE_ADV_DIRECT_IND;
6306
6307 return LE_ADV_IND;
6308 }
6309
6310 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6311 return LE_ADV_SCAN_RSP;
6312
6313 if (evt_type & LE_EXT_ADV_SCAN_IND)
6314 return LE_ADV_SCAN_IND;
6315
6316 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6317 return LE_ADV_NONCONN_IND;
6318
6319 invalid:
6320 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6321 evt_type);
6322
6323 return LE_ADV_INVALID;
6324 }
6325
6326 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6327 struct sk_buff *skb)
6328 {
6329 struct hci_ev_le_ext_adv_report *ev = data;
6330 u64 instant = jiffies;
6331
6332 if (!ev->num)
6333 return;
6334
6335 hci_dev_lock(hdev);
6336
6337 while (ev->num--) {
6338 struct hci_ev_le_ext_adv_info *info;
6339 u8 legacy_evt_type;
6340 u16 evt_type;
6341
6342 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6343 sizeof(*info));
6344 if (!info)
6345 break;
6346
6347 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6348 info->length))
6349 break;
6350
6351 evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6352 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6353
6354 if (hci_test_quirk(hdev,
6355 HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) {
6356 info->primary_phy &= 0x1f;
6357 info->secondary_phy &= 0x1f;
6358 }
6359
6360 /* Check if PA Sync is pending and, if the hci_conn SID has not
6361 * been set, update it.
6362 */
6363 if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
6364 struct hci_conn *conn;
6365
6366 conn = hci_conn_hash_lookup_create_pa_sync(hdev);
6367 if (conn && conn->sid == HCI_SID_INVALID)
6368 conn->sid = info->sid;
6369 }
6370
6371 if (legacy_evt_type != LE_ADV_INVALID) {
6372 process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6373 info->bdaddr_type, NULL, 0,
6374 info->primary_phy,
6375 info->secondary_phy,
6376 info->rssi, info->data, info->length,
6377 !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6378 false, instant);
6379 }
6380 }
6381
6382 hci_dev_unlock(hdev);
6383 }
6384
6385 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6386 {
6387 struct hci_cp_le_pa_term_sync cp;
6388
6389 memset(&cp, 0, sizeof(cp));
6390 cp.handle = handle;
6391
6392 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6393 }
6394
6395 static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
6396 struct sk_buff *skb)
6397 {
6398 struct hci_ev_le_pa_sync_established *ev = data;
6399 int mask = hdev->link_mode;
6400 __u8 flags = 0;
6401 struct hci_conn *pa_sync, *conn;
6402
6403 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6404
6405 hci_dev_lock(hdev);
6406
6407 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6408
6409 conn = hci_conn_hash_lookup_create_pa_sync(hdev);
6410 if (!conn) {
6411 bt_dev_err(hdev,
6412 "Unable to find connection for dst %pMR sid 0x%2.2x",
6413 &ev->bdaddr, ev->sid);
6414 goto unlock;
6415 }
6416
6417 clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
6418
6419 conn->sync_handle = le16_to_cpu(ev->handle);
6420 conn->sid = HCI_SID_INVALID;
6421
6422 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
6423 &flags);
6424 if (!(mask & HCI_LM_ACCEPT)) {
6425 hci_le_pa_term_sync(hdev, ev->handle);
6426 goto unlock;
6427 }
6428
6429 if (!(flags & HCI_PROTO_DEFER))
6430 goto unlock;
6431
6432 /* Add connection to indicate PA sync event */
6433 pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
6434 HCI_ROLE_SLAVE);
6435
6436 if (IS_ERR(pa_sync))
6437 goto unlock;
6438
6439 pa_sync->sync_handle = le16_to_cpu(ev->handle);
6440
6441 if (ev->status) {
6442 set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6443
6444 /* Notify iso layer */
6445 hci_connect_cfm(pa_sync, ev->status);
6446 }
6447
6448 unlock:
6449 hci_dev_unlock(hdev);
6450 }
6451
6452 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6453 struct sk_buff *skb)
6454 {
6455 struct hci_ev_le_per_adv_report *ev = data;
6456 int mask = hdev->link_mode;
6457 __u8 flags = 0;
6458 struct hci_conn *pa_sync;
6459
6460 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6461
6462 hci_dev_lock(hdev);
6463
6464 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
6465 if (!(mask & HCI_LM_ACCEPT))
6466 goto unlock;
6467
6468 if (!(flags & HCI_PROTO_DEFER))
6469 goto unlock;
6470
6471 pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
6472 le16_to_cpu(ev->sync_handle));
6474
6475 if (!pa_sync)
6476 goto unlock;
6477
6478 if (ev->data_status == LE_PA_DATA_COMPLETE &&
6479 !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
6480 /* Notify iso layer */
6481 hci_connect_cfm(pa_sync, 0);
6482
6483 /* Notify MGMT layer */
6484 mgmt_device_connected(hdev, pa_sync, NULL, 0);
6485 }
6486
6487 unlock:
6488 hci_dev_unlock(hdev);
6489 }
6490
6491 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6492 struct sk_buff *skb)
6493 {
6494 struct hci_ev_le_remote_feat_complete *ev = data;
6495 struct hci_conn *conn;
6496
6497 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6498
6499 hci_dev_lock(hdev);
6500
6501 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6502 if (conn) {
6503 if (!ev->status)
6504 memcpy(conn->features[0], ev->features, 8);
6505
6506 if (conn->state == BT_CONFIG) {
6507 __u8 status;
6508
6509 /* If the local controller supports peripheral-initiated
6510 * features exchange, but the remote controller does
6511 * not, then it is possible that the error code 0x1a
6512 * for unsupported remote feature gets returned.
6513 *
6514 * In this specific case, allow the connection to
6515 * transition into connected state and mark it as
6516 * successful.
6517 */
6518 if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6519 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6520 status = 0x00;
6521 else
6522 status = ev->status;
6523
6524 conn->state = BT_CONNECTED;
6525 hci_connect_cfm(conn, status);
6526 hci_conn_drop(conn);
6527 }
6528 }
6529
6530 hci_dev_unlock(hdev);
6531 }
6532
6533 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6534 struct sk_buff *skb)
6535 {
6536 struct hci_ev_le_ltk_req *ev = data;
6537 struct hci_cp_le_ltk_reply cp;
6538 struct hci_cp_le_ltk_neg_reply neg;
6539 struct hci_conn *conn;
6540 struct smp_ltk *ltk;
6541
6542 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6543
6544 hci_dev_lock(hdev);
6545
6546 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6547 if (!conn)
6548 goto not_found;
6549
6550 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6551 if (!ltk)
6552 goto not_found;
6553
6554 if (smp_ltk_is_sc(ltk)) {
6555 /* With SC both EDiv and Rand are set to zero */
6556 if (ev->ediv || ev->rand)
6557 goto not_found;
6558 } else {
6559 /* For non-SC keys check that EDiv and Rand match */
6560 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6561 goto not_found;
6562 }
6563
6564 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6565 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6566 cp.handle = cpu_to_le16(conn->handle);
6567
6568 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6569
6570 conn->enc_key_size = ltk->enc_size;
6571
6572 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6573
6574 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6575 * temporary key used to encrypt a connection following
6576 * pairing. It is used during the Encrypted Session Setup to
6577 * distribute the keys. Later, security can be re-established
6578 * using a distributed LTK.
6579 */
6580 if (ltk->type == SMP_STK) {
6581 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6582 list_del_rcu(<k->list);
6583 kfree_rcu(ltk, rcu);
6584 } else {
6585 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6586 }
6587
6588 hci_dev_unlock(hdev);
6589
6590 return;
6591
6592 not_found:
6593 neg.handle = ev->handle;
6594 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6595 hci_dev_unlock(hdev);
6596 }
6597
6598 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6599 u8 reason)
6600 {
6601 struct hci_cp_le_conn_param_req_neg_reply cp;
6602
6603 cp.handle = cpu_to_le16(handle);
6604 cp.reason = reason;
6605
6606 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6607 &cp);
6608 }
6609
6610 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6611 struct sk_buff *skb)
6612 {
6613 struct hci_ev_le_remote_conn_param_req *ev = data;
6614 struct hci_cp_le_conn_param_req_reply cp;
6615 struct hci_conn *hcon;
6616 u16 handle, min, max, latency, timeout;
6617
6618 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6619
6620 handle = le16_to_cpu(ev->handle);
6621 min = le16_to_cpu(ev->interval_min);
6622 max = le16_to_cpu(ev->interval_max);
6623 latency = le16_to_cpu(ev->latency);
6624 timeout = le16_to_cpu(ev->timeout);
6625
6626 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6627 if (!hcon || hcon->state != BT_CONNECTED)
6628 return send_conn_param_neg_reply(hdev, handle,
6629 HCI_ERROR_UNKNOWN_CONN_ID);
6630
6631 if (max > hcon->le_conn_max_interval)
6632 return send_conn_param_neg_reply(hdev, handle,
6633 HCI_ERROR_INVALID_LL_PARAMS);
6634
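/* hci_check_conn_params() validates the request against the ranges
 * permitted by the spec (connection interval 7.5 ms to 4 s, peripheral
 * latency up to 499 events, supervision timeout 100 ms to 32 s) and
 * their mutual consistency; a non-zero result means out of range.
 */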
6635 if (hci_check_conn_params(min, max, latency, timeout))
6636 return send_conn_param_neg_reply(hdev, handle,
6637 HCI_ERROR_INVALID_LL_PARAMS);
6638
6639 if (hcon->role == HCI_ROLE_MASTER) {
6640 struct hci_conn_params *params;
6641 u8 store_hint;
6642
6643 hci_dev_lock(hdev);
6644
6645 params = hci_conn_params_lookup(hdev, &hcon->dst,
6646 hcon->dst_type);
6647 if (params) {
6648 params->conn_min_interval = min;
6649 params->conn_max_interval = max;
6650 params->conn_latency = latency;
6651 params->supervision_timeout = timeout;
6652 store_hint = 0x01;
6653 } else {
6654 store_hint = 0x00;
6655 }
6656
6657 hci_dev_unlock(hdev);
6658
6659 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6660 store_hint, min, max, latency, timeout);
6661 }
6662
6663 cp.handle = ev->handle;
6664 cp.interval_min = ev->interval_min;
6665 cp.interval_max = ev->interval_max;
6666 cp.latency = ev->latency;
6667 cp.timeout = ev->timeout;
6668 cp.min_ce_len = 0;
6669 cp.max_ce_len = 0;
6670
6671 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6672 }
6673
6674 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6675 struct sk_buff *skb)
6676 {
6677 struct hci_ev_le_direct_adv_report *ev = data;
6678 u64 instant = jiffies;
6679 int i;
6680
6681 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6682 flex_array_size(ev, info, ev->num)))
6683 return;
6684
6685 if (!ev->num)
6686 return;
6687
6688 hci_dev_lock(hdev);
6689
6690 for (i = 0; i < ev->num; i++) {
6691 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6692
6693 process_adv_report(hdev, info->type, &info->bdaddr,
6694 info->bdaddr_type, &info->direct_addr,
6695 info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6696 info->rssi, NULL, 0, false, false, instant);
6697 }
6698
6699 hci_dev_unlock(hdev);
6700 }
6701
6702 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6703 struct sk_buff *skb)
6704 {
6705 struct hci_ev_le_phy_update_complete *ev = data;
6706 struct hci_conn *conn;
6707
6708 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6709
6710 if (ev->status)
6711 return;
6712
6713 hci_dev_lock(hdev);
6714
6715 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6716 if (!conn)
6717 goto unlock;
6718
6719 conn->le_tx_phy = ev->tx_phy;
6720 conn->le_rx_phy = ev->rx_phy;
6721
6722 unlock:
6723 hci_dev_unlock(hdev);
6724 }
6725
6726 static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
6727 struct sk_buff *skb)
6728 {
6729 struct hci_evt_le_cis_established *ev = data;
6730 struct hci_conn *conn;
6731 struct bt_iso_qos *qos;
6732 bool pending = false;
6733 u16 handle = __le16_to_cpu(ev->handle);
6734 u32 c_sdu_interval, p_sdu_interval;
6735
6736 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6737
6738 hci_dev_lock(hdev);
6739
6740 conn = hci_conn_hash_lookup_handle(hdev, handle);
6741 if (!conn) {
6742 bt_dev_err(hdev,
6743 "Unable to find connection with handle 0x%4.4x",
6744 handle);
6745 goto unlock;
6746 }
6747
6748 if (conn->type != CIS_LINK) {
6749 bt_dev_err(hdev,
6750 "Invalid connection link type handle 0x%4.4x",
6751 handle);
6752 goto unlock;
6753 }
6754
6755 qos = &conn->iso_qos;
6756
6757 pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6758
6759 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
6760 * page 3075:
6761 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) x
6762 * ISO_Interval + SDU_Interval_C_To_P
6763 * ...
6764 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
6765 * Transport_Latency
6766 */
6767 c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6768 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
6769 get_unaligned_le24(ev->c_latency);
6770 p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
6771 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
6772 get_unaligned_le24(ev->p_latency);
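/* Unit note: ev->interval is the ISO_Interval in units of 1.25 ms,
 * hence the multiplication by 1250 to get microseconds; CIG_Sync_Delay
 * and the transport latencies are already expressed in microseconds.
 */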
6773
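/* Map the spec's directions onto this connection's point of view: as
 * central (master) "out" is Central-to-Peripheral and "in" is
 * Peripheral-to-Central; as peripheral it is the other way around.
 */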
6774 switch (conn->role) {
6775 case HCI_ROLE_SLAVE:
6776 qos->ucast.in.interval = c_sdu_interval;
6777 qos->ucast.out.interval = p_sdu_interval;
6778 /* Convert Transport Latency (us) to Latency (msec) */
6779 qos->ucast.in.latency =
6780 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6781 1000);
6782 qos->ucast.out.latency =
6783 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6784 1000);
6785 qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
6786 qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
6787 qos->ucast.in.phy = ev->c_phy;
6788 qos->ucast.out.phy = ev->p_phy;
6789 break;
6790 case HCI_ROLE_MASTER:
6791 qos->ucast.in.interval = p_sdu_interval;
6792 qos->ucast.out.interval = c_sdu_interval;
6793 /* Convert Transport Latency (us) to Latency (msec) */
6794 qos->ucast.out.latency =
6795 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6796 1000);
6797 qos->ucast.in.latency =
6798 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6799 1000);
6800 qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
6801 qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
6802 qos->ucast.out.phy = ev->c_phy;
6803 qos->ucast.in.phy = ev->p_phy;
6804 break;
6805 }
6806
6807 if (!ev->status) {
6808 conn->state = BT_CONNECTED;
6809 hci_debugfs_create_conn(conn);
6810 hci_conn_add_sysfs(conn);
6811 hci_iso_setup_path(conn);
6812 goto unlock;
6813 }
6814
6815 conn->state = BT_CLOSED;
6816 hci_connect_cfm(conn, ev->status);
6817 hci_conn_del(conn);
6818
6819 unlock:
6820 if (pending)
6821 hci_le_create_cis_pending(hdev);
6822
6823 hci_dev_unlock(hdev);
6824 }
6825
6826 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6827 {
6828 struct hci_cp_le_reject_cis cp;
6829
6830 memset(&cp, 0, sizeof(cp));
6831 cp.handle = handle;
6832 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6833 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6834 }
6835
6836 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6837 {
6838 struct hci_cp_le_accept_cis cp;
6839
6840 memset(&cp, 0, sizeof(cp));
6841 cp.handle = handle;
6842 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6843 }
6844
6845 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6846 struct sk_buff *skb)
6847 {
6848 struct hci_evt_le_cis_req *ev = data;
6849 u16 acl_handle, cis_handle;
6850 struct hci_conn *acl, *cis;
6851 int mask;
6852 __u8 flags = 0;
6853
6854 acl_handle = __le16_to_cpu(ev->acl_handle);
6855 cis_handle = __le16_to_cpu(ev->cis_handle);
6856
6857 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6858 acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6859
6860 hci_dev_lock(hdev);
6861
6862 acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6863 if (!acl)
6864 goto unlock;
6865
6866 mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags);
6867 if (!(mask & HCI_LM_ACCEPT)) {
6868 hci_le_reject_cis(hdev, ev->cis_handle);
6869 goto unlock;
6870 }
6871
6872 cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6873 if (!cis) {
6874 cis = hci_conn_add(hdev, CIS_LINK, &acl->dst,
6875 HCI_ROLE_SLAVE, cis_handle);
6876 if (IS_ERR(cis)) {
6877 hci_le_reject_cis(hdev, ev->cis_handle);
6878 goto unlock;
6879 }
6880 }
6881
6882 cis->iso_qos.ucast.cig = ev->cig_id;
6883 cis->iso_qos.ucast.cis = ev->cis_id;
6884
6885 if (!(flags & HCI_PROTO_DEFER)) {
6886 hci_le_accept_cis(hdev, ev->cis_handle);
6887 } else {
6888 cis->state = BT_CONNECT2;
6889 hci_connect_cfm(cis, 0);
6890 }
6891
6892 unlock:
6893 hci_dev_unlock(hdev);
6894 }
6895
6896 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6897 {
6898 u8 handle = PTR_UINT(data);
6899
6900 return hci_le_terminate_big_sync(hdev, handle,
6901 HCI_ERROR_LOCAL_HOST_TERM);
6902 }
6903
6904 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6905 struct sk_buff *skb)
6906 {
6907 struct hci_evt_le_create_big_complete *ev = data;
6908 struct hci_conn *conn;
6909 __u8 i = 0;
6910
6911 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6912
6913 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6914 flex_array_size(ev, bis_handle, ev->num_bis)))
6915 return;
6916
6917 hci_dev_lock(hdev);
6918
6919 /* Connect all BISes that are bound to the BIG */
6920 while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
6921 BT_BOUND,
6922 HCI_ROLE_MASTER))) {
6923 if (ev->status) {
6924 hci_connect_cfm(conn, ev->status);
6925 hci_conn_del(conn);
6926 continue;
6927 }
6928
6929 if (hci_conn_set_handle(conn,
6930 __le16_to_cpu(ev->bis_handle[i++])))
6931 continue;
6932
6933 conn->state = BT_CONNECTED;
6934 set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6935 hci_debugfs_create_conn(conn);
6936 hci_conn_add_sysfs(conn);
6937 hci_iso_setup_path(conn);
6938 }
6939
6940 if (!ev->status && !i)
6941 /* If no BISes have been connected for the BIG,
6942 * terminate. This is in case all bound connections
6943 * have been closed before the BIG creation
6944 * has completed.
6945 */
6946 hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6947 UINT_PTR(ev->handle), NULL);
6948
6949 hci_dev_unlock(hdev);
6950 }
6951
6952 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6953 struct sk_buff *skb)
6954 {
6955 struct hci_evt_le_big_sync_established *ev = data;
6956 struct hci_conn *bis, *conn;
6957 int i;
6958
6959 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6960
6961 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
6962 flex_array_size(ev, bis, ev->num_bis)))
6963 return;
6964
6965 hci_dev_lock(hdev);
6966
6967 conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
6968 ev->num_bis);
6969 if (!conn) {
6970 bt_dev_err(hdev,
6971 "Unable to find connection for big 0x%2.2x",
6972 ev->handle);
6973 goto unlock;
6974 }
6975
6976 clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
6977
6978 conn->num_bis = 0;
6979 memset(conn->bis, 0, sizeof(conn->bis));
6980
6981 for (i = 0; i < ev->num_bis; i++) {
6982 u16 handle = le16_to_cpu(ev->bis[i]);
6983 __le32 interval;
6984
6985 bis = hci_conn_hash_lookup_handle(hdev, handle);
6986 if (!bis) {
6987 if (handle > HCI_CONN_HANDLE_MAX) {
6988 bt_dev_dbg(hdev, "ignore too large handle %u", handle);
6989 continue;
6990 }
6991 bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY,
6992 HCI_ROLE_SLAVE, handle);
6993 if (IS_ERR(bis))
6994 continue;
6995 }
6996
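/* Assumption based on the HCI error code table: status 0x42 is
 * "Unknown Advertising Identifier"; for any other status the PA
 * sync itself is treated as established here.
 */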
6997 if (ev->status != 0x42) {
6998 /* Mark PA sync as established */
6999 set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7000 /* Reset cleanup callback of PA Sync so it doesn't
7001 * terminate the sync when deleting the connection.
7002 */
7003 conn->cleanup = NULL;
7004 }
7005
7006 bis->sync_handle = conn->sync_handle;
7007 bis->iso_qos.bcast.big = ev->handle;
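/* ev->latency is a 24-bit little-endian field; widen it to 32 bits
 * by copying it into a zeroed __le32 before byte-order conversion.
 */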
7008 memset(&interval, 0, sizeof(interval));
7009 memcpy(&interval, ev->latency, sizeof(ev->latency));
7010 bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7011 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7012 bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
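/* e.g. an ISO_Interval of 16 gives 16 * 125 / 100 = 20 ms */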
7013 bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7014
7015 if (!ev->status) {
7016 bis->state = BT_CONNECTED;
7017 set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7018 hci_debugfs_create_conn(bis);
7019 hci_conn_add_sysfs(bis);
7020 hci_iso_setup_path(bis);
7021 }
7022 }
7023
7024 /* In case BIG sync failed, notify the user of each failed
7025 * connection after all hci connections have been added
7026 */
7027 if (ev->status)
7028 for (i = 0; i < ev->num_bis; i++) {
7029 u16 handle = le16_to_cpu(ev->bis[i]);
7030
7031 bis = hci_conn_hash_lookup_handle(hdev, handle);
7032 if (!bis)
7033 continue;
7034
7035 set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7036 hci_connect_cfm(bis, ev->status);
7037 }
7038
7039 unlock:
7040 hci_dev_unlock(hdev);
7041 }
7042
7043 static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
7044 struct sk_buff *skb)
7045 {
7046 struct hci_evt_le_big_sync_lost *ev = data;
7047 struct hci_conn *bis, *conn;
7048 bool mgmt_conn;
7049
7050 bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);
7051
7052 hci_dev_lock(hdev);
7053
7054 /* Delete the pa sync connection */
7055 bis = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
7056 if (bis) {
7057 conn = hci_conn_hash_lookup_pa_sync_handle(hdev,
7058 bis->sync_handle);
7059 if (conn)
7060 hci_conn_del(conn);
7061 }
7062
7063 /* Delete each bis connection */
7064 while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
7065 BT_CONNECTED,
7066 HCI_ROLE_SLAVE))) {
7067 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &bis->flags);
7068 mgmt_device_disconnected(hdev, &bis->dst, bis->type, bis->dst_type,
7069 ev->reason, mgmt_conn);
7070
7071 clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7072 hci_disconn_cfm(bis, ev->reason);
7073 hci_conn_del(bis);
7074 }
7075
7076 hci_dev_unlock(hdev);
7077 }
7078
7079 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7080 struct sk_buff *skb)
7081 {
7082 struct hci_evt_le_big_info_adv_report *ev = data;
7083 int mask = hdev->link_mode;
7084 __u8 flags = 0;
7085 struct hci_conn *pa_sync;
7086
7087 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7088
7089 hci_dev_lock(hdev);
7090
7091 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
7092 if (!(mask & HCI_LM_ACCEPT))
7093 goto unlock;
7094
7095 if (!(flags & HCI_PROTO_DEFER))
7096 goto unlock;
7097
7098 pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
7099 le16_to_cpu(ev->sync_handle));
7101
7102 if (!pa_sync)
7103 goto unlock;
7104
7105 pa_sync->iso_qos.bcast.encryption = ev->encryption;
7106
7107 /* Notify iso layer */
7108 hci_connect_cfm(pa_sync, 0);
7109
7110 unlock:
7111 hci_dev_unlock(hdev);
7112 }
7113
7114 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7115 [_op] = { \
7116 .func = _func, \
7117 .min_len = _min_len, \
7118 .max_len = _max_len, \
7119 }
7120
7121 #define HCI_LE_EV(_op, _func, _len) \
7122 HCI_LE_EV_VL(_op, _func, _len, _len)
7123
7124 #define HCI_LE_EV_STATUS(_op, _func) \
7125 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7126
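/* For example (informational), HCI_LE_EV_STATUS(0x01, foo) expands to
 * [0x01] = { .func = foo, .min_len = sizeof(struct hci_ev_status),
 * .max_len = sizeof(struct hci_ev_status) }, i.e. a fixed-length
 * entry placed at index 0x01.
 */
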
7127 /* Entries in this table shall be positioned according to the subevent
7128 * opcode they handle, so use of the macros above is recommended since
7129 * they initialize each entry at its proper index using designated
7130 * initializers; that way, events without a callback function can be omitted.
7131 */
7132 static const struct hci_le_ev {
7133 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7134 u16 min_len;
7135 u16 max_len;
7136 } hci_le_ev_table[U8_MAX + 1] = {
7137 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7138 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7139 sizeof(struct hci_ev_le_conn_complete)),
7140 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7141 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7142 sizeof(struct hci_ev_le_advertising_report),
7143 HCI_MAX_EVENT_SIZE),
7144 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7145 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7146 hci_le_conn_update_complete_evt,
7147 sizeof(struct hci_ev_le_conn_update_complete)),
7148 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7149 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7150 hci_le_remote_feat_complete_evt,
7151 sizeof(struct hci_ev_le_remote_feat_complete)),
7152 /* [0x05 = HCI_EV_LE_LTK_REQ] */
7153 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7154 sizeof(struct hci_ev_le_ltk_req)),
7155 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7156 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7157 hci_le_remote_conn_param_req_evt,
7158 sizeof(struct hci_ev_le_remote_conn_param_req)),
7159 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7160 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7161 hci_le_enh_conn_complete_evt,
7162 sizeof(struct hci_ev_le_enh_conn_complete)),
7163 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7164 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7165 sizeof(struct hci_ev_le_direct_adv_report),
7166 HCI_MAX_EVENT_SIZE),
7167 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7168 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7169 sizeof(struct hci_ev_le_phy_update_complete)),
7170 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7171 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7172 sizeof(struct hci_ev_le_ext_adv_report),
7173 HCI_MAX_EVENT_SIZE),
7174 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7175 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7176 hci_le_pa_sync_established_evt,
7177 sizeof(struct hci_ev_le_pa_sync_established)),
7178 /* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7179 HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7180 hci_le_per_adv_report_evt,
7181 sizeof(struct hci_ev_le_per_adv_report),
7182 HCI_MAX_EVENT_SIZE),
7183 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7184 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7185 sizeof(struct hci_evt_le_ext_adv_set_term)),
7186 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7187 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
7188 sizeof(struct hci_evt_le_cis_established)),
7189 /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7190 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7191 sizeof(struct hci_evt_le_cis_req)),
7192 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7193 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7194 hci_le_create_big_complete_evt,
7195 sizeof(struct hci_evt_le_create_big_complete),
7196 HCI_MAX_EVENT_SIZE),
7197 /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */
7198 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
7199 hci_le_big_sync_established_evt,
7200 sizeof(struct hci_evt_le_big_sync_established),
7201 HCI_MAX_EVENT_SIZE),
7202 /* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
7203 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST,
7204 hci_le_big_sync_lost_evt,
7205 sizeof(struct hci_evt_le_big_sync_lost),
7206 HCI_MAX_EVENT_SIZE),
7207 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7208 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7209 hci_le_big_info_adv_report_evt,
7210 sizeof(struct hci_evt_le_big_info_adv_report),
7211 HCI_MAX_EVENT_SIZE),
7212 };
7213
7214 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7215 struct sk_buff *skb, u16 *opcode, u8 *status,
7216 hci_req_complete_t *req_complete,
7217 hci_req_complete_skb_t *req_complete_skb)
7218 {
7219 struct hci_ev_le_meta *ev = data;
7220 const struct hci_le_ev *subev;
7221
7222 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7223
7224 /* Only match event if command OGF is for LE */
7225 if (hdev->req_skb &&
7226 (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
7227 hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
7228 hci_skb_event(hdev->req_skb) == ev->subevent) {
7229 *opcode = hci_skb_opcode(hdev->req_skb);
7230 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7231 req_complete_skb);
7232 }
7233
7234 subev = &hci_le_ev_table[ev->subevent];
7235 if (!subev->func)
7236 return;
7237
7238 if (skb->len < subev->min_len) {
7239 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7240 ev->subevent, skb->len, subev->min_len);
7241 return;
7242 }
7243
7244 /* Just warn if the length is over max_len, since it may still be
7245 * possible to partially parse the event, so leave it to the
7246 * callback to decide if that is acceptable.
7247 */
7248 if (skb->len > subev->max_len)
7249 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7250 ev->subevent, skb->len, subev->max_len);
7251 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7252 if (!data)
7253 return;
7254
7255 subev->func(hdev, data, skb);
7256 }
7257
7258 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7259 u8 event, struct sk_buff *skb)
7260 {
7261 struct hci_ev_cmd_complete *ev;
7262 struct hci_event_hdr *hdr;
7263
7264 if (!skb)
7265 return false;
7266
7267 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7268 if (!hdr)
7269 return false;
7270
7271 if (event) {
7272 if (hdr->evt != event)
7273 return false;
7274 return true;
7275 }
7276
7277 /* Check if request ended in Command Status - no way to retrieve
7278 * any extra parameters in this case.
7279 */
7280 if (hdr->evt == HCI_EV_CMD_STATUS)
7281 return false;
7282
7283 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7284 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7285 hdr->evt);
7286 return false;
7287 }
7288
7289 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7290 if (!ev)
7291 return false;
7292
7293 if (opcode != __le16_to_cpu(ev->opcode)) {
7294 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7295 __le16_to_cpu(ev->opcode));
7296 return false;
7297 }
7298
7299 return true;
7300 }
7301
7302 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7303 struct sk_buff *skb)
7304 {
7305 struct hci_ev_le_advertising_info *adv;
7306 struct hci_ev_le_direct_adv_info *direct_adv;
7307 struct hci_ev_le_ext_adv_info *ext_adv;
7308 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7309 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7310
7311 hci_dev_lock(hdev);
7312
7313 /* If we are currently suspended and this is the first BT event seen,
7314 * save the wake reason associated with the event.
7315 */
7316 if (!hdev->suspended || hdev->wake_reason)
7317 goto unlock;
7318
7319 /* Default to remote wake. Values for wake_reason are documented in the
7320 * BlueZ mgmt API docs.
7321 */
7322 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7323
7324 /* Once configured for remote wakeup, we should only wake up for
7325 * reconnections. It's useful to see which device is waking us up so
7326 * keep track of the bdaddr of the connection event that woke us up.
7327 */
7328 if (event == HCI_EV_CONN_REQUEST) {
7329 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7330 hdev->wake_addr_type = BDADDR_BREDR;
7331 } else if (event == HCI_EV_CONN_COMPLETE) {
7332 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7333 hdev->wake_addr_type = BDADDR_BREDR;
7334 } else if (event == HCI_EV_LE_META) {
7335 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7336 u8 subevent = le_ev->subevent;
7337 u8 *ptr = &skb->data[sizeof(*le_ev)];
7338 u8 num_reports = *ptr;
7339
7340 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7341 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7342 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7343 num_reports) {
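			/* The reports begin right after the single
			 * num_reports byte; only the first report is
			 * inspected for the wake address.
			 */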
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

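/* Helpers for populating hci_ev_table below: HCI_EV() registers a handler
 * for a fixed-length event, HCI_EV_VL() for a variable-length one, and the
 * _REQ variants mark handlers that also participate in request completion
 * and therefore take the extended func_req signature.
 */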
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)

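/* For illustration, a fixed-length entry such as
 *
 *	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
 *	       sizeof(struct hci_ev_conn_request))
 *
 * expands to a designated initializer of the form
 *
 *	[HCI_EV_CONN_REQUEST] = {
 *		.req = false,
 *		.func = hci_conn_request_evt,
 *		.min_len = sizeof(struct hci_ev_conn_request),
 *		.max_len = sizeof(struct hci_ev_conn_request),
 *	},
 *
 * so the entry automatically lands at the slot matching its event opcode.
 */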
/* Entries in this table must be placed at the index matching the event
 * opcode they handle, so use of the macros above is recommended: they
 * initialize each entry at its proper index using designated initializers,
 * and events without a callback function simply have no entry.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16 min_len;
	u16 max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

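/* Dispatch a single event to its handler: validate the length against the
 * table entry, pull the fixed-size portion, and invoke either the plain or
 * the request-aware callback depending on the entry's req flag.
 */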
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

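/* Entry point for all incoming HCI event packets: keep a copy of the last
 * received event, match the event against any pending request, record the
 * wake reason while suspended, and finally dispatch to the handler table
 * before running any request completion callbacks.
 */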
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	hci_dev_lock(hdev);
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
	hci_dev_unlock(hdev);

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}