1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 4 Copyright 2023-2024 NXP 5 6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License version 2 as 10 published by the Free Software Foundation; 11 12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 23 SOFTWARE IS DISCLAIMED. 24 */ 25 26 /* Bluetooth HCI event handling. */ 27 28 #include <linux/unaligned.h> 29 #include <linux/crypto.h> 30 #include <crypto/algapi.h> 31 32 #include <net/bluetooth/bluetooth.h> 33 #include <net/bluetooth/hci_core.h> 34 #include <net/bluetooth/mgmt.h> 35 36 #include "hci_debugfs.h" 37 #include "hci_codec.h" 38 #include "smp.h" 39 #include "msft.h" 40 #include "eir.h" 41 42 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ 43 "\x00\x00\x00\x00\x00\x00\x00\x00" 44 45 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000) 46 47 /* Handle HCI Event packets */ 48 49 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 50 u8 ev, size_t len) 51 { 52 void *data; 53 54 data = skb_pull_data(skb, len); 55 if (!data) 56 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev); 57 58 return data; 59 } 60 61 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 62 u16 op, size_t len) 63 { 64 void *data; 65 66 data = skb_pull_data(skb, len); 67 if (!data) 68 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op); 69 70 return data; 71 } 72 73 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 74 u8 ev, size_t len) 75 { 76 void *data; 77 78 data = skb_pull_data(skb, len); 79 if (!data) 80 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev); 81 82 return data; 83 } 84 85 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, 86 struct sk_buff *skb) 87 { 88 struct hci_ev_status *rp = data; 89 90 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 91 92 /* It is possible that we receive Inquiry Complete event right 93 * before we receive Inquiry Cancel Command Complete event, in 94 * which case the latter event should have status of Command 95 * Disallowed. This should not be treated as error, since 96 * we actually achieve what Inquiry Cancel wants to achieve, 97 * which is to end the last Inquiry session. 98 */ 99 if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) { 100 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); 101 rp->status = 0x00; 102 } 103 104 if (rp->status) 105 return rp->status; 106 107 clear_bit(HCI_INQUIRY, &hdev->flags); 108 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 109 wake_up_bit(&hdev->flags, HCI_INQUIRY); 110 111 hci_dev_lock(hdev); 112 /* Set discovery state to stopped if we're not doing LE active 113 * scanning. 114 */ 115 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 116 hdev->le_scan_type != LE_SCAN_ACTIVE) 117 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 118 hci_dev_unlock(hdev); 119 120 return rp->status; 121 } 122 123 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data, 124 struct sk_buff *skb) 125 { 126 struct hci_ev_status *rp = data; 127 128 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 129 130 if (rp->status) 131 return rp->status; 132 133 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); 134 135 return rp->status; 136 } 137 138 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, 139 struct sk_buff *skb) 140 { 141 struct hci_ev_status *rp = data; 142 143 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 144 145 if (rp->status) 146 return rp->status; 147 148 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); 149 150 return rp->status; 151 } 152 153 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, 154 struct sk_buff *skb) 155 { 156 struct hci_ev_status *rp = data; 157 158 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 159 160 return rp->status; 161 } 162 163 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data, 164 struct sk_buff *skb) 165 { 166 struct hci_rp_role_discovery *rp = data; 167 struct hci_conn *conn; 168 169 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 170 171 if (rp->status) 172 return rp->status; 173 174 hci_dev_lock(hdev); 175 176 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 177 if (conn) 178 conn->role = rp->role; 179 180 hci_dev_unlock(hdev); 181 182 return rp->status; 183 } 184 185 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data, 186 struct sk_buff *skb) 187 { 188 struct hci_rp_read_link_policy *rp = data; 189 struct hci_conn *conn; 190 191 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 192 193 if (rp->status) 194 return rp->status; 195 196 hci_dev_lock(hdev); 197 198 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 199 if (conn) 200 conn->link_policy = __le16_to_cpu(rp->policy); 201 202 hci_dev_unlock(hdev); 203 204 return rp->status; 205 } 206 207 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data, 208 struct sk_buff *skb) 209 { 210 struct hci_rp_write_link_policy *rp = data; 211 struct hci_conn *conn; 212 void *sent; 213 214 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 215 216 if (rp->status) 217 return rp->status; 218 219 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); 220 if (!sent) 221 return rp->status; 222 223 hci_dev_lock(hdev); 224 225 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 226 if (conn) 227 conn->link_policy = get_unaligned_le16(sent + 2); 228 229 hci_dev_unlock(hdev); 230 231 return rp->status; 232 } 233 234 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data, 235 struct sk_buff *skb) 236 { 237 struct hci_rp_read_def_link_policy *rp = data; 238 239 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 240 241 if (rp->status) 242 return rp->status; 243 244 hdev->link_policy = __le16_to_cpu(rp->policy); 245 246 return rp->status; 247 } 248 249 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data, 250 struct sk_buff *skb) 251 { 252 struct hci_ev_status *rp = data; 253 void *sent; 254 255 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 256 257 if (rp->status) 258 return rp->status; 259 260 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 261 if (!sent) 262 return rp->status; 263 264 hdev->link_policy = get_unaligned_le16(sent); 265 266 return rp->status; 267 } 268 269 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb) 270 { 271 struct hci_ev_status *rp = data; 272 273 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 274 275 clear_bit(HCI_RESET, &hdev->flags); 276 277 if (rp->status) 278 return rp->status; 279 280 /* Reset all non-persistent flags */ 281 hci_dev_clear_volatile_flags(hdev); 282 283 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 284 285 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 286 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 287 288 memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); 289 hdev->adv_data_len = 0; 290 291 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); 292 hdev->scan_rsp_data_len = 0; 293 294 hdev->le_scan_type = LE_SCAN_PASSIVE; 295 296 hdev->ssp_debug_mode = 0; 297 298 hci_bdaddr_list_clear(&hdev->le_accept_list); 299 hci_bdaddr_list_clear(&hdev->le_resolv_list); 300 301 return rp->status; 302 } 303 304 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data, 305 struct sk_buff *skb) 306 { 307 struct hci_rp_read_stored_link_key *rp = data; 308 struct hci_cp_read_stored_link_key *sent; 309 310 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 311 312 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY); 313 if (!sent) 314 return rp->status; 315 316 if (!rp->status && sent->read_all == 0x01) { 317 hdev->stored_max_keys = le16_to_cpu(rp->max_keys); 318 hdev->stored_num_keys = le16_to_cpu(rp->num_keys); 319 } 320 321 return rp->status; 322 } 323 324 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data, 325 struct sk_buff *skb) 326 { 327 struct hci_rp_delete_stored_link_key *rp = data; 328 u16 num_keys; 329 330 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 331 332 if (rp->status) 333 return rp->status; 334 335 num_keys = le16_to_cpu(rp->num_keys); 336 337 if (num_keys <= hdev->stored_num_keys) 338 hdev->stored_num_keys -= num_keys; 339 else 340 hdev->stored_num_keys = 0; 341 342 return rp->status; 343 } 344 345 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data, 346 struct sk_buff *skb) 347 { 348 struct hci_ev_status *rp = data; 349 void *sent; 350 351 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 352 353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 354 if (!sent) 355 return rp->status; 356 357 hci_dev_lock(hdev); 358 359 if (hci_dev_test_flag(hdev, HCI_MGMT)) 360 mgmt_set_local_name_complete(hdev, sent, rp->status); 361 else if (!rp->status) 362 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 363 364 hci_dev_unlock(hdev); 365 366 return rp->status; 367 } 368 369 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data, 370 struct sk_buff *skb) 371 { 372 struct hci_rp_read_local_name *rp = data; 373 374 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 375 376 if (rp->status) 377 return rp->status; 378 379 if (hci_dev_test_flag(hdev, HCI_SETUP) || 380 hci_dev_test_flag(hdev, HCI_CONFIG)) 381 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 382 383 return rp->status; 384 } 385 386 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data, 387 struct sk_buff *skb) 388 { 389 struct hci_ev_status *rp = data; 390 void *sent; 391 392 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 393 394 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 395 if (!sent) 396 return rp->status; 397 398 hci_dev_lock(hdev); 399 400 if (!rp->status) { 401 __u8 param = *((__u8 *) sent); 402 403 if (param == AUTH_ENABLED) 404 set_bit(HCI_AUTH, &hdev->flags); 405 else 406 clear_bit(HCI_AUTH, &hdev->flags); 407 } 408 409 if (hci_dev_test_flag(hdev, HCI_MGMT)) 410 mgmt_auth_enable_complete(hdev, rp->status); 411 412 hci_dev_unlock(hdev); 413 414 return rp->status; 415 } 416 417 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data, 418 struct sk_buff *skb) 419 { 420 struct hci_ev_status *rp = data; 421 __u8 param; 422 void *sent; 423 424 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 425 426 if (rp->status) 427 return rp->status; 428 429 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 430 if (!sent) 431 return rp->status; 432 433 param = *((__u8 *) sent); 434 435 if (param) 436 set_bit(HCI_ENCRYPT, &hdev->flags); 437 else 438 clear_bit(HCI_ENCRYPT, &hdev->flags); 439 440 return rp->status; 441 } 442 443 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data, 444 struct sk_buff *skb) 445 { 446 struct hci_ev_status *rp = data; 447 __u8 param; 448 void *sent; 449 450 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 451 452 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 453 if (!sent) 454 return rp->status; 455 456 param = *((__u8 *) sent); 457 458 hci_dev_lock(hdev); 459 460 if (rp->status) { 461 hdev->discov_timeout = 0; 462 goto done; 463 } 464 465 if (param & SCAN_INQUIRY) 466 set_bit(HCI_ISCAN, &hdev->flags); 467 else 468 clear_bit(HCI_ISCAN, &hdev->flags); 469 470 if (param & SCAN_PAGE) 471 set_bit(HCI_PSCAN, &hdev->flags); 472 else 473 clear_bit(HCI_PSCAN, &hdev->flags); 474 475 done: 476 hci_dev_unlock(hdev); 477 478 return rp->status; 479 } 480 481 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data, 482 struct sk_buff *skb) 483 { 484 struct hci_ev_status *rp = data; 485 struct hci_cp_set_event_filter *cp; 486 void *sent; 487 488 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 489 490 if (rp->status) 491 return rp->status; 492 493 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT); 494 if (!sent) 495 return rp->status; 496 497 cp = (struct hci_cp_set_event_filter *)sent; 498 499 if (cp->flt_type == HCI_FLT_CLEAR_ALL) 500 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); 501 else 502 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); 503 504 return rp->status; 505 } 506 507 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data, 508 struct sk_buff *skb) 509 { 510 struct hci_rp_read_class_of_dev *rp = data; 511 512 if (WARN_ON(!hdev)) 513 return HCI_ERROR_UNSPECIFIED; 514 515 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 516 517 if (rp->status) 518 return rp->status; 519 520 memcpy(hdev->dev_class, rp->dev_class, 3); 521 522 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2], 523 hdev->dev_class[1], hdev->dev_class[0]); 524 525 return rp->status; 526 } 527 528 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data, 529 struct sk_buff *skb) 530 { 531 struct hci_ev_status *rp = data; 532 void *sent; 533 534 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 535 536 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 537 if (!sent) 538 return rp->status; 539 540 hci_dev_lock(hdev); 541 542 if (!rp->status) 543 memcpy(hdev->dev_class, sent, 3); 544 545 if (hci_dev_test_flag(hdev, HCI_MGMT)) 546 mgmt_set_class_of_dev_complete(hdev, sent, rp->status); 547 548 hci_dev_unlock(hdev); 549 550 return rp->status; 551 } 552 553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data, 554 struct sk_buff *skb) 555 { 556 struct hci_rp_read_voice_setting *rp = data; 557 __u16 setting; 558 559 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 560 561 if (rp->status) 562 return rp->status; 563 564 setting = __le16_to_cpu(rp->voice_setting); 565 566 if (hdev->voice_setting == setting) 567 return rp->status; 568 569 hdev->voice_setting = setting; 570 571 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 572 573 if (hdev->notify) 574 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 575 576 return rp->status; 577 } 578 579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data, 580 struct sk_buff *skb) 581 { 582 struct hci_ev_status *rp = data; 583 __u16 setting; 584 void *sent; 585 586 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 587 588 if (rp->status) 589 return rp->status; 590 591 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 592 if (!sent) 593 return rp->status; 594 595 setting = get_unaligned_le16(sent); 596 597 if (hdev->voice_setting == setting) 598 return rp->status; 599 600 hdev->voice_setting = setting; 601 602 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 603 604 if (hdev->notify) 605 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 606 607 return rp->status; 608 } 609 610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data, 611 struct sk_buff *skb) 612 { 613 struct hci_rp_read_num_supported_iac *rp = data; 614 615 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 616 617 if (rp->status) 618 return rp->status; 619 620 hdev->num_iac = rp->num_iac; 621 622 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac); 623 624 return rp->status; 625 } 626 627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data, 628 struct sk_buff *skb) 629 { 630 struct hci_ev_status *rp = data; 631 struct hci_cp_write_ssp_mode *sent; 632 633 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 634 635 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 636 if (!sent) 637 return rp->status; 638 639 hci_dev_lock(hdev); 640 641 if (!rp->status) { 642 if (sent->mode) 643 hdev->features[1][0] |= LMP_HOST_SSP; 644 else 645 hdev->features[1][0] &= ~LMP_HOST_SSP; 646 } 647 648 if (!rp->status) { 649 if (sent->mode) 650 hci_dev_set_flag(hdev, HCI_SSP_ENABLED); 651 else 652 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); 653 } 654 655 hci_dev_unlock(hdev); 656 657 return rp->status; 658 } 659 660 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data, 661 struct sk_buff *skb) 662 { 663 struct hci_ev_status *rp = data; 664 struct hci_cp_write_sc_support *sent; 665 666 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 667 668 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); 669 if (!sent) 670 return rp->status; 671 672 hci_dev_lock(hdev); 673 674 if (!rp->status) { 675 if (sent->support) 676 hdev->features[1][0] |= LMP_HOST_SC; 677 else 678 hdev->features[1][0] &= ~LMP_HOST_SC; 679 } 680 681 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) { 682 if (sent->support) 683 hci_dev_set_flag(hdev, HCI_SC_ENABLED); 684 else 685 hci_dev_clear_flag(hdev, HCI_SC_ENABLED); 686 } 687 688 hci_dev_unlock(hdev); 689 690 return rp->status; 691 } 692 693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data, 694 struct sk_buff *skb) 695 { 696 struct hci_rp_read_local_version *rp = data; 697 698 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 699 700 if (rp->status) 701 return rp->status; 702 703 if (hci_dev_test_flag(hdev, HCI_SETUP) || 704 hci_dev_test_flag(hdev, HCI_CONFIG)) { 705 hdev->hci_ver = rp->hci_ver; 706 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 707 hdev->lmp_ver = rp->lmp_ver; 708 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 709 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 710 } 711 712 return rp->status; 713 } 714 715 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data, 716 struct sk_buff *skb) 717 { 718 struct hci_rp_read_enc_key_size *rp = data; 719 struct hci_conn *conn; 720 u16 handle; 721 u8 status = rp->status; 722 723 bt_dev_dbg(hdev, "status 0x%2.2x", status); 724 725 handle = le16_to_cpu(rp->handle); 726 727 hci_dev_lock(hdev); 728 729 conn = hci_conn_hash_lookup_handle(hdev, handle); 730 if (!conn) { 731 status = 0xFF; 732 goto done; 733 } 734 735 /* While unexpected, the read_enc_key_size command may fail. The most 736 * secure approach is to then assume the key size is 0 to force a 737 * disconnection. 738 */ 739 if (status) { 740 bt_dev_err(hdev, "failed to read key size for handle %u", 741 handle); 742 conn->enc_key_size = 0; 743 } else { 744 conn->enc_key_size = rp->key_size; 745 status = 0; 746 747 if (conn->enc_key_size < hdev->min_enc_key_size) { 748 /* As slave role, the conn->state has been set to 749 * BT_CONNECTED and l2cap conn req might not be received 750 * yet, at this moment the l2cap layer almost does 751 * nothing with the non-zero status. 752 * So we also clear encrypt related bits, and then the 753 * handler of l2cap conn req will get the right secure 754 * state at a later time. 755 */ 756 status = HCI_ERROR_AUTH_FAILURE; 757 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 758 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 759 } 760 } 761 762 hci_encrypt_cfm(conn, status); 763 764 done: 765 hci_dev_unlock(hdev); 766 767 return status; 768 } 769 770 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data, 771 struct sk_buff *skb) 772 { 773 struct hci_rp_read_local_commands *rp = data; 774 775 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 776 777 if (rp->status) 778 return rp->status; 779 780 if (hci_dev_test_flag(hdev, HCI_SETUP) || 781 hci_dev_test_flag(hdev, HCI_CONFIG)) 782 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 783 784 return rp->status; 785 } 786 787 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data, 788 struct sk_buff *skb) 789 { 790 struct hci_rp_read_auth_payload_to *rp = data; 791 struct hci_conn *conn; 792 793 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 794 795 if (rp->status) 796 return rp->status; 797 798 hci_dev_lock(hdev); 799 800 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 801 if (conn) 802 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout); 803 804 hci_dev_unlock(hdev); 805 806 return rp->status; 807 } 808 809 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data, 810 struct sk_buff *skb) 811 { 812 struct hci_rp_write_auth_payload_to *rp = data; 813 struct hci_conn *conn; 814 void *sent; 815 816 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 817 818 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO); 819 if (!sent) 820 return rp->status; 821 822 hci_dev_lock(hdev); 823 824 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 825 if (!conn) { 826 rp->status = 0xff; 827 goto unlock; 828 } 829 830 if (!rp->status) 831 conn->auth_payload_timeout = get_unaligned_le16(sent + 2); 832 833 unlock: 834 hci_dev_unlock(hdev); 835 836 return rp->status; 837 } 838 839 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data, 840 struct sk_buff *skb) 841 { 842 struct hci_rp_read_local_features *rp = data; 843 844 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 845 846 if (rp->status) 847 return rp->status; 848 849 memcpy(hdev->features, rp->features, 8); 850 851 /* Adjust default settings according to features 852 * supported by device. */ 853 854 if (hdev->features[0][0] & LMP_3SLOT) 855 hdev->pkt_type |= (HCI_DM3 | HCI_DH3); 856 857 if (hdev->features[0][0] & LMP_5SLOT) 858 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 859 860 if (hdev->features[0][1] & LMP_HV2) { 861 hdev->pkt_type |= (HCI_HV2); 862 hdev->esco_type |= (ESCO_HV2); 863 } 864 865 if (hdev->features[0][1] & LMP_HV3) { 866 hdev->pkt_type |= (HCI_HV3); 867 hdev->esco_type |= (ESCO_HV3); 868 } 869 870 if (lmp_esco_capable(hdev)) 871 hdev->esco_type |= (ESCO_EV3); 872 873 if (hdev->features[0][4] & LMP_EV4) 874 hdev->esco_type |= (ESCO_EV4); 875 876 if (hdev->features[0][4] & LMP_EV5) 877 hdev->esco_type |= (ESCO_EV5); 878 879 if (hdev->features[0][5] & LMP_EDR_ESCO_2M) 880 hdev->esco_type |= (ESCO_2EV3); 881 882 if (hdev->features[0][5] & LMP_EDR_ESCO_3M) 883 hdev->esco_type |= (ESCO_3EV3); 884 885 if (hdev->features[0][5] & LMP_EDR_3S_ESCO) 886 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 887 888 return rp->status; 889 } 890 891 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, 892 struct sk_buff *skb) 893 { 894 struct hci_rp_read_local_ext_features *rp = data; 895 896 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 897 898 if (rp->status) 899 return rp->status; 900 901 if (hdev->max_page < rp->max_page) { 902 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, 903 &hdev->quirks)) 904 bt_dev_warn(hdev, "broken local ext features page 2"); 905 else 906 hdev->max_page = rp->max_page; 907 } 908 909 if (rp->page < HCI_MAX_PAGES) 910 memcpy(hdev->features[rp->page], rp->features, 8); 911 912 return rp->status; 913 } 914 915 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, 916 struct sk_buff *skb) 917 { 918 struct hci_rp_read_buffer_size *rp = data; 919 920 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 921 922 if (rp->status) 923 return rp->status; 924 925 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); 926 hdev->sco_mtu = rp->sco_mtu; 927 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); 928 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); 929 930 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 931 hdev->sco_mtu = 64; 932 hdev->sco_pkts = 8; 933 } 934 935 hdev->acl_cnt = hdev->acl_pkts; 936 hdev->sco_cnt = hdev->sco_pkts; 937 938 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, 939 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); 940 941 if (!hdev->acl_mtu || !hdev->acl_pkts) 942 return HCI_ERROR_INVALID_PARAMETERS; 943 944 return rp->status; 945 } 946 947 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data, 948 struct sk_buff *skb) 949 { 950 struct hci_rp_read_bd_addr *rp = data; 951 952 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 953 954 if (rp->status) 955 return rp->status; 956 957 if (test_bit(HCI_INIT, &hdev->flags)) 958 bacpy(&hdev->bdaddr, &rp->bdaddr); 959 960 if (hci_dev_test_flag(hdev, HCI_SETUP)) 961 bacpy(&hdev->setup_addr, &rp->bdaddr); 962 963 return rp->status; 964 } 965 966 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data, 967 struct sk_buff *skb) 968 { 969 struct hci_rp_read_local_pairing_opts *rp = data; 970 971 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 972 973 if (rp->status) 974 return rp->status; 975 976 if (hci_dev_test_flag(hdev, HCI_SETUP) || 977 hci_dev_test_flag(hdev, HCI_CONFIG)) { 978 hdev->pairing_opts = rp->pairing_opts; 979 hdev->max_enc_key_size = rp->max_key_size; 980 } 981 982 return rp->status; 983 } 984 985 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data, 986 struct sk_buff *skb) 987 { 988 struct hci_rp_read_page_scan_activity *rp = data; 989 990 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 991 992 if (rp->status) 993 return rp->status; 994 995 if (test_bit(HCI_INIT, &hdev->flags)) { 996 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 997 hdev->page_scan_window = __le16_to_cpu(rp->window); 998 } 999 1000 return rp->status; 1001 } 1002 1003 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data, 1004 struct sk_buff *skb) 1005 { 1006 struct hci_ev_status *rp = data; 1007 struct hci_cp_write_page_scan_activity *sent; 1008 1009 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1010 1011 if (rp->status) 1012 return rp->status; 1013 1014 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); 1015 if (!sent) 1016 return rp->status; 1017 1018 hdev->page_scan_interval = __le16_to_cpu(sent->interval); 1019 hdev->page_scan_window = __le16_to_cpu(sent->window); 1020 1021 return rp->status; 1022 } 1023 1024 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data, 1025 struct sk_buff *skb) 1026 { 1027 struct hci_rp_read_page_scan_type *rp = data; 1028 1029 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1030 1031 if (rp->status) 1032 return rp->status; 1033 1034 if (test_bit(HCI_INIT, &hdev->flags)) 1035 hdev->page_scan_type = rp->type; 1036 1037 return rp->status; 1038 } 1039 1040 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data, 1041 struct sk_buff *skb) 1042 { 1043 struct hci_ev_status *rp = data; 1044 u8 *type; 1045 1046 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1047 1048 if (rp->status) 1049 return rp->status; 1050 1051 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); 1052 if (type) 1053 hdev->page_scan_type = *type; 1054 1055 return rp->status; 1056 } 1057 1058 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data, 1059 struct sk_buff *skb) 1060 { 1061 struct hci_rp_read_clock *rp = data; 1062 struct hci_cp_read_clock *cp; 1063 struct hci_conn *conn; 1064 1065 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1066 1067 if (rp->status) 1068 return rp->status; 1069 1070 hci_dev_lock(hdev); 1071 1072 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); 1073 if (!cp) 1074 goto unlock; 1075 1076 if (cp->which == 0x00) { 1077 hdev->clock = le32_to_cpu(rp->clock); 1078 goto unlock; 1079 } 1080 1081 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 1082 if (conn) { 1083 conn->clock = le32_to_cpu(rp->clock); 1084 conn->clock_accuracy = le16_to_cpu(rp->accuracy); 1085 } 1086 1087 unlock: 1088 hci_dev_unlock(hdev); 1089 return rp->status; 1090 } 1091 1092 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data, 1093 struct sk_buff *skb) 1094 { 1095 struct hci_rp_read_inq_rsp_tx_power *rp = data; 1096 1097 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1098 1099 if (rp->status) 1100 return rp->status; 1101 1102 hdev->inq_tx_power = rp->tx_power; 1103 1104 return rp->status; 1105 } 1106 1107 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data, 1108 struct sk_buff *skb) 1109 { 1110 struct hci_rp_read_def_err_data_reporting *rp = data; 1111 1112 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1113 1114 if (rp->status) 1115 return rp->status; 1116 1117 hdev->err_data_reporting = rp->err_data_reporting; 1118 1119 return rp->status; 1120 } 1121 1122 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data, 1123 struct sk_buff *skb) 1124 { 1125 struct hci_ev_status *rp = data; 1126 struct hci_cp_write_def_err_data_reporting *cp; 1127 1128 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1129 1130 if (rp->status) 1131 return rp->status; 1132 1133 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING); 1134 if (!cp) 1135 return rp->status; 1136 1137 hdev->err_data_reporting = cp->err_data_reporting; 1138 1139 return rp->status; 1140 } 1141 1142 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data, 1143 struct sk_buff *skb) 1144 { 1145 struct hci_rp_pin_code_reply *rp = data; 1146 struct hci_cp_pin_code_reply *cp; 1147 struct hci_conn *conn; 1148 1149 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1150 1151 hci_dev_lock(hdev); 1152 1153 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1154 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 1155 1156 if (rp->status) 1157 goto unlock; 1158 1159 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 1160 if (!cp) 1161 goto unlock; 1162 1163 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1164 if (conn) 1165 conn->pin_length = cp->pin_len; 1166 1167 unlock: 1168 hci_dev_unlock(hdev); 1169 return rp->status; 1170 } 1171 1172 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data, 1173 struct sk_buff *skb) 1174 { 1175 struct hci_rp_pin_code_neg_reply *rp = data; 1176 1177 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1178 1179 hci_dev_lock(hdev); 1180 1181 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1182 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 1183 rp->status); 1184 1185 hci_dev_unlock(hdev); 1186 1187 return rp->status; 1188 } 1189 1190 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data, 1191 struct sk_buff *skb) 1192 { 1193 struct hci_rp_le_read_buffer_size *rp = data; 1194 1195 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1196 1197 if (rp->status) 1198 return rp->status; 1199 1200 hdev->le_mtu = __le16_to_cpu(rp->le_mtu); 1201 hdev->le_pkts = rp->le_max_pkt; 1202 1203 hdev->le_cnt = hdev->le_pkts; 1204 1205 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 1206 1207 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) 1208 return HCI_ERROR_INVALID_PARAMETERS; 1209 1210 return rp->status; 1211 } 1212 1213 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data, 1214 struct sk_buff *skb) 1215 { 1216 struct hci_rp_le_read_local_features *rp = data; 1217 1218 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1219 1220 if (rp->status) 1221 return rp->status; 1222 1223 memcpy(hdev->le_features, rp->features, 8); 1224 1225 return rp->status; 1226 } 1227 1228 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data, 1229 struct sk_buff *skb) 1230 { 1231 struct hci_rp_le_read_adv_tx_power *rp = data; 1232 1233 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1234 1235 if (rp->status) 1236 return rp->status; 1237 1238 hdev->adv_tx_power = rp->tx_power; 1239 1240 return rp->status; 1241 } 1242 1243 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data, 1244 struct sk_buff *skb) 1245 { 1246 struct hci_rp_user_confirm_reply *rp = data; 1247 1248 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1249 1250 hci_dev_lock(hdev); 1251 1252 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1253 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, 1254 rp->status); 1255 1256 hci_dev_unlock(hdev); 1257 1258 return rp->status; 1259 } 1260 1261 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data, 1262 struct sk_buff *skb) 1263 { 1264 struct hci_rp_user_confirm_reply *rp = data; 1265 1266 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1267 1268 hci_dev_lock(hdev); 1269 1270 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1271 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 1272 ACL_LINK, 0, rp->status); 1273 1274 hci_dev_unlock(hdev); 1275 1276 return rp->status; 1277 } 1278 1279 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data, 1280 struct sk_buff *skb) 1281 { 1282 struct hci_rp_user_confirm_reply *rp = data; 1283 1284 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1285 1286 hci_dev_lock(hdev); 1287 1288 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1289 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 1290 0, rp->status); 1291 1292 hci_dev_unlock(hdev); 1293 1294 return rp->status; 1295 } 1296 1297 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data, 1298 struct sk_buff *skb) 1299 { 1300 struct hci_rp_user_confirm_reply *rp = data; 1301 1302 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1303 1304 hci_dev_lock(hdev); 1305 1306 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1307 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 1308 ACL_LINK, 0, rp->status); 1309 1310 hci_dev_unlock(hdev); 1311 1312 return rp->status; 1313 } 1314 1315 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data, 1316 struct sk_buff *skb) 1317 { 1318 struct hci_rp_read_local_oob_data *rp = data; 1319 1320 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1321 1322 return rp->status; 1323 } 1324 1325 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data, 1326 struct sk_buff *skb) 1327 { 1328 struct hci_rp_read_local_oob_ext_data *rp = data; 1329 1330 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1331 1332 return rp->status; 1333 } 1334 1335 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data, 1336 struct sk_buff *skb) 1337 { 1338 struct hci_ev_status *rp = data; 1339 bdaddr_t *sent; 1340 1341 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1342 1343 if (rp->status) 1344 return rp->status; 1345 1346 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 1347 if (!sent) 1348 return rp->status; 1349 1350 hci_dev_lock(hdev); 1351 1352 bacpy(&hdev->random_addr, sent); 1353 1354 if (!bacmp(&hdev->rpa, sent)) { 1355 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); 1356 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, 1357 secs_to_jiffies(hdev->rpa_timeout)); 1358 } 1359 1360 hci_dev_unlock(hdev); 1361 1362 return rp->status; 1363 } 1364 1365 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data, 1366 struct sk_buff *skb) 1367 { 1368 struct hci_ev_status *rp = data; 1369 struct hci_cp_le_set_default_phy *cp; 1370 1371 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1372 1373 if (rp->status) 1374 return rp->status; 1375 1376 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); 1377 if (!cp) 1378 return rp->status; 1379 1380 hci_dev_lock(hdev); 1381 1382 hdev->le_tx_def_phys = cp->tx_phys; 1383 hdev->le_rx_def_phys = cp->rx_phys; 1384 1385 hci_dev_unlock(hdev); 1386 1387 return rp->status; 1388 } 1389 1390 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data, 1391 struct sk_buff *skb) 1392 { 1393 struct hci_ev_status *rp = data; 1394 struct hci_cp_le_set_adv_set_rand_addr *cp; 1395 struct adv_info *adv; 1396 1397 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1398 1399 if (rp->status) 1400 return rp->status; 1401 1402 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); 1403 /* Update only in case the adv instance since handle 0x00 shall be using 1404 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and 1405 * non-extended adverting. 1406 */ 1407 if (!cp || !cp->handle) 1408 return rp->status; 1409 1410 hci_dev_lock(hdev); 1411 1412 adv = hci_find_adv_instance(hdev, cp->handle); 1413 if (adv) { 1414 bacpy(&adv->random_addr, &cp->bdaddr); 1415 if (!bacmp(&hdev->rpa, &cp->bdaddr)) { 1416 adv->rpa_expired = false; 1417 queue_delayed_work(hdev->workqueue, 1418 &adv->rpa_expired_cb, 1419 secs_to_jiffies(hdev->rpa_timeout)); 1420 } 1421 } 1422 1423 hci_dev_unlock(hdev); 1424 1425 return rp->status; 1426 } 1427 1428 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data, 1429 struct sk_buff *skb) 1430 { 1431 struct hci_ev_status *rp = data; 1432 u8 *instance; 1433 int err; 1434 1435 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1436 1437 if (rp->status) 1438 return rp->status; 1439 1440 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET); 1441 if (!instance) 1442 return rp->status; 1443 1444 hci_dev_lock(hdev); 1445 1446 err = hci_remove_adv_instance(hdev, *instance); 1447 if (!err) 1448 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, 1449 *instance); 1450 1451 hci_dev_unlock(hdev); 1452 1453 return rp->status; 1454 } 1455 1456 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data, 1457 struct sk_buff *skb) 1458 { 1459 struct hci_ev_status *rp = data; 1460 struct adv_info *adv, *n; 1461 int err; 1462 1463 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1464 1465 if (rp->status) 1466 return rp->status; 1467 1468 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS)) 1469 return rp->status; 1470 1471 hci_dev_lock(hdev); 1472 1473 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 1474 u8 instance = adv->instance; 1475 1476 err = hci_remove_adv_instance(hdev, instance); 1477 if (!err) 1478 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), 1479 hdev, instance); 1480 } 1481 1482 hci_dev_unlock(hdev); 1483 1484 return rp->status; 1485 } 1486 1487 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data, 1488 struct sk_buff *skb) 1489 { 1490 struct hci_rp_le_read_transmit_power *rp = data; 1491 1492 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1493 1494 if (rp->status) 1495 return rp->status; 1496 1497 hdev->min_le_tx_power = rp->min_le_tx_power; 1498 hdev->max_le_tx_power = rp->max_le_tx_power; 1499 1500 return rp->status; 1501 } 1502 1503 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data, 1504 struct sk_buff *skb) 1505 { 1506 struct hci_ev_status *rp = data; 1507 struct hci_cp_le_set_privacy_mode *cp; 1508 struct hci_conn_params *params; 1509 1510 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1511 1512 if (rp->status) 1513 return rp->status; 1514 1515 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE); 1516 if (!cp) 1517 return rp->status; 1518 1519 hci_dev_lock(hdev); 1520 1521 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type); 1522 if (params) 1523 WRITE_ONCE(params->privacy_mode, cp->mode); 1524 1525 hci_dev_unlock(hdev); 1526 1527 return rp->status; 1528 } 1529 1530 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data, 1531 struct sk_buff *skb) 1532 { 1533 struct hci_ev_status *rp = data; 1534 __u8 *sent; 1535 1536 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1537 1538 if (rp->status) 1539 return rp->status; 1540 1541 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); 1542 if (!sent) 1543 return rp->status; 1544 1545 hci_dev_lock(hdev); 1546 1547 /* If we're doing connection initiation as peripheral. Set a 1548 * timeout in case something goes wrong. 1549 */ 1550 if (*sent) { 1551 struct hci_conn *conn; 1552 1553 hci_dev_set_flag(hdev, HCI_LE_ADV); 1554 1555 conn = hci_lookup_le_connect(hdev); 1556 if (conn) 1557 queue_delayed_work(hdev->workqueue, 1558 &conn->le_conn_timeout, 1559 conn->conn_timeout); 1560 } else { 1561 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1562 } 1563 1564 hci_dev_unlock(hdev); 1565 1566 return rp->status; 1567 } 1568 1569 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, 1570 struct sk_buff *skb) 1571 { 1572 struct hci_cp_le_set_ext_adv_enable *cp; 1573 struct hci_cp_ext_adv_set *set; 1574 struct adv_info *adv = NULL, *n; 1575 struct hci_ev_status *rp = data; 1576 1577 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1578 1579 if (rp->status) 1580 return rp->status; 1581 1582 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); 1583 if (!cp) 1584 return rp->status; 1585 1586 set = (void *)cp->data; 1587 1588 hci_dev_lock(hdev); 1589 1590 if (cp->num_of_sets) 1591 adv = hci_find_adv_instance(hdev, set->handle); 1592 1593 if (cp->enable) { 1594 struct hci_conn *conn; 1595 1596 hci_dev_set_flag(hdev, HCI_LE_ADV); 1597 1598 if (adv && !adv->periodic) 1599 adv->enabled = true; 1600 1601 conn = hci_lookup_le_connect(hdev); 1602 if (conn) 1603 queue_delayed_work(hdev->workqueue, 1604 &conn->le_conn_timeout, 1605 conn->conn_timeout); 1606 } else { 1607 if (cp->num_of_sets) { 1608 if (adv) 1609 adv->enabled = false; 1610 1611 /* If just one instance was disabled check if there are 1612 * any other instance enabled before clearing HCI_LE_ADV 1613 */ 1614 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 1615 list) { 1616 if (adv->enabled) 1617 goto unlock; 1618 } 1619 } else { 1620 /* All instances shall be considered disabled */ 1621 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 1622 list) 1623 adv->enabled = false; 1624 } 1625 1626 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1627 } 1628 1629 unlock: 1630 hci_dev_unlock(hdev); 1631 return rp->status; 1632 } 1633 1634 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data, 1635 struct sk_buff *skb) 1636 { 1637 struct hci_cp_le_set_scan_param *cp; 1638 struct hci_ev_status *rp = data; 1639 1640 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1641 1642 if (rp->status) 1643 return rp->status; 1644 1645 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); 1646 if (!cp) 1647 return rp->status; 1648 1649 hci_dev_lock(hdev); 1650 1651 hdev->le_scan_type = cp->type; 1652 1653 hci_dev_unlock(hdev); 1654 1655 return rp->status; 1656 } 1657 1658 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data, 1659 struct sk_buff *skb) 1660 { 1661 struct hci_cp_le_set_ext_scan_params *cp; 1662 struct hci_ev_status *rp = data; 1663 struct hci_cp_le_scan_phy_params *phy_param; 1664 1665 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1666 1667 if (rp->status) 1668 return rp->status; 1669 1670 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); 1671 if (!cp) 1672 return rp->status; 1673 1674 phy_param = (void *)cp->data; 1675 1676 hci_dev_lock(hdev); 1677 1678 hdev->le_scan_type = phy_param->type; 1679 1680 hci_dev_unlock(hdev); 1681 1682 return rp->status; 1683 } 1684 1685 static bool has_pending_adv_report(struct hci_dev *hdev) 1686 { 1687 struct discovery_state *d = &hdev->discovery; 1688 1689 return bacmp(&d->last_adv_addr, BDADDR_ANY); 1690 } 1691 1692 static void clear_pending_adv_report(struct hci_dev *hdev) 1693 { 1694 struct discovery_state *d = &hdev->discovery; 1695 1696 bacpy(&d->last_adv_addr, BDADDR_ANY); 1697 d->last_adv_data_len = 0; 1698 } 1699 1700 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, 1701 u8 bdaddr_type, s8 rssi, u32 flags, 1702 u8 *data, u8 len) 1703 { 1704 struct discovery_state *d = &hdev->discovery; 1705 1706 if (len > max_adv_len(hdev)) 1707 return; 1708 1709 bacpy(&d->last_adv_addr, bdaddr); 1710 d->last_adv_addr_type = bdaddr_type; 1711 d->last_adv_rssi = rssi; 1712 d->last_adv_flags = flags; 1713 memcpy(d->last_adv_data, data, len); 1714 d->last_adv_data_len = len; 1715 } 1716 1717 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) 1718 { 1719 hci_dev_lock(hdev); 1720 1721 switch (enable) { 1722 case LE_SCAN_ENABLE: 1723 hci_dev_set_flag(hdev, HCI_LE_SCAN); 1724 if (hdev->le_scan_type == LE_SCAN_ACTIVE) { 1725 clear_pending_adv_report(hdev); 1726 hci_discovery_set_state(hdev, DISCOVERY_FINDING); 1727 } 1728 break; 1729 1730 case LE_SCAN_DISABLE: 1731 /* We do this here instead of when setting DISCOVERY_STOPPED 1732 * since the latter would potentially require waiting for 1733 * inquiry to stop too. 1734 */ 1735 if (has_pending_adv_report(hdev)) { 1736 struct discovery_state *d = &hdev->discovery; 1737 1738 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 1739 d->last_adv_addr_type, NULL, 1740 d->last_adv_rssi, d->last_adv_flags, 1741 d->last_adv_data, 1742 d->last_adv_data_len, NULL, 0, 0); 1743 } 1744 1745 /* Cancel this timer so that we don't try to disable scanning 1746 * when it's already disabled. 1747 */ 1748 cancel_delayed_work(&hdev->le_scan_disable); 1749 1750 hci_dev_clear_flag(hdev, HCI_LE_SCAN); 1751 1752 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we 1753 * interrupted scanning due to a connect request. Mark 1754 * therefore discovery as stopped. 1755 */ 1756 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) 1757 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1758 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && 1759 hdev->discovery.state == DISCOVERY_FINDING) 1760 queue_work(hdev->workqueue, &hdev->reenable_adv_work); 1761 1762 break; 1763 1764 default: 1765 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", 1766 enable); 1767 break; 1768 } 1769 1770 hci_dev_unlock(hdev); 1771 } 1772 1773 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data, 1774 struct sk_buff *skb) 1775 { 1776 struct hci_cp_le_set_scan_enable *cp; 1777 struct hci_ev_status *rp = data; 1778 1779 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1780 1781 if (rp->status) 1782 return rp->status; 1783 1784 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1785 if (!cp) 1786 return rp->status; 1787 1788 le_set_scan_enable_complete(hdev, cp->enable); 1789 1790 return rp->status; 1791 } 1792 1793 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data, 1794 struct sk_buff *skb) 1795 { 1796 struct hci_cp_le_set_ext_scan_enable *cp; 1797 struct hci_ev_status *rp = data; 1798 1799 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1800 1801 if (rp->status) 1802 return rp->status; 1803 1804 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); 1805 if (!cp) 1806 return rp->status; 1807 1808 le_set_scan_enable_complete(hdev, cp->enable); 1809 1810 return rp->status; 1811 } 1812 1813 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data, 1814 struct sk_buff *skb) 1815 { 1816 struct hci_rp_le_read_num_supported_adv_sets *rp = data; 1817 1818 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status, 1819 rp->num_of_sets); 1820 1821 if (rp->status) 1822 return rp->status; 1823 1824 hdev->le_num_of_adv_sets = rp->num_of_sets; 1825 1826 return rp->status; 1827 } 1828 1829 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data, 1830 struct sk_buff *skb) 1831 { 1832 struct hci_rp_le_read_accept_list_size *rp = data; 1833 1834 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); 1835 1836 if (rp->status) 1837 return rp->status; 1838 1839 hdev->le_accept_list_size = rp->size; 1840 1841 return rp->status; 1842 } 1843 1844 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data, 1845 struct sk_buff *skb) 1846 { 1847 struct hci_ev_status *rp = data; 1848 1849 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1850 1851 if (rp->status) 1852 return rp->status; 1853 1854 hci_dev_lock(hdev); 1855 hci_bdaddr_list_clear(&hdev->le_accept_list); 1856 hci_dev_unlock(hdev); 1857 1858 return rp->status; 1859 } 1860 1861 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data, 1862 struct sk_buff *skb) 1863 { 1864 struct hci_cp_le_add_to_accept_list *sent; 1865 struct hci_ev_status *rp = data; 1866 1867 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1868 1869 if (rp->status) 1870 return rp->status; 1871 1872 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); 1873 if (!sent) 1874 return rp->status; 1875 1876 hci_dev_lock(hdev); 1877 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr, 1878 sent->bdaddr_type); 1879 hci_dev_unlock(hdev); 1880 1881 return rp->status; 1882 } 1883 1884 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data, 1885 struct sk_buff *skb) 1886 { 1887 struct hci_cp_le_del_from_accept_list *sent; 1888 struct hci_ev_status *rp = data; 1889 1890 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1891 1892 if (rp->status) 1893 return rp->status; 1894 1895 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST); 1896 if (!sent) 1897 return rp->status; 1898 1899 hci_dev_lock(hdev); 1900 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr, 1901 sent->bdaddr_type); 1902 hci_dev_unlock(hdev); 1903 1904 return rp->status; 1905 } 1906 1907 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data, 1908 struct sk_buff *skb) 1909 { 1910 struct hci_rp_le_read_supported_states *rp = data; 1911 1912 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1913 1914 if (rp->status) 1915 return rp->status; 1916 1917 memcpy(hdev->le_states, rp->le_states, 8); 1918 1919 return rp->status; 1920 } 1921 1922 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data, 1923 struct sk_buff *skb) 1924 { 1925 struct hci_rp_le_read_def_data_len *rp = data; 1926 1927 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1928 1929 if (rp->status) 1930 return rp->status; 1931 1932 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); 1933 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); 1934 1935 return rp->status; 1936 } 1937 1938 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data, 1939 struct sk_buff *skb) 1940 { 1941 struct hci_cp_le_write_def_data_len *sent; 1942 struct hci_ev_status *rp = data; 1943 1944 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1945 1946 if (rp->status) 1947 return rp->status; 1948 1949 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); 1950 if (!sent) 1951 return rp->status; 1952 1953 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); 1954 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); 1955 1956 return rp->status; 1957 } 1958 1959 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data, 1960 struct sk_buff *skb) 1961 { 1962 struct hci_cp_le_add_to_resolv_list *sent; 1963 struct hci_ev_status *rp = data; 1964 1965 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1966 1967 if (rp->status) 1968 return rp->status; 1969 1970 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); 1971 if (!sent) 1972 return rp->status; 1973 1974 hci_dev_lock(hdev); 1975 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 1976 sent->bdaddr_type, sent->peer_irk, 1977 sent->local_irk); 1978 hci_dev_unlock(hdev); 1979 1980 return rp->status; 1981 } 1982 1983 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data, 1984 struct sk_buff *skb) 1985 { 1986 struct hci_cp_le_del_from_resolv_list *sent; 1987 struct hci_ev_status *rp = data; 1988 1989 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1990 1991 if (rp->status) 1992 return rp->status; 1993 1994 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); 1995 if (!sent) 1996 return rp->status; 1997 1998 hci_dev_lock(hdev); 1999 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 2000 sent->bdaddr_type); 2001 hci_dev_unlock(hdev); 2002 2003 return rp->status; 2004 } 2005 2006 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data, 2007 struct sk_buff *skb) 2008 { 2009 struct hci_ev_status *rp = data; 2010 2011 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2012 2013 if (rp->status) 2014 return rp->status; 2015 2016 hci_dev_lock(hdev); 2017 hci_bdaddr_list_clear(&hdev->le_resolv_list); 2018 hci_dev_unlock(hdev); 2019 2020 return rp->status; 2021 } 2022 2023 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data, 2024 struct sk_buff *skb) 2025 { 2026 struct hci_rp_le_read_resolv_list_size *rp = data; 2027 2028 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); 2029 2030 if (rp->status) 2031 return rp->status; 2032 2033 hdev->le_resolv_list_size = rp->size; 2034 2035 return rp->status; 2036 } 2037 2038 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data, 2039 struct sk_buff *skb) 2040 { 2041 struct hci_ev_status *rp = data; 2042 __u8 *sent; 2043 2044 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2045 2046 if (rp->status) 2047 return rp->status; 2048 2049 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); 2050 if (!sent) 2051 return rp->status; 2052 2053 hci_dev_lock(hdev); 2054 2055 if (*sent) 2056 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION); 2057 else 2058 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); 2059 2060 hci_dev_unlock(hdev); 2061 2062 return rp->status; 2063 } 2064 2065 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data, 2066 struct sk_buff *skb) 2067 { 2068 struct hci_rp_le_read_max_data_len *rp = data; 2069 2070 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2071 2072 if (rp->status) 2073 return rp->status; 2074 2075 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); 2076 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); 2077 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); 2078 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); 2079 2080 return rp->status; 2081 } 2082 2083 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data, 2084 struct sk_buff *skb) 2085 { 2086 struct hci_cp_write_le_host_supported *sent; 2087 struct hci_ev_status *rp = data; 2088 2089 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2090 2091 if (rp->status) 2092 return rp->status; 2093 2094 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 2095 if (!sent) 2096 return rp->status; 2097 2098 hci_dev_lock(hdev); 2099 2100 if (sent->le) { 2101 hdev->features[1][0] |= LMP_HOST_LE; 2102 hci_dev_set_flag(hdev, HCI_LE_ENABLED); 2103 } else { 2104 hdev->features[1][0] &= ~LMP_HOST_LE; 2105 hci_dev_clear_flag(hdev, HCI_LE_ENABLED); 2106 hci_dev_clear_flag(hdev, HCI_ADVERTISING); 2107 } 2108 2109 if (sent->simul) 2110 hdev->features[1][0] |= LMP_HOST_LE_BREDR; 2111 else 2112 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; 2113 2114 hci_dev_unlock(hdev); 2115 2116 return rp->status; 2117 } 2118 2119 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, 2120 struct sk_buff *skb) 2121 { 2122 struct hci_cp_le_set_adv_param *cp; 2123 struct hci_ev_status *rp = data; 2124 2125 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2126 2127 if (rp->status) 2128 return rp->status; 2129 2130 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); 2131 if (!cp) 2132 return rp->status; 2133 2134 hci_dev_lock(hdev); 2135 hdev->adv_addr_type = cp->own_address_type; 2136 hci_dev_unlock(hdev); 2137 2138 return rp->status; 2139 } 2140 2141 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, 2142 struct sk_buff *skb) 2143 { 2144 struct hci_rp_le_set_ext_adv_params *rp = data; 2145 struct hci_cp_le_set_ext_adv_params *cp; 2146 struct adv_info *adv_instance; 2147 2148 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2149 2150 if (rp->status) 2151 return rp->status; 2152 2153 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); 2154 if (!cp) 2155 return rp->status; 2156 2157 hci_dev_lock(hdev); 2158 hdev->adv_addr_type = cp->own_addr_type; 2159 if (!cp->handle) { 2160 /* Store in hdev for instance 0 */ 2161 hdev->adv_tx_power = rp->tx_power; 2162 } else { 2163 adv_instance = hci_find_adv_instance(hdev, cp->handle); 2164 if (adv_instance) 2165 adv_instance->tx_power = rp->tx_power; 2166 } 2167 /* Update adv data as tx power is known now */ 2168 hci_update_adv_data(hdev, cp->handle); 2169 2170 hci_dev_unlock(hdev); 2171 2172 return rp->status; 2173 } 2174 2175 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, 2176 struct sk_buff *skb) 2177 { 2178 struct hci_rp_read_rssi *rp = data; 2179 struct hci_conn *conn; 2180 2181 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2182 2183 if (rp->status) 2184 return rp->status; 2185 2186 hci_dev_lock(hdev); 2187 2188 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2189 if (conn) 2190 conn->rssi = rp->rssi; 2191 2192 hci_dev_unlock(hdev); 2193 2194 return rp->status; 2195 } 2196 2197 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data, 2198 struct sk_buff *skb) 2199 { 2200 struct hci_cp_read_tx_power *sent; 2201 struct hci_rp_read_tx_power *rp = data; 2202 struct hci_conn *conn; 2203 2204 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2205 2206 if (rp->status) 2207 return rp->status; 2208 2209 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); 2210 if (!sent) 2211 return rp->status; 2212 2213 hci_dev_lock(hdev); 2214 2215 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2216 if (!conn) 2217 goto unlock; 2218 2219 switch (sent->type) { 2220 case 0x00: 2221 conn->tx_power = rp->tx_power; 2222 break; 2223 case 0x01: 2224 conn->max_tx_power = rp->tx_power; 2225 break; 2226 } 2227 2228 unlock: 2229 hci_dev_unlock(hdev); 2230 return rp->status; 2231 } 2232 2233 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data, 2234 struct sk_buff *skb) 2235 { 2236 struct hci_ev_status *rp = data; 2237 u8 *mode; 2238 2239 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2240 2241 if (rp->status) 2242 return rp->status; 2243 2244 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); 2245 if (mode) 2246 hdev->ssp_debug_mode = *mode; 2247 2248 return rp->status; 2249 } 2250 2251 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 2252 { 2253 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2254 2255 if (status) 2256 return; 2257 2258 if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY)) 2259 set_bit(HCI_INQUIRY, &hdev->flags); 2260 } 2261 2262 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 2263 { 2264 struct hci_cp_create_conn *cp; 2265 struct hci_conn *conn; 2266 2267 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2268 2269 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 2270 if (!cp) 2271 return; 2272 2273 hci_dev_lock(hdev); 2274 2275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2276 2277 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); 2278 2279 if (status) { 2280 if (conn && conn->state == BT_CONNECT) { 2281 conn->state = BT_CLOSED; 2282 hci_connect_cfm(conn, status); 2283 hci_conn_del(conn); 2284 } 2285 } else { 2286 if (!conn) { 2287 conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr, 2288 HCI_ROLE_MASTER); 2289 if (IS_ERR(conn)) 2290 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); 2291 } 2292 } 2293 2294 hci_dev_unlock(hdev); 2295 } 2296 2297 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 2298 { 2299 struct hci_cp_add_sco *cp; 2300 struct hci_conn *acl; 2301 struct hci_link *link; 2302 __u16 handle; 2303 2304 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2305 2306 if (!status) 2307 return; 2308 2309 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 2310 if (!cp) 2311 return; 2312 2313 handle = __le16_to_cpu(cp->handle); 2314 2315 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2316 2317 hci_dev_lock(hdev); 2318 2319 acl = hci_conn_hash_lookup_handle(hdev, handle); 2320 if (acl) { 2321 link = list_first_entry_or_null(&acl->link_list, 2322 struct hci_link, list); 2323 if (link && link->conn) { 2324 link->conn->state = BT_CLOSED; 2325 2326 hci_connect_cfm(link->conn, status); 2327 hci_conn_del(link->conn); 2328 } 2329 } 2330 2331 hci_dev_unlock(hdev); 2332 } 2333 2334 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 2335 { 2336 struct hci_cp_auth_requested *cp; 2337 struct hci_conn *conn; 2338 2339 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2340 2341 if (!status) 2342 return; 2343 2344 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 2345 if (!cp) 2346 return; 2347 2348 hci_dev_lock(hdev); 2349 2350 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2351 if (conn) { 2352 if (conn->state == BT_CONFIG) { 2353 hci_connect_cfm(conn, status); 2354 hci_conn_drop(conn); 2355 } 2356 } 2357 2358 hci_dev_unlock(hdev); 2359 } 2360 2361 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 2362 { 2363 struct hci_cp_set_conn_encrypt *cp; 2364 struct hci_conn *conn; 2365 2366 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2367 2368 if (!status) 2369 return; 2370 2371 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 2372 if (!cp) 2373 return; 2374 2375 hci_dev_lock(hdev); 2376 2377 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2378 if (conn) { 2379 if (conn->state == BT_CONFIG) { 2380 hci_connect_cfm(conn, status); 2381 hci_conn_drop(conn); 2382 } 2383 } 2384 2385 hci_dev_unlock(hdev); 2386 } 2387 2388 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 2389 struct hci_conn *conn) 2390 { 2391 if (conn->state != BT_CONFIG || !conn->out) 2392 return 0; 2393 2394 if (conn->pending_sec_level == BT_SECURITY_SDP) 2395 return 0; 2396 2397 /* Only request authentication for SSP connections or non-SSP 2398 * devices with sec_level MEDIUM or HIGH or if MITM protection 2399 * is requested. 2400 */ 2401 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 2402 conn->pending_sec_level != BT_SECURITY_FIPS && 2403 conn->pending_sec_level != BT_SECURITY_HIGH && 2404 conn->pending_sec_level != BT_SECURITY_MEDIUM) 2405 return 0; 2406 2407 return 1; 2408 } 2409 2410 static int hci_resolve_name(struct hci_dev *hdev, 2411 struct inquiry_entry *e) 2412 { 2413 struct hci_cp_remote_name_req cp; 2414 2415 memset(&cp, 0, sizeof(cp)); 2416 2417 bacpy(&cp.bdaddr, &e->data.bdaddr); 2418 cp.pscan_rep_mode = e->data.pscan_rep_mode; 2419 cp.pscan_mode = e->data.pscan_mode; 2420 cp.clock_offset = e->data.clock_offset; 2421 2422 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2423 } 2424 2425 static bool hci_resolve_next_name(struct hci_dev *hdev) 2426 { 2427 struct discovery_state *discov = &hdev->discovery; 2428 struct inquiry_entry *e; 2429 2430 if (list_empty(&discov->resolve)) 2431 return false; 2432 2433 /* We should stop if we already spent too much time resolving names. */ 2434 if (time_after(jiffies, discov->name_resolve_timeout)) { 2435 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); 2436 return false; 2437 } 2438 2439 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2440 if (!e) 2441 return false; 2442 2443 if (hci_resolve_name(hdev, e) == 0) { 2444 e->name_state = NAME_PENDING; 2445 return true; 2446 } 2447 2448 return false; 2449 } 2450 2451 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2452 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2453 { 2454 struct discovery_state *discov = &hdev->discovery; 2455 struct inquiry_entry *e; 2456 2457 /* Update the mgmt connected state if necessary. Be careful with 2458 * conn objects that exist but are not (yet) connected however. 2459 * Only those in BT_CONFIG or BT_CONNECTED states can be 2460 * considered connected. 2461 */ 2462 if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) 2463 mgmt_device_connected(hdev, conn, name, name_len); 2464 2465 if (discov->state == DISCOVERY_STOPPED) 2466 return; 2467 2468 if (discov->state == DISCOVERY_STOPPING) 2469 goto discov_complete; 2470 2471 if (discov->state != DISCOVERY_RESOLVING) 2472 return; 2473 2474 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2475 /* If the device was not found in a list of found devices names of which 2476 * are pending. there is no need to continue resolving a next name as it 2477 * will be done upon receiving another Remote Name Request Complete 2478 * Event */ 2479 if (!e) 2480 return; 2481 2482 list_del(&e->list); 2483 2484 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; 2485 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, 2486 name, name_len); 2487 2488 if (hci_resolve_next_name(hdev)) 2489 return; 2490 2491 discov_complete: 2492 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2493 } 2494 2495 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2496 { 2497 struct hci_cp_remote_name_req *cp; 2498 struct hci_conn *conn; 2499 2500 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2501 2502 /* If successful wait for the name req complete event before 2503 * checking for the need to do authentication */ 2504 if (!status) 2505 return; 2506 2507 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2508 if (!cp) 2509 return; 2510 2511 hci_dev_lock(hdev); 2512 2513 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2514 2515 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2516 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2517 2518 if (!conn) 2519 goto unlock; 2520 2521 if (!hci_outgoing_auth_needed(hdev, conn)) 2522 goto unlock; 2523 2524 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2525 struct hci_cp_auth_requested auth_cp; 2526 2527 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2528 2529 auth_cp.handle = __cpu_to_le16(conn->handle); 2530 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2531 sizeof(auth_cp), &auth_cp); 2532 } 2533 2534 unlock: 2535 hci_dev_unlock(hdev); 2536 } 2537 2538 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2539 { 2540 struct hci_cp_read_remote_features *cp; 2541 struct hci_conn *conn; 2542 2543 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2544 2545 if (!status) 2546 return; 2547 2548 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2549 if (!cp) 2550 return; 2551 2552 hci_dev_lock(hdev); 2553 2554 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2555 if (conn) { 2556 if (conn->state == BT_CONFIG) { 2557 hci_connect_cfm(conn, status); 2558 hci_conn_drop(conn); 2559 } 2560 } 2561 2562 hci_dev_unlock(hdev); 2563 } 2564 2565 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2566 { 2567 struct hci_cp_read_remote_ext_features *cp; 2568 struct hci_conn *conn; 2569 2570 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2571 2572 if (!status) 2573 return; 2574 2575 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2576 if (!cp) 2577 return; 2578 2579 hci_dev_lock(hdev); 2580 2581 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2582 if (conn) { 2583 if (conn->state == BT_CONFIG) { 2584 hci_connect_cfm(conn, status); 2585 hci_conn_drop(conn); 2586 } 2587 } 2588 2589 hci_dev_unlock(hdev); 2590 } 2591 2592 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle, 2593 __u8 status) 2594 { 2595 struct hci_conn *acl; 2596 struct hci_link *link; 2597 2598 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status); 2599 2600 hci_dev_lock(hdev); 2601 2602 acl = hci_conn_hash_lookup_handle(hdev, handle); 2603 if (acl) { 2604 link = list_first_entry_or_null(&acl->link_list, 2605 struct hci_link, list); 2606 if (link && link->conn) { 2607 link->conn->state = BT_CLOSED; 2608 2609 hci_connect_cfm(link->conn, status); 2610 hci_conn_del(link->conn); 2611 } 2612 } 2613 2614 hci_dev_unlock(hdev); 2615 } 2616 2617 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2618 { 2619 struct hci_cp_setup_sync_conn *cp; 2620 2621 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2622 2623 if (!status) 2624 return; 2625 2626 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2627 if (!cp) 2628 return; 2629 2630 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); 2631 } 2632 2633 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2634 { 2635 struct hci_cp_enhanced_setup_sync_conn *cp; 2636 2637 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2638 2639 if (!status) 2640 return; 2641 2642 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2643 if (!cp) 2644 return; 2645 2646 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); 2647 } 2648 2649 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2650 { 2651 struct hci_cp_sniff_mode *cp; 2652 struct hci_conn *conn; 2653 2654 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2655 2656 if (!status) 2657 return; 2658 2659 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2660 if (!cp) 2661 return; 2662 2663 hci_dev_lock(hdev); 2664 2665 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2666 if (conn) { 2667 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2668 2669 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2670 hci_sco_setup(conn, status); 2671 } 2672 2673 hci_dev_unlock(hdev); 2674 } 2675 2676 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2677 { 2678 struct hci_cp_exit_sniff_mode *cp; 2679 struct hci_conn *conn; 2680 2681 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2682 2683 if (!status) 2684 return; 2685 2686 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2687 if (!cp) 2688 return; 2689 2690 hci_dev_lock(hdev); 2691 2692 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2693 if (conn) { 2694 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2695 2696 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2697 hci_sco_setup(conn, status); 2698 } 2699 2700 hci_dev_unlock(hdev); 2701 } 2702 2703 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2704 { 2705 struct hci_cp_disconnect *cp; 2706 struct hci_conn_params *params; 2707 struct hci_conn *conn; 2708 bool mgmt_conn; 2709 2710 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2711 2712 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended 2713 * otherwise cleanup the connection immediately. 2714 */ 2715 if (!status && !hdev->suspended) 2716 return; 2717 2718 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2719 if (!cp) 2720 return; 2721 2722 hci_dev_lock(hdev); 2723 2724 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2725 if (!conn) 2726 goto unlock; 2727 2728 if (status) { 2729 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2730 conn->dst_type, status); 2731 2732 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2733 hdev->cur_adv_instance = conn->adv_instance; 2734 hci_enable_advertising(hdev); 2735 } 2736 2737 /* Inform sockets conn is gone before we delete it */ 2738 hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED); 2739 2740 goto done; 2741 } 2742 2743 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2744 2745 if (conn->type == ACL_LINK) { 2746 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2747 hci_remove_link_key(hdev, &conn->dst); 2748 } 2749 2750 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2751 if (params) { 2752 switch (params->auto_connect) { 2753 case HCI_AUTO_CONN_LINK_LOSS: 2754 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2755 break; 2756 fallthrough; 2757 2758 case HCI_AUTO_CONN_DIRECT: 2759 case HCI_AUTO_CONN_ALWAYS: 2760 hci_pend_le_list_del_init(params); 2761 hci_pend_le_list_add(params, &hdev->pend_le_conns); 2762 break; 2763 2764 default: 2765 break; 2766 } 2767 } 2768 2769 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2770 cp->reason, mgmt_conn); 2771 2772 hci_disconn_cfm(conn, cp->reason); 2773 2774 done: 2775 /* If the disconnection failed for any reason, the upper layer 2776 * does not retry to disconnect in current implementation. 2777 * Hence, we need to do some basic cleanup here and re-enable 2778 * advertising if necessary. 2779 */ 2780 hci_conn_del(conn); 2781 unlock: 2782 hci_dev_unlock(hdev); 2783 } 2784 2785 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2786 { 2787 /* When using controller based address resolution, then the new 2788 * address types 0x02 and 0x03 are used. These types need to be 2789 * converted back into either public address or random address type 2790 */ 2791 switch (type) { 2792 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2793 if (resolved) 2794 *resolved = true; 2795 return ADDR_LE_DEV_PUBLIC; 2796 case ADDR_LE_DEV_RANDOM_RESOLVED: 2797 if (resolved) 2798 *resolved = true; 2799 return ADDR_LE_DEV_RANDOM; 2800 } 2801 2802 if (resolved) 2803 *resolved = false; 2804 return type; 2805 } 2806 2807 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2808 u8 peer_addr_type, u8 own_address_type, 2809 u8 filter_policy) 2810 { 2811 struct hci_conn *conn; 2812 2813 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2814 peer_addr_type); 2815 if (!conn) 2816 return; 2817 2818 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2819 2820 /* Store the initiator and responder address information which 2821 * is needed for SMP. These values will not change during the 2822 * lifetime of the connection. 2823 */ 2824 conn->init_addr_type = own_address_type; 2825 if (own_address_type == ADDR_LE_DEV_RANDOM) 2826 bacpy(&conn->init_addr, &hdev->random_addr); 2827 else 2828 bacpy(&conn->init_addr, &hdev->bdaddr); 2829 2830 conn->resp_addr_type = peer_addr_type; 2831 bacpy(&conn->resp_addr, peer_addr); 2832 } 2833 2834 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2835 { 2836 struct hci_cp_le_create_conn *cp; 2837 2838 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2839 2840 /* All connection failure handling is taken care of by the 2841 * hci_conn_failed function which is triggered by the HCI 2842 * request completion callbacks used for connecting. 2843 */ 2844 if (status) 2845 return; 2846 2847 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2848 if (!cp) 2849 return; 2850 2851 hci_dev_lock(hdev); 2852 2853 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2854 cp->own_address_type, cp->filter_policy); 2855 2856 hci_dev_unlock(hdev); 2857 } 2858 2859 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2860 { 2861 struct hci_cp_le_ext_create_conn *cp; 2862 2863 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2864 2865 /* All connection failure handling is taken care of by the 2866 * hci_conn_failed function which is triggered by the HCI 2867 * request completion callbacks used for connecting. 2868 */ 2869 if (status) 2870 return; 2871 2872 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2873 if (!cp) 2874 return; 2875 2876 hci_dev_lock(hdev); 2877 2878 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2879 cp->own_addr_type, cp->filter_policy); 2880 2881 hci_dev_unlock(hdev); 2882 } 2883 2884 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2885 { 2886 struct hci_cp_le_read_remote_features *cp; 2887 struct hci_conn *conn; 2888 2889 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2890 2891 if (!status) 2892 return; 2893 2894 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2895 if (!cp) 2896 return; 2897 2898 hci_dev_lock(hdev); 2899 2900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2901 if (conn) { 2902 if (conn->state == BT_CONFIG) { 2903 hci_connect_cfm(conn, status); 2904 hci_conn_drop(conn); 2905 } 2906 } 2907 2908 hci_dev_unlock(hdev); 2909 } 2910 2911 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2912 { 2913 struct hci_cp_le_start_enc *cp; 2914 struct hci_conn *conn; 2915 2916 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2917 2918 if (!status) 2919 return; 2920 2921 hci_dev_lock(hdev); 2922 2923 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2924 if (!cp) 2925 goto unlock; 2926 2927 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2928 if (!conn) 2929 goto unlock; 2930 2931 if (conn->state != BT_CONNECTED) 2932 goto unlock; 2933 2934 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2935 hci_conn_drop(conn); 2936 2937 unlock: 2938 hci_dev_unlock(hdev); 2939 } 2940 2941 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2942 { 2943 struct hci_cp_switch_role *cp; 2944 struct hci_conn *conn; 2945 2946 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2947 2948 if (!status) 2949 return; 2950 2951 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2952 if (!cp) 2953 return; 2954 2955 hci_dev_lock(hdev); 2956 2957 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2958 if (conn) 2959 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2960 2961 hci_dev_unlock(hdev); 2962 } 2963 2964 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, 2965 struct sk_buff *skb) 2966 { 2967 struct hci_ev_status *ev = data; 2968 struct discovery_state *discov = &hdev->discovery; 2969 struct inquiry_entry *e; 2970 2971 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 2972 2973 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2974 return; 2975 2976 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2977 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2978 2979 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2980 return; 2981 2982 hci_dev_lock(hdev); 2983 2984 if (discov->state != DISCOVERY_FINDING) 2985 goto unlock; 2986 2987 if (list_empty(&discov->resolve)) { 2988 /* When BR/EDR inquiry is active and no LE scanning is in 2989 * progress, then change discovery state to indicate completion. 2990 * 2991 * When running LE scanning and BR/EDR inquiry simultaneously 2992 * and the LE scan already finished, then change the discovery 2993 * state to indicate completion. 2994 */ 2995 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2996 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2997 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2998 goto unlock; 2999 } 3000 3001 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 3002 if (e && hci_resolve_name(hdev, e) == 0) { 3003 e->name_state = NAME_PENDING; 3004 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 3005 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; 3006 } else { 3007 /* When BR/EDR inquiry is active and no LE scanning is in 3008 * progress, then change discovery state to indicate completion. 3009 * 3010 * When running LE scanning and BR/EDR inquiry simultaneously 3011 * and the LE scan already finished, then change the discovery 3012 * state to indicate completion. 3013 */ 3014 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3015 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3016 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3017 } 3018 3019 unlock: 3020 hci_dev_unlock(hdev); 3021 } 3022 3023 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, 3024 struct sk_buff *skb) 3025 { 3026 struct hci_ev_inquiry_result *ev = edata; 3027 struct inquiry_data data; 3028 int i; 3029 3030 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, 3031 flex_array_size(ev, info, ev->num))) 3032 return; 3033 3034 bt_dev_dbg(hdev, "num %d", ev->num); 3035 3036 if (!ev->num) 3037 return; 3038 3039 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3040 return; 3041 3042 hci_dev_lock(hdev); 3043 3044 for (i = 0; i < ev->num; i++) { 3045 struct inquiry_info *info = &ev->info[i]; 3046 u32 flags; 3047 3048 bacpy(&data.bdaddr, &info->bdaddr); 3049 data.pscan_rep_mode = info->pscan_rep_mode; 3050 data.pscan_period_mode = info->pscan_period_mode; 3051 data.pscan_mode = info->pscan_mode; 3052 memcpy(data.dev_class, info->dev_class, 3); 3053 data.clock_offset = info->clock_offset; 3054 data.rssi = HCI_RSSI_INVALID; 3055 data.ssp_mode = 0x00; 3056 3057 flags = hci_inquiry_cache_update(hdev, &data, false); 3058 3059 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3060 info->dev_class, HCI_RSSI_INVALID, 3061 flags, NULL, 0, NULL, 0, 0); 3062 } 3063 3064 hci_dev_unlock(hdev); 3065 } 3066 3067 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, 3068 struct sk_buff *skb) 3069 { 3070 struct hci_ev_conn_complete *ev = data; 3071 struct hci_conn *conn; 3072 u8 status = ev->status; 3073 3074 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3075 3076 hci_dev_lock(hdev); 3077 3078 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 3079 if (!conn) { 3080 /* In case of error status and there is no connection pending 3081 * just unlock as there is nothing to cleanup. 3082 */ 3083 if (ev->status) 3084 goto unlock; 3085 3086 /* Connection may not exist if auto-connected. Check the bredr 3087 * allowlist to see if this device is allowed to auto connect. 3088 * If link is an ACL type, create a connection class 3089 * automatically. 3090 * 3091 * Auto-connect will only occur if the event filter is 3092 * programmed with a given address. Right now, event filter is 3093 * only used during suspend. 3094 */ 3095 if (ev->link_type == ACL_LINK && 3096 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 3097 &ev->bdaddr, 3098 BDADDR_BREDR)) { 3099 conn = hci_conn_add_unset(hdev, ev->link_type, 3100 &ev->bdaddr, HCI_ROLE_SLAVE); 3101 if (IS_ERR(conn)) { 3102 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); 3103 goto unlock; 3104 } 3105 } else { 3106 if (ev->link_type != SCO_LINK) 3107 goto unlock; 3108 3109 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 3110 &ev->bdaddr); 3111 if (!conn) 3112 goto unlock; 3113 3114 conn->type = SCO_LINK; 3115 } 3116 } 3117 3118 /* The HCI_Connection_Complete event is only sent once per connection. 3119 * Processing it more than once per connection can corrupt kernel memory. 3120 * 3121 * As the connection handle is set here for the first time, it indicates 3122 * whether the connection is already set up. 3123 */ 3124 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { 3125 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 3126 goto unlock; 3127 } 3128 3129 if (!status) { 3130 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); 3131 if (status) 3132 goto done; 3133 3134 if (conn->type == ACL_LINK) { 3135 conn->state = BT_CONFIG; 3136 hci_conn_hold(conn); 3137 3138 if (!conn->out && !hci_conn_ssp_enabled(conn) && 3139 !hci_find_link_key(hdev, &ev->bdaddr)) 3140 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3141 else 3142 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3143 } else 3144 conn->state = BT_CONNECTED; 3145 3146 hci_debugfs_create_conn(conn); 3147 hci_conn_add_sysfs(conn); 3148 3149 if (test_bit(HCI_AUTH, &hdev->flags)) 3150 set_bit(HCI_CONN_AUTH, &conn->flags); 3151 3152 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 3153 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3154 3155 /* "Link key request" completed ahead of "connect request" completes */ 3156 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3157 ev->link_type == ACL_LINK) { 3158 struct link_key *key; 3159 struct hci_cp_read_enc_key_size cp; 3160 3161 key = hci_find_link_key(hdev, &ev->bdaddr); 3162 if (key) { 3163 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3164 3165 if (!read_key_size_capable(hdev)) { 3166 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3167 } else { 3168 cp.handle = cpu_to_le16(conn->handle); 3169 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, 3170 sizeof(cp), &cp)) { 3171 bt_dev_err(hdev, "sending read key size failed"); 3172 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3173 } 3174 } 3175 3176 hci_encrypt_cfm(conn, ev->status); 3177 } 3178 } 3179 3180 /* Get remote features */ 3181 if (conn->type == ACL_LINK) { 3182 struct hci_cp_read_remote_features cp; 3183 cp.handle = ev->handle; 3184 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 3185 sizeof(cp), &cp); 3186 3187 hci_update_scan(hdev); 3188 } 3189 3190 /* Set packet type for incoming connection */ 3191 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 3192 struct hci_cp_change_conn_ptype cp; 3193 cp.handle = ev->handle; 3194 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3195 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 3196 &cp); 3197 } 3198 } 3199 3200 if (conn->type == ACL_LINK) 3201 hci_sco_setup(conn, ev->status); 3202 3203 done: 3204 if (status) { 3205 hci_conn_failed(conn, status); 3206 } else if (ev->link_type == SCO_LINK) { 3207 switch (conn->setting & SCO_AIRMODE_MASK) { 3208 case SCO_AIRMODE_CVSD: 3209 if (hdev->notify) 3210 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 3211 break; 3212 } 3213 3214 hci_connect_cfm(conn, status); 3215 } 3216 3217 unlock: 3218 hci_dev_unlock(hdev); 3219 } 3220 3221 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 3222 { 3223 struct hci_cp_reject_conn_req cp; 3224 3225 bacpy(&cp.bdaddr, bdaddr); 3226 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 3227 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 3228 } 3229 3230 static void hci_conn_request_evt(struct hci_dev *hdev, void *data, 3231 struct sk_buff *skb) 3232 { 3233 struct hci_ev_conn_request *ev = data; 3234 int mask = hdev->link_mode; 3235 struct inquiry_entry *ie; 3236 struct hci_conn *conn; 3237 __u8 flags = 0; 3238 3239 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); 3240 3241 /* Reject incoming connection from device with same BD ADDR against 3242 * CVE-2020-26555 3243 */ 3244 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) { 3245 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n", 3246 &ev->bdaddr); 3247 hci_reject_conn(hdev, &ev->bdaddr); 3248 return; 3249 } 3250 3251 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 3252 &flags); 3253 3254 if (!(mask & HCI_LM_ACCEPT)) { 3255 hci_reject_conn(hdev, &ev->bdaddr); 3256 return; 3257 } 3258 3259 hci_dev_lock(hdev); 3260 3261 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, 3262 BDADDR_BREDR)) { 3263 hci_reject_conn(hdev, &ev->bdaddr); 3264 goto unlock; 3265 } 3266 3267 /* Require HCI_CONNECTABLE or an accept list entry to accept the 3268 * connection. These features are only touched through mgmt so 3269 * only do the checks if HCI_MGMT is set. 3270 */ 3271 if (hci_dev_test_flag(hdev, HCI_MGMT) && 3272 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 3273 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 3274 BDADDR_BREDR)) { 3275 hci_reject_conn(hdev, &ev->bdaddr); 3276 goto unlock; 3277 } 3278 3279 /* Connection accepted */ 3280 3281 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3282 if (ie) 3283 memcpy(ie->data.dev_class, ev->dev_class, 3); 3284 3285 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 3286 &ev->bdaddr); 3287 if (!conn) { 3288 conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, 3289 HCI_ROLE_SLAVE); 3290 if (IS_ERR(conn)) { 3291 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); 3292 goto unlock; 3293 } 3294 } 3295 3296 memcpy(conn->dev_class, ev->dev_class, 3); 3297 3298 hci_dev_unlock(hdev); 3299 3300 if (ev->link_type == ACL_LINK || 3301 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 3302 struct hci_cp_accept_conn_req cp; 3303 conn->state = BT_CONNECT; 3304 3305 bacpy(&cp.bdaddr, &ev->bdaddr); 3306 3307 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 3308 cp.role = 0x00; /* Become central */ 3309 else 3310 cp.role = 0x01; /* Remain peripheral */ 3311 3312 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 3313 } else if (!(flags & HCI_PROTO_DEFER)) { 3314 struct hci_cp_accept_sync_conn_req cp; 3315 conn->state = BT_CONNECT; 3316 3317 bacpy(&cp.bdaddr, &ev->bdaddr); 3318 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3319 3320 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 3321 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 3322 cp.max_latency = cpu_to_le16(0xffff); 3323 cp.content_format = cpu_to_le16(hdev->voice_setting); 3324 cp.retrans_effort = 0xff; 3325 3326 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 3327 &cp); 3328 } else { 3329 conn->state = BT_CONNECT2; 3330 hci_connect_cfm(conn, 0); 3331 } 3332 3333 return; 3334 unlock: 3335 hci_dev_unlock(hdev); 3336 } 3337 3338 static u8 hci_to_mgmt_reason(u8 err) 3339 { 3340 switch (err) { 3341 case HCI_ERROR_CONNECTION_TIMEOUT: 3342 return MGMT_DEV_DISCONN_TIMEOUT; 3343 case HCI_ERROR_REMOTE_USER_TERM: 3344 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3345 case HCI_ERROR_REMOTE_POWER_OFF: 3346 return MGMT_DEV_DISCONN_REMOTE; 3347 case HCI_ERROR_LOCAL_HOST_TERM: 3348 return MGMT_DEV_DISCONN_LOCAL_HOST; 3349 default: 3350 return MGMT_DEV_DISCONN_UNKNOWN; 3351 } 3352 } 3353 3354 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, 3355 struct sk_buff *skb) 3356 { 3357 struct hci_ev_disconn_complete *ev = data; 3358 u8 reason; 3359 struct hci_conn_params *params; 3360 struct hci_conn *conn; 3361 bool mgmt_connected; 3362 3363 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3364 3365 hci_dev_lock(hdev); 3366 3367 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3368 if (!conn) 3369 goto unlock; 3370 3371 if (ev->status) { 3372 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3373 conn->dst_type, ev->status); 3374 goto unlock; 3375 } 3376 3377 conn->state = BT_CLOSED; 3378 3379 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3380 3381 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3382 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3383 else 3384 reason = hci_to_mgmt_reason(ev->reason); 3385 3386 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3387 reason, mgmt_connected); 3388 3389 if (conn->type == ACL_LINK) { 3390 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3391 hci_remove_link_key(hdev, &conn->dst); 3392 3393 hci_update_scan(hdev); 3394 } 3395 3396 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 3397 if (params) { 3398 switch (params->auto_connect) { 3399 case HCI_AUTO_CONN_LINK_LOSS: 3400 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3401 break; 3402 fallthrough; 3403 3404 case HCI_AUTO_CONN_DIRECT: 3405 case HCI_AUTO_CONN_ALWAYS: 3406 hci_pend_le_list_del_init(params); 3407 hci_pend_le_list_add(params, &hdev->pend_le_conns); 3408 hci_update_passive_scan(hdev); 3409 break; 3410 3411 default: 3412 break; 3413 } 3414 } 3415 3416 hci_disconn_cfm(conn, ev->reason); 3417 3418 /* Re-enable advertising if necessary, since it might 3419 * have been disabled by the connection. From the 3420 * HCI_LE_Set_Advertise_Enable command description in 3421 * the core specification (v4.0): 3422 * "The Controller shall continue advertising until the Host 3423 * issues an LE_Set_Advertise_Enable command with 3424 * Advertising_Enable set to 0x00 (Advertising is disabled) 3425 * or until a connection is created or until the Advertising 3426 * is timed out due to Directed Advertising." 3427 */ 3428 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3429 hdev->cur_adv_instance = conn->adv_instance; 3430 hci_enable_advertising(hdev); 3431 } 3432 3433 hci_conn_del(conn); 3434 3435 unlock: 3436 hci_dev_unlock(hdev); 3437 } 3438 3439 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, 3440 struct sk_buff *skb) 3441 { 3442 struct hci_ev_auth_complete *ev = data; 3443 struct hci_conn *conn; 3444 3445 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3446 3447 hci_dev_lock(hdev); 3448 3449 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3450 if (!conn) 3451 goto unlock; 3452 3453 if (!ev->status) { 3454 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3455 set_bit(HCI_CONN_AUTH, &conn->flags); 3456 conn->sec_level = conn->pending_sec_level; 3457 } else { 3458 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3459 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3460 3461 mgmt_auth_failed(conn, ev->status); 3462 } 3463 3464 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3465 3466 if (conn->state == BT_CONFIG) { 3467 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3468 struct hci_cp_set_conn_encrypt cp; 3469 cp.handle = ev->handle; 3470 cp.encrypt = 0x01; 3471 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3472 &cp); 3473 } else { 3474 conn->state = BT_CONNECTED; 3475 hci_connect_cfm(conn, ev->status); 3476 hci_conn_drop(conn); 3477 } 3478 } else { 3479 hci_auth_cfm(conn, ev->status); 3480 3481 hci_conn_hold(conn); 3482 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3483 hci_conn_drop(conn); 3484 } 3485 3486 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3487 if (!ev->status) { 3488 struct hci_cp_set_conn_encrypt cp; 3489 cp.handle = ev->handle; 3490 cp.encrypt = 0x01; 3491 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3492 &cp); 3493 } else { 3494 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3495 hci_encrypt_cfm(conn, ev->status); 3496 } 3497 } 3498 3499 unlock: 3500 hci_dev_unlock(hdev); 3501 } 3502 3503 static void hci_remote_name_evt(struct hci_dev *hdev, void *data, 3504 struct sk_buff *skb) 3505 { 3506 struct hci_ev_remote_name *ev = data; 3507 struct hci_conn *conn; 3508 3509 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3510 3511 hci_dev_lock(hdev); 3512 3513 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3514 3515 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3516 goto check_auth; 3517 3518 if (ev->status == 0) 3519 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3520 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3521 else 3522 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3523 3524 check_auth: 3525 if (!conn) 3526 goto unlock; 3527 3528 if (!hci_outgoing_auth_needed(hdev, conn)) 3529 goto unlock; 3530 3531 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3532 struct hci_cp_auth_requested cp; 3533 3534 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3535 3536 cp.handle = __cpu_to_le16(conn->handle); 3537 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3538 } 3539 3540 unlock: 3541 hci_dev_unlock(hdev); 3542 } 3543 3544 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, 3545 struct sk_buff *skb) 3546 { 3547 struct hci_ev_encrypt_change *ev = data; 3548 struct hci_conn *conn; 3549 3550 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3551 3552 hci_dev_lock(hdev); 3553 3554 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3555 if (!conn) 3556 goto unlock; 3557 3558 if (!ev->status) { 3559 if (ev->encrypt) { 3560 /* Encryption implies authentication */ 3561 set_bit(HCI_CONN_AUTH, &conn->flags); 3562 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3563 conn->sec_level = conn->pending_sec_level; 3564 3565 /* P-256 authentication key implies FIPS */ 3566 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3567 set_bit(HCI_CONN_FIPS, &conn->flags); 3568 3569 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3570 conn->type == LE_LINK) 3571 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3572 } else { 3573 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3574 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3575 } 3576 } 3577 3578 /* We should disregard the current RPA and generate a new one 3579 * whenever the encryption procedure fails. 3580 */ 3581 if (ev->status && conn->type == LE_LINK) { 3582 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3583 hci_adv_instances_set_rpa_expired(hdev, true); 3584 } 3585 3586 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3587 3588 /* Check link security requirements are met */ 3589 if (!hci_conn_check_link_mode(conn)) 3590 ev->status = HCI_ERROR_AUTH_FAILURE; 3591 3592 if (ev->status && conn->state == BT_CONNECTED) { 3593 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3594 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3595 3596 /* Notify upper layers so they can cleanup before 3597 * disconnecting. 3598 */ 3599 hci_encrypt_cfm(conn, ev->status); 3600 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3601 hci_conn_drop(conn); 3602 goto unlock; 3603 } 3604 3605 /* Try reading the encryption key size for encrypted ACL links */ 3606 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3607 struct hci_cp_read_enc_key_size cp; 3608 3609 /* Only send HCI_Read_Encryption_Key_Size if the 3610 * controller really supports it. If it doesn't, assume 3611 * the default size (16). 3612 */ 3613 if (!read_key_size_capable(hdev)) { 3614 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3615 goto notify; 3616 } 3617 3618 cp.handle = cpu_to_le16(conn->handle); 3619 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, 3620 sizeof(cp), &cp)) { 3621 bt_dev_err(hdev, "sending read key size failed"); 3622 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3623 goto notify; 3624 } 3625 3626 goto unlock; 3627 } 3628 3629 /* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers 3630 * to avoid unexpected SMP command errors when pairing. 3631 */ 3632 if (test_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT, 3633 &hdev->quirks)) 3634 goto notify; 3635 3636 /* Set the default Authenticated Payload Timeout after 3637 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B 3638 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be 3639 * sent when the link is active and Encryption is enabled, the conn 3640 * type can be either LE or ACL and controller must support LMP Ping. 3641 * Ensure for AES-CCM encryption as well. 3642 */ 3643 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3644 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3645 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3646 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3647 struct hci_cp_write_auth_payload_to cp; 3648 3649 cp.handle = cpu_to_le16(conn->handle); 3650 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3651 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3652 sizeof(cp), &cp)) 3653 bt_dev_err(hdev, "write auth payload timeout failed"); 3654 } 3655 3656 notify: 3657 hci_encrypt_cfm(conn, ev->status); 3658 3659 unlock: 3660 hci_dev_unlock(hdev); 3661 } 3662 3663 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, 3664 struct sk_buff *skb) 3665 { 3666 struct hci_ev_change_link_key_complete *ev = data; 3667 struct hci_conn *conn; 3668 3669 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3670 3671 hci_dev_lock(hdev); 3672 3673 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3674 if (conn) { 3675 if (!ev->status) 3676 set_bit(HCI_CONN_SECURE, &conn->flags); 3677 3678 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3679 3680 hci_key_change_cfm(conn, ev->status); 3681 } 3682 3683 hci_dev_unlock(hdev); 3684 } 3685 3686 static void hci_remote_features_evt(struct hci_dev *hdev, void *data, 3687 struct sk_buff *skb) 3688 { 3689 struct hci_ev_remote_features *ev = data; 3690 struct hci_conn *conn; 3691 3692 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3693 3694 hci_dev_lock(hdev); 3695 3696 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3697 if (!conn) 3698 goto unlock; 3699 3700 if (!ev->status) 3701 memcpy(conn->features[0], ev->features, 8); 3702 3703 if (conn->state != BT_CONFIG) 3704 goto unlock; 3705 3706 if (!ev->status && lmp_ext_feat_capable(hdev) && 3707 lmp_ext_feat_capable(conn)) { 3708 struct hci_cp_read_remote_ext_features cp; 3709 cp.handle = ev->handle; 3710 cp.page = 0x01; 3711 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3712 sizeof(cp), &cp); 3713 goto unlock; 3714 } 3715 3716 if (!ev->status) { 3717 struct hci_cp_remote_name_req cp; 3718 memset(&cp, 0, sizeof(cp)); 3719 bacpy(&cp.bdaddr, &conn->dst); 3720 cp.pscan_rep_mode = 0x02; 3721 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3722 } else { 3723 mgmt_device_connected(hdev, conn, NULL, 0); 3724 } 3725 3726 if (!hci_outgoing_auth_needed(hdev, conn)) { 3727 conn->state = BT_CONNECTED; 3728 hci_connect_cfm(conn, ev->status); 3729 hci_conn_drop(conn); 3730 } 3731 3732 unlock: 3733 hci_dev_unlock(hdev); 3734 } 3735 3736 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3737 { 3738 cancel_delayed_work(&hdev->cmd_timer); 3739 3740 rcu_read_lock(); 3741 if (!test_bit(HCI_RESET, &hdev->flags)) { 3742 if (ncmd) { 3743 cancel_delayed_work(&hdev->ncmd_timer); 3744 atomic_set(&hdev->cmd_cnt, 1); 3745 } else { 3746 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 3747 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, 3748 HCI_NCMD_TIMEOUT); 3749 } 3750 } 3751 rcu_read_unlock(); 3752 } 3753 3754 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, 3755 struct sk_buff *skb) 3756 { 3757 struct hci_rp_le_read_buffer_size_v2 *rp = data; 3758 3759 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3760 3761 if (rp->status) 3762 return rp->status; 3763 3764 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); 3765 hdev->le_pkts = rp->acl_max_pkt; 3766 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); 3767 hdev->iso_pkts = rp->iso_max_pkt; 3768 3769 hdev->le_cnt = hdev->le_pkts; 3770 hdev->iso_cnt = hdev->iso_pkts; 3771 3772 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, 3773 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); 3774 3775 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) 3776 return HCI_ERROR_INVALID_PARAMETERS; 3777 3778 return rp->status; 3779 } 3780 3781 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status) 3782 { 3783 struct hci_conn *conn, *tmp; 3784 3785 lockdep_assert_held(&hdev->lock); 3786 3787 list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { 3788 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) || 3789 conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig) 3790 continue; 3791 3792 if (HCI_CONN_HANDLE_UNSET(conn->handle)) 3793 hci_conn_failed(conn, status); 3794 } 3795 } 3796 3797 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, 3798 struct sk_buff *skb) 3799 { 3800 struct hci_rp_le_set_cig_params *rp = data; 3801 struct hci_cp_le_set_cig_params *cp; 3802 struct hci_conn *conn; 3803 u8 status = rp->status; 3804 bool pending = false; 3805 int i; 3806 3807 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3808 3809 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS); 3810 if (!rp->status && (!cp || rp->num_handles != cp->num_cis || 3811 rp->cig_id != cp->cig_id)) { 3812 bt_dev_err(hdev, "unexpected Set CIG Parameters response data"); 3813 status = HCI_ERROR_UNSPECIFIED; 3814 } 3815 3816 hci_dev_lock(hdev); 3817 3818 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554 3819 * 3820 * If the Status return parameter is non-zero, then the state of the CIG 3821 * and its CIS configurations shall not be changed by the command. If 3822 * the CIG did not already exist, it shall not be created. 3823 */ 3824 if (status) { 3825 /* Keep current configuration, fail only the unbound CIS */ 3826 hci_unbound_cis_failed(hdev, rp->cig_id, status); 3827 goto unlock; 3828 } 3829 3830 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553 3831 * 3832 * If the Status return parameter is zero, then the Controller shall 3833 * set the Connection_Handle arrayed return parameter to the connection 3834 * handle(s) corresponding to the CIS configurations specified in 3835 * the CIS_IDs command parameter, in the same order. 3836 */ 3837 for (i = 0; i < rp->num_handles; ++i) { 3838 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id, 3839 cp->cis[i].cis_id); 3840 if (!conn || !bacmp(&conn->dst, BDADDR_ANY)) 3841 continue; 3842 3843 if (conn->state != BT_BOUND && conn->state != BT_CONNECT) 3844 continue; 3845 3846 if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i]))) 3847 continue; 3848 3849 if (conn->state == BT_CONNECT) 3850 pending = true; 3851 } 3852 3853 unlock: 3854 if (pending) 3855 hci_le_create_cis_pending(hdev); 3856 3857 hci_dev_unlock(hdev); 3858 3859 return rp->status; 3860 } 3861 3862 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, 3863 struct sk_buff *skb) 3864 { 3865 struct hci_rp_le_setup_iso_path *rp = data; 3866 struct hci_cp_le_setup_iso_path *cp; 3867 struct hci_conn *conn; 3868 3869 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3870 3871 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); 3872 if (!cp) 3873 return rp->status; 3874 3875 hci_dev_lock(hdev); 3876 3877 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 3878 if (!conn) 3879 goto unlock; 3880 3881 if (rp->status) { 3882 hci_connect_cfm(conn, rp->status); 3883 hci_conn_del(conn); 3884 goto unlock; 3885 } 3886 3887 switch (cp->direction) { 3888 /* Input (Host to Controller) */ 3889 case 0x00: 3890 /* Only confirm connection if output only */ 3891 if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu) 3892 hci_connect_cfm(conn, rp->status); 3893 break; 3894 /* Output (Controller to Host) */ 3895 case 0x01: 3896 /* Confirm connection since conn->iso_qos is always configured 3897 * last. 3898 */ 3899 hci_connect_cfm(conn, rp->status); 3900 3901 /* Notify device connected in case it is a BIG Sync */ 3902 if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) 3903 mgmt_device_connected(hdev, conn, NULL, 0); 3904 3905 break; 3906 } 3907 3908 unlock: 3909 hci_dev_unlock(hdev); 3910 return rp->status; 3911 } 3912 3913 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) 3914 { 3915 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3916 } 3917 3918 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, 3919 struct sk_buff *skb) 3920 { 3921 struct hci_ev_status *rp = data; 3922 struct hci_cp_le_set_per_adv_params *cp; 3923 3924 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3925 3926 if (rp->status) 3927 return rp->status; 3928 3929 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); 3930 if (!cp) 3931 return rp->status; 3932 3933 /* TODO: set the conn state */ 3934 return rp->status; 3935 } 3936 3937 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, 3938 struct sk_buff *skb) 3939 { 3940 struct hci_ev_status *rp = data; 3941 struct hci_cp_le_set_per_adv_enable *cp; 3942 struct adv_info *adv = NULL, *n; 3943 u8 per_adv_cnt = 0; 3944 3945 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3946 3947 if (rp->status) 3948 return rp->status; 3949 3950 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); 3951 if (!cp) 3952 return rp->status; 3953 3954 hci_dev_lock(hdev); 3955 3956 adv = hci_find_adv_instance(hdev, cp->handle); 3957 3958 if (cp->enable) { 3959 hci_dev_set_flag(hdev, HCI_LE_PER_ADV); 3960 3961 if (adv) 3962 adv->enabled = true; 3963 } else { 3964 /* If just one instance was disabled check if there are 3965 * any other instance enabled before clearing HCI_LE_PER_ADV. 3966 * The current periodic adv instance will be marked as 3967 * disabled once extended advertising is also disabled. 3968 */ 3969 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 3970 list) { 3971 if (adv->periodic && adv->enabled) 3972 per_adv_cnt++; 3973 } 3974 3975 if (per_adv_cnt > 1) 3976 goto unlock; 3977 3978 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); 3979 } 3980 3981 unlock: 3982 hci_dev_unlock(hdev); 3983 3984 return rp->status; 3985 } 3986 3987 #define HCI_CC_VL(_op, _func, _min, _max) \ 3988 { \ 3989 .op = _op, \ 3990 .func = _func, \ 3991 .min_len = _min, \ 3992 .max_len = _max, \ 3993 } 3994 3995 #define HCI_CC(_op, _func, _len) \ 3996 HCI_CC_VL(_op, _func, _len, _len) 3997 3998 #define HCI_CC_STATUS(_op, _func) \ 3999 HCI_CC(_op, _func, sizeof(struct hci_ev_status)) 4000 4001 static const struct hci_cc { 4002 u16 op; 4003 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 4004 u16 min_len; 4005 u16 max_len; 4006 } hci_cc_table[] = { 4007 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), 4008 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), 4009 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), 4010 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, 4011 hci_cc_remote_name_req_cancel), 4012 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, 4013 sizeof(struct hci_rp_role_discovery)), 4014 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, 4015 sizeof(struct hci_rp_read_link_policy)), 4016 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, 4017 sizeof(struct hci_rp_write_link_policy)), 4018 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, 4019 sizeof(struct hci_rp_read_def_link_policy)), 4020 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, 4021 hci_cc_write_def_link_policy), 4022 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), 4023 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, 4024 sizeof(struct hci_rp_read_stored_link_key)), 4025 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, 4026 sizeof(struct hci_rp_delete_stored_link_key)), 4027 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), 4028 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, 4029 sizeof(struct hci_rp_read_local_name)), 4030 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), 4031 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), 4032 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), 4033 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), 4034 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 4035 sizeof(struct hci_rp_read_class_of_dev)), 4036 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), 4037 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, 4038 sizeof(struct hci_rp_read_voice_setting)), 4039 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), 4040 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, 4041 sizeof(struct hci_rp_read_num_supported_iac)), 4042 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), 4043 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), 4044 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, 4045 sizeof(struct hci_rp_read_auth_payload_to)), 4046 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, 4047 sizeof(struct hci_rp_write_auth_payload_to)), 4048 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, 4049 sizeof(struct hci_rp_read_local_version)), 4050 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, 4051 sizeof(struct hci_rp_read_local_commands)), 4052 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, 4053 sizeof(struct hci_rp_read_local_features)), 4054 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, 4055 sizeof(struct hci_rp_read_local_ext_features)), 4056 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, 4057 sizeof(struct hci_rp_read_buffer_size)), 4058 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, 4059 sizeof(struct hci_rp_read_bd_addr)), 4060 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, 4061 sizeof(struct hci_rp_read_local_pairing_opts)), 4062 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, 4063 sizeof(struct hci_rp_read_page_scan_activity)), 4064 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 4065 hci_cc_write_page_scan_activity), 4066 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, 4067 sizeof(struct hci_rp_read_page_scan_type)), 4068 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), 4069 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, 4070 sizeof(struct hci_rp_read_clock)), 4071 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, 4072 sizeof(struct hci_rp_read_enc_key_size)), 4073 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, 4074 sizeof(struct hci_rp_read_inq_rsp_tx_power)), 4075 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, 4076 hci_cc_read_def_err_data_reporting, 4077 sizeof(struct hci_rp_read_def_err_data_reporting)), 4078 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 4079 hci_cc_write_def_err_data_reporting), 4080 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, 4081 sizeof(struct hci_rp_pin_code_reply)), 4082 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, 4083 sizeof(struct hci_rp_pin_code_neg_reply)), 4084 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, 4085 sizeof(struct hci_rp_read_local_oob_data)), 4086 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, 4087 sizeof(struct hci_rp_read_local_oob_ext_data)), 4088 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, 4089 sizeof(struct hci_rp_le_read_buffer_size)), 4090 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, 4091 sizeof(struct hci_rp_le_read_local_features)), 4092 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, 4093 sizeof(struct hci_rp_le_read_adv_tx_power)), 4094 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 4095 sizeof(struct hci_rp_user_confirm_reply)), 4096 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, 4097 sizeof(struct hci_rp_user_confirm_reply)), 4098 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, 4099 sizeof(struct hci_rp_user_confirm_reply)), 4100 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, 4101 sizeof(struct hci_rp_user_confirm_reply)), 4102 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), 4103 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), 4104 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), 4105 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), 4106 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 4107 hci_cc_le_read_accept_list_size, 4108 sizeof(struct hci_rp_le_read_accept_list_size)), 4109 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), 4110 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, 4111 hci_cc_le_add_to_accept_list), 4112 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, 4113 hci_cc_le_del_from_accept_list), 4114 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, 4115 sizeof(struct hci_rp_le_read_supported_states)), 4116 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, 4117 sizeof(struct hci_rp_le_read_def_data_len)), 4118 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, 4119 hci_cc_le_write_def_data_len), 4120 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, 4121 hci_cc_le_add_to_resolv_list), 4122 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, 4123 hci_cc_le_del_from_resolv_list), 4124 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, 4125 hci_cc_le_clear_resolv_list), 4126 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, 4127 sizeof(struct hci_rp_le_read_resolv_list_size)), 4128 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 4129 hci_cc_le_set_addr_resolution_enable), 4130 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, 4131 sizeof(struct hci_rp_le_read_max_data_len)), 4132 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, 4133 hci_cc_write_le_host_supported), 4134 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), 4135 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, 4136 sizeof(struct hci_rp_read_rssi)), 4137 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, 4138 sizeof(struct hci_rp_read_tx_power)), 4139 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), 4140 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, 4141 hci_cc_le_set_ext_scan_param), 4142 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, 4143 hci_cc_le_set_ext_scan_enable), 4144 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), 4145 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 4146 hci_cc_le_read_num_adv_sets, 4147 sizeof(struct hci_rp_le_read_num_supported_adv_sets)), 4148 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, 4149 sizeof(struct hci_rp_le_set_ext_adv_params)), 4150 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, 4151 hci_cc_le_set_ext_adv_enable), 4152 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 4153 hci_cc_le_set_adv_set_random_addr), 4154 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), 4155 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), 4156 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), 4157 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, 4158 hci_cc_le_set_per_adv_enable), 4159 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, 4160 sizeof(struct hci_rp_le_read_transmit_power)), 4161 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), 4162 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, 4163 sizeof(struct hci_rp_le_read_buffer_size_v2)), 4164 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, 4165 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), 4166 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path, 4167 sizeof(struct hci_rp_le_setup_iso_path)), 4168 }; 4169 4170 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, 4171 struct sk_buff *skb) 4172 { 4173 void *data; 4174 4175 if (skb->len < cc->min_len) { 4176 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", 4177 cc->op, skb->len, cc->min_len); 4178 return HCI_ERROR_UNSPECIFIED; 4179 } 4180 4181 /* Just warn if the length is over max_len size it still be possible to 4182 * partially parse the cc so leave to callback to decide if that is 4183 * acceptable. 4184 */ 4185 if (skb->len > cc->max_len) 4186 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", 4187 cc->op, skb->len, cc->max_len); 4188 4189 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); 4190 if (!data) 4191 return HCI_ERROR_UNSPECIFIED; 4192 4193 return cc->func(hdev, data, skb); 4194 } 4195 4196 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, 4197 struct sk_buff *skb, u16 *opcode, u8 *status, 4198 hci_req_complete_t *req_complete, 4199 hci_req_complete_skb_t *req_complete_skb) 4200 { 4201 struct hci_ev_cmd_complete *ev = data; 4202 int i; 4203 4204 *opcode = __le16_to_cpu(ev->opcode); 4205 4206 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4207 4208 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { 4209 if (hci_cc_table[i].op == *opcode) { 4210 *status = hci_cc_func(hdev, &hci_cc_table[i], skb); 4211 break; 4212 } 4213 } 4214 4215 if (i == ARRAY_SIZE(hci_cc_table)) { 4216 /* Unknown opcode, assume byte 0 contains the status, so 4217 * that e.g. __hci_cmd_sync() properly returns errors 4218 * for vendor specific commands send by HCI drivers. 4219 * If a vendor doesn't actually follow this convention we may 4220 * need to introduce a vendor CC table in order to properly set 4221 * the status. 4222 */ 4223 *status = skb->data[0]; 4224 } 4225 4226 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4227 4228 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 4229 req_complete_skb); 4230 4231 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4232 bt_dev_err(hdev, 4233 "unexpected event for opcode 0x%4.4x", *opcode); 4234 return; 4235 } 4236 4237 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4238 queue_work(hdev->workqueue, &hdev->cmd_work); 4239 } 4240 4241 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) 4242 { 4243 struct hci_cp_le_create_cis *cp; 4244 bool pending = false; 4245 int i; 4246 4247 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4248 4249 if (!status) 4250 return; 4251 4252 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); 4253 if (!cp) 4254 return; 4255 4256 hci_dev_lock(hdev); 4257 4258 /* Remove connection if command failed */ 4259 for (i = 0; i < cp->num_cis; i++) { 4260 struct hci_conn *conn; 4261 u16 handle; 4262 4263 handle = __le16_to_cpu(cp->cis[i].cis_handle); 4264 4265 conn = hci_conn_hash_lookup_handle(hdev, handle); 4266 if (conn) { 4267 if (test_and_clear_bit(HCI_CONN_CREATE_CIS, 4268 &conn->flags)) 4269 pending = true; 4270 conn->state = BT_CLOSED; 4271 hci_connect_cfm(conn, status); 4272 hci_conn_del(conn); 4273 } 4274 } 4275 cp->num_cis = 0; 4276 4277 if (pending) 4278 hci_le_create_cis_pending(hdev); 4279 4280 hci_dev_unlock(hdev); 4281 } 4282 4283 #define HCI_CS(_op, _func) \ 4284 { \ 4285 .op = _op, \ 4286 .func = _func, \ 4287 } 4288 4289 static const struct hci_cs { 4290 u16 op; 4291 void (*func)(struct hci_dev *hdev, __u8 status); 4292 } hci_cs_table[] = { 4293 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), 4294 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 4295 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 4296 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 4297 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 4298 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 4299 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 4300 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 4301 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4302 hci_cs_read_remote_ext_features), 4303 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4304 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, 4305 hci_cs_enhanced_setup_sync_conn), 4306 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), 4307 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), 4308 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), 4309 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), 4310 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 4311 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), 4312 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), 4313 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), 4314 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), 4315 }; 4316 4317 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, 4318 struct sk_buff *skb, u16 *opcode, u8 *status, 4319 hci_req_complete_t *req_complete, 4320 hci_req_complete_skb_t *req_complete_skb) 4321 { 4322 struct hci_ev_cmd_status *ev = data; 4323 int i; 4324 4325 *opcode = __le16_to_cpu(ev->opcode); 4326 *status = ev->status; 4327 4328 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4329 4330 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { 4331 if (hci_cs_table[i].op == *opcode) { 4332 hci_cs_table[i].func(hdev, ev->status); 4333 break; 4334 } 4335 } 4336 4337 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4338 4339 /* Indicate request completion if the command failed. Also, if 4340 * we're not waiting for a special event and we get a success 4341 * command status we should try to flag the request as completed 4342 * (since for this kind of commands there will not be a command 4343 * complete event). 4344 */ 4345 if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) { 4346 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 4347 req_complete_skb); 4348 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4349 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", 4350 *opcode); 4351 return; 4352 } 4353 } 4354 4355 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4356 queue_work(hdev->workqueue, &hdev->cmd_work); 4357 } 4358 4359 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, 4360 struct sk_buff *skb) 4361 { 4362 struct hci_ev_hardware_error *ev = data; 4363 4364 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); 4365 4366 hdev->hw_error_code = ev->code; 4367 4368 queue_work(hdev->req_workqueue, &hdev->error_reset); 4369 } 4370 4371 static void hci_role_change_evt(struct hci_dev *hdev, void *data, 4372 struct sk_buff *skb) 4373 { 4374 struct hci_ev_role_change *ev = data; 4375 struct hci_conn *conn; 4376 4377 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4378 4379 hci_dev_lock(hdev); 4380 4381 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4382 if (conn) { 4383 if (!ev->status) 4384 conn->role = ev->role; 4385 4386 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 4387 4388 hci_role_switch_cfm(conn, ev->status, ev->role); 4389 } 4390 4391 hci_dev_unlock(hdev); 4392 } 4393 4394 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, 4395 struct sk_buff *skb) 4396 { 4397 struct hci_ev_num_comp_pkts *ev = data; 4398 int i; 4399 4400 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, 4401 flex_array_size(ev, handles, ev->num))) 4402 return; 4403 4404 bt_dev_dbg(hdev, "num %d", ev->num); 4405 4406 for (i = 0; i < ev->num; i++) { 4407 struct hci_comp_pkts_info *info = &ev->handles[i]; 4408 struct hci_conn *conn; 4409 __u16 handle, count; 4410 4411 handle = __le16_to_cpu(info->handle); 4412 count = __le16_to_cpu(info->count); 4413 4414 conn = hci_conn_hash_lookup_handle(hdev, handle); 4415 if (!conn) 4416 continue; 4417 4418 conn->sent -= count; 4419 4420 switch (conn->type) { 4421 case ACL_LINK: 4422 hdev->acl_cnt += count; 4423 if (hdev->acl_cnt > hdev->acl_pkts) 4424 hdev->acl_cnt = hdev->acl_pkts; 4425 break; 4426 4427 case LE_LINK: 4428 if (hdev->le_pkts) { 4429 hdev->le_cnt += count; 4430 if (hdev->le_cnt > hdev->le_pkts) 4431 hdev->le_cnt = hdev->le_pkts; 4432 } else { 4433 hdev->acl_cnt += count; 4434 if (hdev->acl_cnt > hdev->acl_pkts) 4435 hdev->acl_cnt = hdev->acl_pkts; 4436 } 4437 break; 4438 4439 case SCO_LINK: 4440 hdev->sco_cnt += count; 4441 if (hdev->sco_cnt > hdev->sco_pkts) 4442 hdev->sco_cnt = hdev->sco_pkts; 4443 break; 4444 4445 case ISO_LINK: 4446 if (hdev->iso_pkts) { 4447 hdev->iso_cnt += count; 4448 if (hdev->iso_cnt > hdev->iso_pkts) 4449 hdev->iso_cnt = hdev->iso_pkts; 4450 } else if (hdev->le_pkts) { 4451 hdev->le_cnt += count; 4452 if (hdev->le_cnt > hdev->le_pkts) 4453 hdev->le_cnt = hdev->le_pkts; 4454 } else { 4455 hdev->acl_cnt += count; 4456 if (hdev->acl_cnt > hdev->acl_pkts) 4457 hdev->acl_cnt = hdev->acl_pkts; 4458 } 4459 break; 4460 4461 default: 4462 bt_dev_err(hdev, "unknown type %d conn %p", 4463 conn->type, conn); 4464 break; 4465 } 4466 } 4467 4468 queue_work(hdev->workqueue, &hdev->tx_work); 4469 } 4470 4471 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4472 struct sk_buff *skb) 4473 { 4474 struct hci_ev_mode_change *ev = data; 4475 struct hci_conn *conn; 4476 4477 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4478 4479 hci_dev_lock(hdev); 4480 4481 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4482 if (conn) { 4483 conn->mode = ev->mode; 4484 4485 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4486 &conn->flags)) { 4487 if (conn->mode == HCI_CM_ACTIVE) 4488 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4489 else 4490 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4491 } 4492 4493 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4494 hci_sco_setup(conn, ev->status); 4495 } 4496 4497 hci_dev_unlock(hdev); 4498 } 4499 4500 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4501 struct sk_buff *skb) 4502 { 4503 struct hci_ev_pin_code_req *ev = data; 4504 struct hci_conn *conn; 4505 4506 bt_dev_dbg(hdev, ""); 4507 4508 hci_dev_lock(hdev); 4509 4510 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4511 if (!conn) 4512 goto unlock; 4513 4514 if (conn->state == BT_CONNECTED) { 4515 hci_conn_hold(conn); 4516 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4517 hci_conn_drop(conn); 4518 } 4519 4520 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4521 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4522 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4523 sizeof(ev->bdaddr), &ev->bdaddr); 4524 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4525 u8 secure; 4526 4527 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4528 secure = 1; 4529 else 4530 secure = 0; 4531 4532 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4533 } 4534 4535 unlock: 4536 hci_dev_unlock(hdev); 4537 } 4538 4539 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4540 { 4541 if (key_type == HCI_LK_CHANGED_COMBINATION) 4542 return; 4543 4544 conn->pin_length = pin_len; 4545 conn->key_type = key_type; 4546 4547 switch (key_type) { 4548 case HCI_LK_LOCAL_UNIT: 4549 case HCI_LK_REMOTE_UNIT: 4550 case HCI_LK_DEBUG_COMBINATION: 4551 return; 4552 case HCI_LK_COMBINATION: 4553 if (pin_len == 16) 4554 conn->pending_sec_level = BT_SECURITY_HIGH; 4555 else 4556 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4557 break; 4558 case HCI_LK_UNAUTH_COMBINATION_P192: 4559 case HCI_LK_UNAUTH_COMBINATION_P256: 4560 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4561 break; 4562 case HCI_LK_AUTH_COMBINATION_P192: 4563 conn->pending_sec_level = BT_SECURITY_HIGH; 4564 break; 4565 case HCI_LK_AUTH_COMBINATION_P256: 4566 conn->pending_sec_level = BT_SECURITY_FIPS; 4567 break; 4568 } 4569 } 4570 4571 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4572 struct sk_buff *skb) 4573 { 4574 struct hci_ev_link_key_req *ev = data; 4575 struct hci_cp_link_key_reply cp; 4576 struct hci_conn *conn; 4577 struct link_key *key; 4578 4579 bt_dev_dbg(hdev, ""); 4580 4581 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4582 return; 4583 4584 hci_dev_lock(hdev); 4585 4586 key = hci_find_link_key(hdev, &ev->bdaddr); 4587 if (!key) { 4588 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4589 goto not_found; 4590 } 4591 4592 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4593 4594 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4595 if (conn) { 4596 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4597 4598 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4599 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4600 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4601 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4602 goto not_found; 4603 } 4604 4605 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4606 (conn->pending_sec_level == BT_SECURITY_HIGH || 4607 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4608 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4609 goto not_found; 4610 } 4611 4612 conn_set_key(conn, key->type, key->pin_len); 4613 } 4614 4615 bacpy(&cp.bdaddr, &ev->bdaddr); 4616 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4617 4618 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4619 4620 hci_dev_unlock(hdev); 4621 4622 return; 4623 4624 not_found: 4625 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4626 hci_dev_unlock(hdev); 4627 } 4628 4629 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4630 struct sk_buff *skb) 4631 { 4632 struct hci_ev_link_key_notify *ev = data; 4633 struct hci_conn *conn; 4634 struct link_key *key; 4635 bool persistent; 4636 u8 pin_len = 0; 4637 4638 bt_dev_dbg(hdev, ""); 4639 4640 hci_dev_lock(hdev); 4641 4642 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4643 if (!conn) 4644 goto unlock; 4645 4646 /* Ignore NULL link key against CVE-2020-26555 */ 4647 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) { 4648 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR", 4649 &ev->bdaddr); 4650 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 4651 hci_conn_drop(conn); 4652 goto unlock; 4653 } 4654 4655 hci_conn_hold(conn); 4656 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4657 hci_conn_drop(conn); 4658 4659 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4660 conn_set_key(conn, ev->key_type, conn->pin_length); 4661 4662 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4663 goto unlock; 4664 4665 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4666 ev->key_type, pin_len, &persistent); 4667 if (!key) 4668 goto unlock; 4669 4670 /* Update connection information since adding the key will have 4671 * fixed up the type in the case of changed combination keys. 4672 */ 4673 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4674 conn_set_key(conn, key->type, key->pin_len); 4675 4676 mgmt_new_link_key(hdev, key, persistent); 4677 4678 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4679 * is set. If it's not set simply remove the key from the kernel 4680 * list (we've still notified user space about it but with 4681 * store_hint being 0). 4682 */ 4683 if (key->type == HCI_LK_DEBUG_COMBINATION && 4684 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4685 list_del_rcu(&key->list); 4686 kfree_rcu(key, rcu); 4687 goto unlock; 4688 } 4689 4690 if (persistent) 4691 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4692 else 4693 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4694 4695 unlock: 4696 hci_dev_unlock(hdev); 4697 } 4698 4699 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4700 struct sk_buff *skb) 4701 { 4702 struct hci_ev_clock_offset *ev = data; 4703 struct hci_conn *conn; 4704 4705 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4706 4707 hci_dev_lock(hdev); 4708 4709 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4710 if (conn && !ev->status) { 4711 struct inquiry_entry *ie; 4712 4713 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4714 if (ie) { 4715 ie->data.clock_offset = ev->clock_offset; 4716 ie->timestamp = jiffies; 4717 } 4718 } 4719 4720 hci_dev_unlock(hdev); 4721 } 4722 4723 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4724 struct sk_buff *skb) 4725 { 4726 struct hci_ev_pkt_type_change *ev = data; 4727 struct hci_conn *conn; 4728 4729 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4730 4731 hci_dev_lock(hdev); 4732 4733 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4734 if (conn && !ev->status) 4735 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4736 4737 hci_dev_unlock(hdev); 4738 } 4739 4740 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4741 struct sk_buff *skb) 4742 { 4743 struct hci_ev_pscan_rep_mode *ev = data; 4744 struct inquiry_entry *ie; 4745 4746 bt_dev_dbg(hdev, ""); 4747 4748 hci_dev_lock(hdev); 4749 4750 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4751 if (ie) { 4752 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4753 ie->timestamp = jiffies; 4754 } 4755 4756 hci_dev_unlock(hdev); 4757 } 4758 4759 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4760 struct sk_buff *skb) 4761 { 4762 struct hci_ev_inquiry_result_rssi *ev = edata; 4763 struct inquiry_data data; 4764 int i; 4765 4766 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4767 4768 if (!ev->num) 4769 return; 4770 4771 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4772 return; 4773 4774 hci_dev_lock(hdev); 4775 4776 if (skb->len == array_size(ev->num, 4777 sizeof(struct inquiry_info_rssi_pscan))) { 4778 struct inquiry_info_rssi_pscan *info; 4779 4780 for (i = 0; i < ev->num; i++) { 4781 u32 flags; 4782 4783 info = hci_ev_skb_pull(hdev, skb, 4784 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4785 sizeof(*info)); 4786 if (!info) { 4787 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4788 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4789 goto unlock; 4790 } 4791 4792 bacpy(&data.bdaddr, &info->bdaddr); 4793 data.pscan_rep_mode = info->pscan_rep_mode; 4794 data.pscan_period_mode = info->pscan_period_mode; 4795 data.pscan_mode = info->pscan_mode; 4796 memcpy(data.dev_class, info->dev_class, 3); 4797 data.clock_offset = info->clock_offset; 4798 data.rssi = info->rssi; 4799 data.ssp_mode = 0x00; 4800 4801 flags = hci_inquiry_cache_update(hdev, &data, false); 4802 4803 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4804 info->dev_class, info->rssi, 4805 flags, NULL, 0, NULL, 0, 0); 4806 } 4807 } else if (skb->len == array_size(ev->num, 4808 sizeof(struct inquiry_info_rssi))) { 4809 struct inquiry_info_rssi *info; 4810 4811 for (i = 0; i < ev->num; i++) { 4812 u32 flags; 4813 4814 info = hci_ev_skb_pull(hdev, skb, 4815 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4816 sizeof(*info)); 4817 if (!info) { 4818 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4819 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4820 goto unlock; 4821 } 4822 4823 bacpy(&data.bdaddr, &info->bdaddr); 4824 data.pscan_rep_mode = info->pscan_rep_mode; 4825 data.pscan_period_mode = info->pscan_period_mode; 4826 data.pscan_mode = 0x00; 4827 memcpy(data.dev_class, info->dev_class, 3); 4828 data.clock_offset = info->clock_offset; 4829 data.rssi = info->rssi; 4830 data.ssp_mode = 0x00; 4831 4832 flags = hci_inquiry_cache_update(hdev, &data, false); 4833 4834 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4835 info->dev_class, info->rssi, 4836 flags, NULL, 0, NULL, 0, 0); 4837 } 4838 } else { 4839 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4840 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4841 } 4842 unlock: 4843 hci_dev_unlock(hdev); 4844 } 4845 4846 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4847 struct sk_buff *skb) 4848 { 4849 struct hci_ev_remote_ext_features *ev = data; 4850 struct hci_conn *conn; 4851 4852 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4853 4854 hci_dev_lock(hdev); 4855 4856 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4857 if (!conn) 4858 goto unlock; 4859 4860 if (ev->page < HCI_MAX_PAGES) 4861 memcpy(conn->features[ev->page], ev->features, 8); 4862 4863 if (!ev->status && ev->page == 0x01) { 4864 struct inquiry_entry *ie; 4865 4866 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4867 if (ie) 4868 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4869 4870 if (ev->features[0] & LMP_HOST_SSP) { 4871 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4872 } else { 4873 /* It is mandatory by the Bluetooth specification that 4874 * Extended Inquiry Results are only used when Secure 4875 * Simple Pairing is enabled, but some devices violate 4876 * this. 4877 * 4878 * To make these devices work, the internal SSP 4879 * enabled flag needs to be cleared if the remote host 4880 * features do not indicate SSP support */ 4881 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4882 } 4883 4884 if (ev->features[0] & LMP_HOST_SC) 4885 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4886 } 4887 4888 if (conn->state != BT_CONFIG) 4889 goto unlock; 4890 4891 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4892 struct hci_cp_remote_name_req cp; 4893 memset(&cp, 0, sizeof(cp)); 4894 bacpy(&cp.bdaddr, &conn->dst); 4895 cp.pscan_rep_mode = 0x02; 4896 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4897 } else { 4898 mgmt_device_connected(hdev, conn, NULL, 0); 4899 } 4900 4901 if (!hci_outgoing_auth_needed(hdev, conn)) { 4902 conn->state = BT_CONNECTED; 4903 hci_connect_cfm(conn, ev->status); 4904 hci_conn_drop(conn); 4905 } 4906 4907 unlock: 4908 hci_dev_unlock(hdev); 4909 } 4910 4911 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4912 struct sk_buff *skb) 4913 { 4914 struct hci_ev_sync_conn_complete *ev = data; 4915 struct hci_conn *conn; 4916 u8 status = ev->status; 4917 4918 switch (ev->link_type) { 4919 case SCO_LINK: 4920 case ESCO_LINK: 4921 break; 4922 default: 4923 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type 4924 * for HCI_Synchronous_Connection_Complete is limited to 4925 * either SCO or eSCO 4926 */ 4927 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); 4928 return; 4929 } 4930 4931 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4932 4933 hci_dev_lock(hdev); 4934 4935 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4936 if (!conn) { 4937 if (ev->link_type == ESCO_LINK) 4938 goto unlock; 4939 4940 /* When the link type in the event indicates SCO connection 4941 * and lookup of the connection object fails, then check 4942 * if an eSCO connection object exists. 4943 * 4944 * The core limits the synchronous connections to either 4945 * SCO or eSCO. The eSCO connection is preferred and tried 4946 * to be setup first and until successfully established, 4947 * the link type will be hinted as eSCO. 4948 */ 4949 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4950 if (!conn) 4951 goto unlock; 4952 } 4953 4954 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. 4955 * Processing it more than once per connection can corrupt kernel memory. 4956 * 4957 * As the connection handle is set here for the first time, it indicates 4958 * whether the connection is already set up. 4959 */ 4960 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { 4961 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); 4962 goto unlock; 4963 } 4964 4965 switch (status) { 4966 case 0x00: 4967 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); 4968 if (status) { 4969 conn->state = BT_CLOSED; 4970 break; 4971 } 4972 4973 conn->state = BT_CONNECTED; 4974 conn->type = ev->link_type; 4975 4976 hci_debugfs_create_conn(conn); 4977 hci_conn_add_sysfs(conn); 4978 break; 4979 4980 case 0x10: /* Connection Accept Timeout */ 4981 case 0x0d: /* Connection Rejected due to Limited Resources */ 4982 case 0x11: /* Unsupported Feature or Parameter Value */ 4983 case 0x1c: /* SCO interval rejected */ 4984 case 0x1a: /* Unsupported Remote Feature */ 4985 case 0x1e: /* Invalid LMP Parameters */ 4986 case 0x1f: /* Unspecified error */ 4987 case 0x20: /* Unsupported LMP Parameter value */ 4988 if (conn->out) { 4989 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 4990 (hdev->esco_type & EDR_ESCO_MASK); 4991 if (hci_setup_sync(conn, conn->parent->handle)) 4992 goto unlock; 4993 } 4994 fallthrough; 4995 4996 default: 4997 conn->state = BT_CLOSED; 4998 break; 4999 } 5000 5001 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 5002 /* Notify only in case of SCO over HCI transport data path which 5003 * is zero and non-zero value shall be non-HCI transport data path 5004 */ 5005 if (conn->codec.data_path == 0 && hdev->notify) { 5006 switch (ev->air_mode) { 5007 case 0x02: 5008 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 5009 break; 5010 case 0x03: 5011 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 5012 break; 5013 } 5014 } 5015 5016 hci_connect_cfm(conn, status); 5017 if (status) 5018 hci_conn_del(conn); 5019 5020 unlock: 5021 hci_dev_unlock(hdev); 5022 } 5023 5024 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 5025 { 5026 size_t parsed = 0; 5027 5028 while (parsed < eir_len) { 5029 u8 field_len = eir[0]; 5030 5031 if (field_len == 0) 5032 return parsed; 5033 5034 parsed += field_len + 1; 5035 eir += field_len + 1; 5036 } 5037 5038 return eir_len; 5039 } 5040 5041 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, 5042 struct sk_buff *skb) 5043 { 5044 struct hci_ev_ext_inquiry_result *ev = edata; 5045 struct inquiry_data data; 5046 size_t eir_len; 5047 int i; 5048 5049 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, 5050 flex_array_size(ev, info, ev->num))) 5051 return; 5052 5053 bt_dev_dbg(hdev, "num %d", ev->num); 5054 5055 if (!ev->num) 5056 return; 5057 5058 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 5059 return; 5060 5061 hci_dev_lock(hdev); 5062 5063 for (i = 0; i < ev->num; i++) { 5064 struct extended_inquiry_info *info = &ev->info[i]; 5065 u32 flags; 5066 bool name_known; 5067 5068 bacpy(&data.bdaddr, &info->bdaddr); 5069 data.pscan_rep_mode = info->pscan_rep_mode; 5070 data.pscan_period_mode = info->pscan_period_mode; 5071 data.pscan_mode = 0x00; 5072 memcpy(data.dev_class, info->dev_class, 3); 5073 data.clock_offset = info->clock_offset; 5074 data.rssi = info->rssi; 5075 data.ssp_mode = 0x01; 5076 5077 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5078 name_known = eir_get_data(info->data, 5079 sizeof(info->data), 5080 EIR_NAME_COMPLETE, NULL); 5081 else 5082 name_known = true; 5083 5084 flags = hci_inquiry_cache_update(hdev, &data, name_known); 5085 5086 eir_len = eir_get_length(info->data, sizeof(info->data)); 5087 5088 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 5089 info->dev_class, info->rssi, 5090 flags, info->data, eir_len, NULL, 0, 0); 5091 } 5092 5093 hci_dev_unlock(hdev); 5094 } 5095 5096 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, 5097 struct sk_buff *skb) 5098 { 5099 struct hci_ev_key_refresh_complete *ev = data; 5100 struct hci_conn *conn; 5101 5102 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, 5103 __le16_to_cpu(ev->handle)); 5104 5105 hci_dev_lock(hdev); 5106 5107 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5108 if (!conn) 5109 goto unlock; 5110 5111 /* For BR/EDR the necessary steps are taken through the 5112 * auth_complete event. 5113 */ 5114 if (conn->type != LE_LINK) 5115 goto unlock; 5116 5117 if (!ev->status) 5118 conn->sec_level = conn->pending_sec_level; 5119 5120 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 5121 5122 if (ev->status && conn->state == BT_CONNECTED) { 5123 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 5124 hci_conn_drop(conn); 5125 goto unlock; 5126 } 5127 5128 if (conn->state == BT_CONFIG) { 5129 if (!ev->status) 5130 conn->state = BT_CONNECTED; 5131 5132 hci_connect_cfm(conn, ev->status); 5133 hci_conn_drop(conn); 5134 } else { 5135 hci_auth_cfm(conn, ev->status); 5136 5137 hci_conn_hold(conn); 5138 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 5139 hci_conn_drop(conn); 5140 } 5141 5142 unlock: 5143 hci_dev_unlock(hdev); 5144 } 5145 5146 static u8 hci_get_auth_req(struct hci_conn *conn) 5147 { 5148 /* If remote requests no-bonding follow that lead */ 5149 if (conn->remote_auth == HCI_AT_NO_BONDING || 5150 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 5151 return conn->remote_auth | (conn->auth_type & 0x01); 5152 5153 /* If both remote and local have enough IO capabilities, require 5154 * MITM protection 5155 */ 5156 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 5157 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 5158 return conn->remote_auth | 0x01; 5159 5160 /* No MITM protection possible so ignore remote requirement */ 5161 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 5162 } 5163 5164 static u8 bredr_oob_data_present(struct hci_conn *conn) 5165 { 5166 struct hci_dev *hdev = conn->hdev; 5167 struct oob_data *data; 5168 5169 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 5170 if (!data) 5171 return 0x00; 5172 5173 if (bredr_sc_enabled(hdev)) { 5174 /* When Secure Connections is enabled, then just 5175 * return the present value stored with the OOB 5176 * data. The stored value contains the right present 5177 * information. However it can only be trusted when 5178 * not in Secure Connection Only mode. 5179 */ 5180 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 5181 return data->present; 5182 5183 /* When Secure Connections Only mode is enabled, then 5184 * the P-256 values are required. If they are not 5185 * available, then do not declare that OOB data is 5186 * present. 5187 */ 5188 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) || 5189 !crypto_memneq(data->hash256, ZERO_KEY, 16)) 5190 return 0x00; 5191 5192 return 0x02; 5193 } 5194 5195 /* When Secure Connections is not enabled or actually 5196 * not supported by the hardware, then check that if 5197 * P-192 data values are present. 5198 */ 5199 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) || 5200 !crypto_memneq(data->hash192, ZERO_KEY, 16)) 5201 return 0x00; 5202 5203 return 0x01; 5204 } 5205 5206 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, 5207 struct sk_buff *skb) 5208 { 5209 struct hci_ev_io_capa_request *ev = data; 5210 struct hci_conn *conn; 5211 5212 bt_dev_dbg(hdev, ""); 5213 5214 hci_dev_lock(hdev); 5215 5216 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5217 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) 5218 goto unlock; 5219 5220 /* Assume remote supports SSP since it has triggered this event */ 5221 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 5222 5223 hci_conn_hold(conn); 5224 5225 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5226 goto unlock; 5227 5228 /* Allow pairing if we're pairable, the initiators of the 5229 * pairing or if the remote is not requesting bonding. 5230 */ 5231 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 5232 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 5233 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 5234 struct hci_cp_io_capability_reply cp; 5235 5236 bacpy(&cp.bdaddr, &ev->bdaddr); 5237 /* Change the IO capability from KeyboardDisplay 5238 * to DisplayYesNo as it is not supported by BT spec. */ 5239 cp.capability = (conn->io_capability == 0x04) ? 5240 HCI_IO_DISPLAY_YESNO : conn->io_capability; 5241 5242 /* If we are initiators, there is no remote information yet */ 5243 if (conn->remote_auth == 0xff) { 5244 /* Request MITM protection if our IO caps allow it 5245 * except for the no-bonding case. 5246 */ 5247 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5248 conn->auth_type != HCI_AT_NO_BONDING) 5249 conn->auth_type |= 0x01; 5250 } else { 5251 conn->auth_type = hci_get_auth_req(conn); 5252 } 5253 5254 /* If we're not bondable, force one of the non-bondable 5255 * authentication requirement values. 5256 */ 5257 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 5258 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 5259 5260 cp.authentication = conn->auth_type; 5261 cp.oob_data = bredr_oob_data_present(conn); 5262 5263 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 5264 sizeof(cp), &cp); 5265 } else { 5266 struct hci_cp_io_capability_neg_reply cp; 5267 5268 bacpy(&cp.bdaddr, &ev->bdaddr); 5269 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 5270 5271 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 5272 sizeof(cp), &cp); 5273 } 5274 5275 unlock: 5276 hci_dev_unlock(hdev); 5277 } 5278 5279 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, 5280 struct sk_buff *skb) 5281 { 5282 struct hci_ev_io_capa_reply *ev = data; 5283 struct hci_conn *conn; 5284 5285 bt_dev_dbg(hdev, ""); 5286 5287 hci_dev_lock(hdev); 5288 5289 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5290 if (!conn) 5291 goto unlock; 5292 5293 conn->remote_cap = ev->capability; 5294 conn->remote_auth = ev->authentication; 5295 5296 unlock: 5297 hci_dev_unlock(hdev); 5298 } 5299 5300 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, 5301 struct sk_buff *skb) 5302 { 5303 struct hci_ev_user_confirm_req *ev = data; 5304 int loc_mitm, rem_mitm, confirm_hint = 0; 5305 struct hci_conn *conn; 5306 5307 bt_dev_dbg(hdev, ""); 5308 5309 hci_dev_lock(hdev); 5310 5311 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5312 goto unlock; 5313 5314 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5315 if (!conn) 5316 goto unlock; 5317 5318 loc_mitm = (conn->auth_type & 0x01); 5319 rem_mitm = (conn->remote_auth & 0x01); 5320 5321 /* If we require MITM but the remote device can't provide that 5322 * (it has NoInputNoOutput) then reject the confirmation 5323 * request. We check the security level here since it doesn't 5324 * necessarily match conn->auth_type. 5325 */ 5326 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 5327 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 5328 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); 5329 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 5330 sizeof(ev->bdaddr), &ev->bdaddr); 5331 goto unlock; 5332 } 5333 5334 /* If no side requires MITM protection; use JUST_CFM method */ 5335 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 5336 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 5337 5338 /* If we're not the initiator of request authorization and the 5339 * local IO capability is not NoInputNoOutput, use JUST_WORKS 5340 * method (mgmt_user_confirm with confirm_hint set to 1). 5341 */ 5342 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 5343 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) { 5344 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); 5345 confirm_hint = 1; 5346 goto confirm; 5347 } 5348 5349 /* If there already exists link key in local host, leave the 5350 * decision to user space since the remote device could be 5351 * legitimate or malicious. 5352 */ 5353 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5354 bt_dev_dbg(hdev, "Local host already has link key"); 5355 confirm_hint = 1; 5356 goto confirm; 5357 } 5358 5359 BT_DBG("Auto-accept of user confirmation with %ums delay", 5360 hdev->auto_accept_delay); 5361 5362 if (hdev->auto_accept_delay > 0) { 5363 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5364 queue_delayed_work(conn->hdev->workqueue, 5365 &conn->auto_accept_work, delay); 5366 goto unlock; 5367 } 5368 5369 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5370 sizeof(ev->bdaddr), &ev->bdaddr); 5371 goto unlock; 5372 } 5373 5374 confirm: 5375 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5376 le32_to_cpu(ev->passkey), confirm_hint); 5377 5378 unlock: 5379 hci_dev_unlock(hdev); 5380 } 5381 5382 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5383 struct sk_buff *skb) 5384 { 5385 struct hci_ev_user_passkey_req *ev = data; 5386 5387 bt_dev_dbg(hdev, ""); 5388 5389 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5390 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5391 } 5392 5393 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5394 struct sk_buff *skb) 5395 { 5396 struct hci_ev_user_passkey_notify *ev = data; 5397 struct hci_conn *conn; 5398 5399 bt_dev_dbg(hdev, ""); 5400 5401 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5402 if (!conn) 5403 return; 5404 5405 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5406 conn->passkey_entered = 0; 5407 5408 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5409 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5410 conn->dst_type, conn->passkey_notify, 5411 conn->passkey_entered); 5412 } 5413 5414 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5415 struct sk_buff *skb) 5416 { 5417 struct hci_ev_keypress_notify *ev = data; 5418 struct hci_conn *conn; 5419 5420 bt_dev_dbg(hdev, ""); 5421 5422 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5423 if (!conn) 5424 return; 5425 5426 switch (ev->type) { 5427 case HCI_KEYPRESS_STARTED: 5428 conn->passkey_entered = 0; 5429 return; 5430 5431 case HCI_KEYPRESS_ENTERED: 5432 conn->passkey_entered++; 5433 break; 5434 5435 case HCI_KEYPRESS_ERASED: 5436 conn->passkey_entered--; 5437 break; 5438 5439 case HCI_KEYPRESS_CLEARED: 5440 conn->passkey_entered = 0; 5441 break; 5442 5443 case HCI_KEYPRESS_COMPLETED: 5444 return; 5445 } 5446 5447 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5448 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5449 conn->dst_type, conn->passkey_notify, 5450 conn->passkey_entered); 5451 } 5452 5453 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5454 struct sk_buff *skb) 5455 { 5456 struct hci_ev_simple_pair_complete *ev = data; 5457 struct hci_conn *conn; 5458 5459 bt_dev_dbg(hdev, ""); 5460 5461 hci_dev_lock(hdev); 5462 5463 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5464 if (!conn || !hci_conn_ssp_enabled(conn)) 5465 goto unlock; 5466 5467 /* Reset the authentication requirement to unknown */ 5468 conn->remote_auth = 0xff; 5469 5470 /* To avoid duplicate auth_failed events to user space we check 5471 * the HCI_CONN_AUTH_PEND flag which will be set if we 5472 * initiated the authentication. A traditional auth_complete 5473 * event gets always produced as initiator and is also mapped to 5474 * the mgmt_auth_failed event */ 5475 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5476 mgmt_auth_failed(conn, ev->status); 5477 5478 hci_conn_drop(conn); 5479 5480 unlock: 5481 hci_dev_unlock(hdev); 5482 } 5483 5484 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, 5485 struct sk_buff *skb) 5486 { 5487 struct hci_ev_remote_host_features *ev = data; 5488 struct inquiry_entry *ie; 5489 struct hci_conn *conn; 5490 5491 bt_dev_dbg(hdev, ""); 5492 5493 hci_dev_lock(hdev); 5494 5495 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5496 if (conn) 5497 memcpy(conn->features[1], ev->features, 8); 5498 5499 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5500 if (ie) 5501 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5502 5503 hci_dev_unlock(hdev); 5504 } 5505 5506 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, 5507 struct sk_buff *skb) 5508 { 5509 struct hci_ev_remote_oob_data_request *ev = edata; 5510 struct oob_data *data; 5511 5512 bt_dev_dbg(hdev, ""); 5513 5514 hci_dev_lock(hdev); 5515 5516 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5517 goto unlock; 5518 5519 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5520 if (!data) { 5521 struct hci_cp_remote_oob_data_neg_reply cp; 5522 5523 bacpy(&cp.bdaddr, &ev->bdaddr); 5524 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5525 sizeof(cp), &cp); 5526 goto unlock; 5527 } 5528 5529 if (bredr_sc_enabled(hdev)) { 5530 struct hci_cp_remote_oob_ext_data_reply cp; 5531 5532 bacpy(&cp.bdaddr, &ev->bdaddr); 5533 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5534 memset(cp.hash192, 0, sizeof(cp.hash192)); 5535 memset(cp.rand192, 0, sizeof(cp.rand192)); 5536 } else { 5537 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5538 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5539 } 5540 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5541 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5542 5543 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5544 sizeof(cp), &cp); 5545 } else { 5546 struct hci_cp_remote_oob_data_reply cp; 5547 5548 bacpy(&cp.bdaddr, &ev->bdaddr); 5549 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5550 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5551 5552 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5553 sizeof(cp), &cp); 5554 } 5555 5556 unlock: 5557 hci_dev_unlock(hdev); 5558 } 5559 5560 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5561 u8 bdaddr_type, bdaddr_t *local_rpa) 5562 { 5563 if (conn->out) { 5564 conn->dst_type = bdaddr_type; 5565 conn->resp_addr_type = bdaddr_type; 5566 bacpy(&conn->resp_addr, bdaddr); 5567 5568 /* Check if the controller has set a Local RPA then it must be 5569 * used instead or hdev->rpa. 5570 */ 5571 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5572 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5573 bacpy(&conn->init_addr, local_rpa); 5574 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5575 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5576 bacpy(&conn->init_addr, &conn->hdev->rpa); 5577 } else { 5578 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5579 &conn->init_addr_type); 5580 } 5581 } else { 5582 conn->resp_addr_type = conn->hdev->adv_addr_type; 5583 /* Check if the controller has set a Local RPA then it must be 5584 * used instead or hdev->rpa. 5585 */ 5586 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5587 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5588 bacpy(&conn->resp_addr, local_rpa); 5589 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5590 /* In case of ext adv, resp_addr will be updated in 5591 * Adv Terminated event. 5592 */ 5593 if (!ext_adv_capable(conn->hdev)) 5594 bacpy(&conn->resp_addr, 5595 &conn->hdev->random_addr); 5596 } else { 5597 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5598 } 5599 5600 conn->init_addr_type = bdaddr_type; 5601 bacpy(&conn->init_addr, bdaddr); 5602 5603 /* For incoming connections, set the default minimum 5604 * and maximum connection interval. They will be used 5605 * to check if the parameters are in range and if not 5606 * trigger the connection update procedure. 5607 */ 5608 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5609 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5610 } 5611 } 5612 5613 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5614 bdaddr_t *bdaddr, u8 bdaddr_type, 5615 bdaddr_t *local_rpa, u8 role, u16 handle, 5616 u16 interval, u16 latency, 5617 u16 supervision_timeout) 5618 { 5619 struct hci_conn_params *params; 5620 struct hci_conn *conn; 5621 struct smp_irk *irk; 5622 u8 addr_type; 5623 5624 hci_dev_lock(hdev); 5625 5626 /* All controllers implicitly stop advertising in the event of a 5627 * connection, so ensure that the state bit is cleared. 5628 */ 5629 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5630 5631 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 5632 if (!conn) { 5633 /* In case of error status and there is no connection pending 5634 * just unlock as there is nothing to cleanup. 5635 */ 5636 if (status) 5637 goto unlock; 5638 5639 conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role); 5640 if (IS_ERR(conn)) { 5641 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); 5642 goto unlock; 5643 } 5644 5645 conn->dst_type = bdaddr_type; 5646 5647 /* If we didn't have a hci_conn object previously 5648 * but we're in central role this must be something 5649 * initiated using an accept list. Since accept list based 5650 * connections are not "first class citizens" we don't 5651 * have full tracking of them. Therefore, we go ahead 5652 * with a "best effort" approach of determining the 5653 * initiator address based on the HCI_PRIVACY flag. 5654 */ 5655 if (conn->out) { 5656 conn->resp_addr_type = bdaddr_type; 5657 bacpy(&conn->resp_addr, bdaddr); 5658 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5659 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5660 bacpy(&conn->init_addr, &hdev->rpa); 5661 } else { 5662 hci_copy_identity_address(hdev, 5663 &conn->init_addr, 5664 &conn->init_addr_type); 5665 } 5666 } 5667 } else { 5668 cancel_delayed_work(&conn->le_conn_timeout); 5669 } 5670 5671 /* The HCI_LE_Connection_Complete event is only sent once per connection. 5672 * Processing it more than once per connection can corrupt kernel memory. 5673 * 5674 * As the connection handle is set here for the first time, it indicates 5675 * whether the connection is already set up. 5676 */ 5677 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { 5678 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 5679 goto unlock; 5680 } 5681 5682 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5683 5684 /* Lookup the identity address from the stored connection 5685 * address and address type. 5686 * 5687 * When establishing connections to an identity address, the 5688 * connection procedure will store the resolvable random 5689 * address first. Now if it can be converted back into the 5690 * identity address, start using the identity address from 5691 * now on. 5692 */ 5693 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5694 if (irk) { 5695 bacpy(&conn->dst, &irk->bdaddr); 5696 conn->dst_type = irk->addr_type; 5697 } 5698 5699 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5700 5701 /* All connection failure handling is taken care of by the 5702 * hci_conn_failed function which is triggered by the HCI 5703 * request completion callbacks used for connecting. 5704 */ 5705 if (status || hci_conn_set_handle(conn, handle)) 5706 goto unlock; 5707 5708 /* Drop the connection if it has been aborted */ 5709 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) { 5710 hci_conn_drop(conn); 5711 goto unlock; 5712 } 5713 5714 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5715 addr_type = BDADDR_LE_PUBLIC; 5716 else 5717 addr_type = BDADDR_LE_RANDOM; 5718 5719 /* Drop the connection if the device is blocked */ 5720 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5721 hci_conn_drop(conn); 5722 goto unlock; 5723 } 5724 5725 mgmt_device_connected(hdev, conn, NULL, 0); 5726 5727 conn->sec_level = BT_SECURITY_LOW; 5728 conn->state = BT_CONFIG; 5729 5730 /* Store current advertising instance as connection advertising instance 5731 * when sotfware rotation is in use so it can be re-enabled when 5732 * disconnected. 5733 */ 5734 if (!ext_adv_capable(hdev)) 5735 conn->adv_instance = hdev->cur_adv_instance; 5736 5737 conn->le_conn_interval = interval; 5738 conn->le_conn_latency = latency; 5739 conn->le_supv_timeout = supervision_timeout; 5740 5741 hci_debugfs_create_conn(conn); 5742 hci_conn_add_sysfs(conn); 5743 5744 /* The remote features procedure is defined for central 5745 * role only. So only in case of an initiated connection 5746 * request the remote features. 5747 * 5748 * If the local controller supports peripheral-initiated features 5749 * exchange, then requesting the remote features in peripheral 5750 * role is possible. Otherwise just transition into the 5751 * connected state without requesting the remote features. 5752 */ 5753 if (conn->out || 5754 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5755 struct hci_cp_le_read_remote_features cp; 5756 5757 cp.handle = __cpu_to_le16(conn->handle); 5758 5759 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5760 sizeof(cp), &cp); 5761 5762 hci_conn_hold(conn); 5763 } else { 5764 conn->state = BT_CONNECTED; 5765 hci_connect_cfm(conn, status); 5766 } 5767 5768 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5769 conn->dst_type); 5770 if (params) { 5771 hci_pend_le_list_del_init(params); 5772 if (params->conn) { 5773 hci_conn_drop(params->conn); 5774 hci_conn_put(params->conn); 5775 params->conn = NULL; 5776 } 5777 } 5778 5779 unlock: 5780 hci_update_passive_scan(hdev); 5781 hci_dev_unlock(hdev); 5782 } 5783 5784 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, 5785 struct sk_buff *skb) 5786 { 5787 struct hci_ev_le_conn_complete *ev = data; 5788 5789 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5790 5791 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5792 NULL, ev->role, le16_to_cpu(ev->handle), 5793 le16_to_cpu(ev->interval), 5794 le16_to_cpu(ev->latency), 5795 le16_to_cpu(ev->supervision_timeout)); 5796 } 5797 5798 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, 5799 struct sk_buff *skb) 5800 { 5801 struct hci_ev_le_enh_conn_complete *ev = data; 5802 5803 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5804 5805 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5806 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5807 le16_to_cpu(ev->interval), 5808 le16_to_cpu(ev->latency), 5809 le16_to_cpu(ev->supervision_timeout)); 5810 } 5811 5812 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, 5813 struct sk_buff *skb) 5814 { 5815 struct hci_evt_le_ext_adv_set_term *ev = data; 5816 struct hci_conn *conn; 5817 struct adv_info *adv, *n; 5818 5819 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5820 5821 /* The Bluetooth Core 5.3 specification clearly states that this event 5822 * shall not be sent when the Host disables the advertising set. So in 5823 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 5824 * 5825 * When the Host disables an advertising set, all cleanup is done via 5826 * its command callback and not needed to be duplicated here. 5827 */ 5828 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 5829 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 5830 return; 5831 } 5832 5833 hci_dev_lock(hdev); 5834 5835 adv = hci_find_adv_instance(hdev, ev->handle); 5836 5837 if (ev->status) { 5838 if (!adv) 5839 goto unlock; 5840 5841 /* Remove advertising as it has been terminated */ 5842 hci_remove_adv_instance(hdev, ev->handle); 5843 mgmt_advertising_removed(NULL, hdev, ev->handle); 5844 5845 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 5846 if (adv->enabled) 5847 goto unlock; 5848 } 5849 5850 /* We are no longer advertising, clear HCI_LE_ADV */ 5851 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5852 goto unlock; 5853 } 5854 5855 if (adv) 5856 adv->enabled = false; 5857 5858 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 5859 if (conn) { 5860 /* Store handle in the connection so the correct advertising 5861 * instance can be re-enabled when disconnected. 5862 */ 5863 conn->adv_instance = ev->handle; 5864 5865 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 5866 bacmp(&conn->resp_addr, BDADDR_ANY)) 5867 goto unlock; 5868 5869 if (!ev->handle) { 5870 bacpy(&conn->resp_addr, &hdev->random_addr); 5871 goto unlock; 5872 } 5873 5874 if (adv) 5875 bacpy(&conn->resp_addr, &adv->random_addr); 5876 } 5877 5878 unlock: 5879 hci_dev_unlock(hdev); 5880 } 5881 5882 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, 5883 struct sk_buff *skb) 5884 { 5885 struct hci_ev_le_conn_update_complete *ev = data; 5886 struct hci_conn *conn; 5887 5888 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5889 5890 if (ev->status) 5891 return; 5892 5893 hci_dev_lock(hdev); 5894 5895 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5896 if (conn) { 5897 conn->le_conn_interval = le16_to_cpu(ev->interval); 5898 conn->le_conn_latency = le16_to_cpu(ev->latency); 5899 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 5900 } 5901 5902 hci_dev_unlock(hdev); 5903 } 5904 5905 /* This function requires the caller holds hdev->lock */ 5906 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 5907 bdaddr_t *addr, 5908 u8 addr_type, bool addr_resolved, 5909 u8 adv_type, u8 phy, u8 sec_phy) 5910 { 5911 struct hci_conn *conn; 5912 struct hci_conn_params *params; 5913 5914 /* If the event is not connectable don't proceed further */ 5915 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 5916 return NULL; 5917 5918 /* Ignore if the device is blocked or hdev is suspended */ 5919 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 5920 hdev->suspended) 5921 return NULL; 5922 5923 /* Most controller will fail if we try to create new connections 5924 * while we have an existing one in peripheral role. 5925 */ 5926 if (hdev->conn_hash.le_num_peripheral > 0 && 5927 (test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) || 5928 !(hdev->le_states[3] & 0x10))) 5929 return NULL; 5930 5931 /* If we're not connectable only connect devices that we have in 5932 * our pend_le_conns list. 5933 */ 5934 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 5935 addr_type); 5936 if (!params) 5937 return NULL; 5938 5939 if (!params->explicit_connect) { 5940 switch (params->auto_connect) { 5941 case HCI_AUTO_CONN_DIRECT: 5942 /* Only devices advertising with ADV_DIRECT_IND are 5943 * triggering a connection attempt. This is allowing 5944 * incoming connections from peripheral devices. 5945 */ 5946 if (adv_type != LE_ADV_DIRECT_IND) 5947 return NULL; 5948 break; 5949 case HCI_AUTO_CONN_ALWAYS: 5950 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 5951 * are triggering a connection attempt. This means 5952 * that incoming connections from peripheral device are 5953 * accepted and also outgoing connections to peripheral 5954 * devices are established when found. 5955 */ 5956 break; 5957 default: 5958 return NULL; 5959 } 5960 } 5961 5962 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 5963 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 5964 HCI_ROLE_MASTER, phy, sec_phy); 5965 if (!IS_ERR(conn)) { 5966 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 5967 * by higher layer that tried to connect, if no then 5968 * store the pointer since we don't really have any 5969 * other owner of the object besides the params that 5970 * triggered it. This way we can abort the connection if 5971 * the parameters get removed and keep the reference 5972 * count consistent once the connection is established. 5973 */ 5974 5975 if (!params->explicit_connect) 5976 params->conn = hci_conn_get(conn); 5977 5978 return conn; 5979 } 5980 5981 switch (PTR_ERR(conn)) { 5982 case -EBUSY: 5983 /* If hci_connect() returns -EBUSY it means there is already 5984 * an LE connection attempt going on. Since controllers don't 5985 * support more than one connection attempt at the time, we 5986 * don't consider this an error case. 5987 */ 5988 break; 5989 default: 5990 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 5991 return NULL; 5992 } 5993 5994 return NULL; 5995 } 5996 5997 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 5998 u8 bdaddr_type, bdaddr_t *direct_addr, 5999 u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi, 6000 u8 *data, u8 len, bool ext_adv, bool ctl_time, 6001 u64 instant) 6002 { 6003 struct discovery_state *d = &hdev->discovery; 6004 struct smp_irk *irk; 6005 struct hci_conn *conn; 6006 bool match, bdaddr_resolved; 6007 u32 flags; 6008 u8 *ptr; 6009 6010 switch (type) { 6011 case LE_ADV_IND: 6012 case LE_ADV_DIRECT_IND: 6013 case LE_ADV_SCAN_IND: 6014 case LE_ADV_NONCONN_IND: 6015 case LE_ADV_SCAN_RSP: 6016 break; 6017 default: 6018 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 6019 "type: 0x%02x", type); 6020 return; 6021 } 6022 6023 if (len > max_adv_len(hdev)) { 6024 bt_dev_err_ratelimited(hdev, 6025 "adv larger than maximum supported"); 6026 return; 6027 } 6028 6029 /* Find the end of the data in case the report contains padded zero 6030 * bytes at the end causing an invalid length value. 6031 * 6032 * When data is NULL, len is 0 so there is no need for extra ptr 6033 * check as 'ptr < data + 0' is already false in such case. 6034 */ 6035 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 6036 if (ptr + 1 + *ptr > data + len) 6037 break; 6038 } 6039 6040 /* Adjust for actual length. This handles the case when remote 6041 * device is advertising with incorrect data length. 6042 */ 6043 len = ptr - data; 6044 6045 /* If the direct address is present, then this report is from 6046 * a LE Direct Advertising Report event. In that case it is 6047 * important to see if the address is matching the local 6048 * controller address. 6049 */ 6050 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) { 6051 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 6052 &bdaddr_resolved); 6053 6054 /* Only resolvable random addresses are valid for these 6055 * kind of reports and others can be ignored. 6056 */ 6057 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 6058 return; 6059 6060 /* If the controller is not using resolvable random 6061 * addresses, then this report can be ignored. 6062 */ 6063 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 6064 return; 6065 6066 /* If the local IRK of the controller does not match 6067 * with the resolvable random address provided, then 6068 * this report can be ignored. 6069 */ 6070 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 6071 return; 6072 } 6073 6074 /* Check if we need to convert to identity address */ 6075 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 6076 if (irk) { 6077 bdaddr = &irk->bdaddr; 6078 bdaddr_type = irk->addr_type; 6079 } 6080 6081 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 6082 6083 /* Check if we have been requested to connect to this device. 6084 * 6085 * direct_addr is set only for directed advertising reports (it is NULL 6086 * for advertising reports) and is already verified to be RPA above. 6087 */ 6088 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 6089 type, phy, sec_phy); 6090 if (!ext_adv && conn && type == LE_ADV_IND && 6091 len <= max_adv_len(hdev)) { 6092 /* Store report for later inclusion by 6093 * mgmt_device_connected 6094 */ 6095 memcpy(conn->le_adv_data, data, len); 6096 conn->le_adv_data_len = len; 6097 } 6098 6099 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 6100 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6101 else 6102 flags = 0; 6103 6104 /* All scan results should be sent up for Mesh systems */ 6105 if (hci_dev_test_flag(hdev, HCI_MESH)) { 6106 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6107 rssi, flags, data, len, NULL, 0, instant); 6108 return; 6109 } 6110 6111 /* Passive scanning shouldn't trigger any device found events, 6112 * except for devices marked as CONN_REPORT for which we do send 6113 * device found events, or advertisement monitoring requested. 6114 */ 6115 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 6116 if (type == LE_ADV_DIRECT_IND) 6117 return; 6118 6119 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 6120 bdaddr, bdaddr_type) && 6121 idr_is_empty(&hdev->adv_monitors_idr)) 6122 return; 6123 6124 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6125 rssi, flags, data, len, NULL, 0, 0); 6126 return; 6127 } 6128 6129 /* When receiving a scan response, then there is no way to 6130 * know if the remote device is connectable or not. However 6131 * since scan responses are merged with a previously seen 6132 * advertising report, the flags field from that report 6133 * will be used. 6134 * 6135 * In the unlikely case that a controller just sends a scan 6136 * response event that doesn't match the pending report, then 6137 * it is marked as a standalone SCAN_RSP. 6138 */ 6139 if (type == LE_ADV_SCAN_RSP) 6140 flags = MGMT_DEV_FOUND_SCAN_RSP; 6141 6142 /* If there's nothing pending either store the data from this 6143 * event or send an immediate device found event if the data 6144 * should not be stored for later. 6145 */ 6146 if (!ext_adv && !has_pending_adv_report(hdev)) { 6147 /* If the report will trigger a SCAN_REQ store it for 6148 * later merging. 6149 */ 6150 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 6151 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6152 rssi, flags, data, len); 6153 return; 6154 } 6155 6156 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6157 rssi, flags, data, len, NULL, 0, 0); 6158 return; 6159 } 6160 6161 /* Check if the pending report is for the same device as the new one */ 6162 match = (!bacmp(bdaddr, &d->last_adv_addr) && 6163 bdaddr_type == d->last_adv_addr_type); 6164 6165 /* If the pending data doesn't match this report or this isn't a 6166 * scan response (e.g. we got a duplicate ADV_IND) then force 6167 * sending of the pending data. 6168 */ 6169 if (type != LE_ADV_SCAN_RSP || !match) { 6170 /* Send out whatever is in the cache, but skip duplicates */ 6171 if (!match) 6172 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6173 d->last_adv_addr_type, NULL, 6174 d->last_adv_rssi, d->last_adv_flags, 6175 d->last_adv_data, 6176 d->last_adv_data_len, NULL, 0, 0); 6177 6178 /* If the new report will trigger a SCAN_REQ store it for 6179 * later merging. 6180 */ 6181 if (!ext_adv && (type == LE_ADV_IND || 6182 type == LE_ADV_SCAN_IND)) { 6183 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6184 rssi, flags, data, len); 6185 return; 6186 } 6187 6188 /* The advertising reports cannot be merged, so clear 6189 * the pending report and send out a device found event. 6190 */ 6191 clear_pending_adv_report(hdev); 6192 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6193 rssi, flags, data, len, NULL, 0, 0); 6194 return; 6195 } 6196 6197 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 6198 * the new event is a SCAN_RSP. We can therefore proceed with 6199 * sending a merged device found event. 6200 */ 6201 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6202 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 6203 d->last_adv_data, d->last_adv_data_len, data, len, 0); 6204 clear_pending_adv_report(hdev); 6205 } 6206 6207 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, 6208 struct sk_buff *skb) 6209 { 6210 struct hci_ev_le_advertising_report *ev = data; 6211 u64 instant = jiffies; 6212 6213 if (!ev->num) 6214 return; 6215 6216 hci_dev_lock(hdev); 6217 6218 while (ev->num--) { 6219 struct hci_ev_le_advertising_info *info; 6220 s8 rssi; 6221 6222 info = hci_le_ev_skb_pull(hdev, skb, 6223 HCI_EV_LE_ADVERTISING_REPORT, 6224 sizeof(*info)); 6225 if (!info) 6226 break; 6227 6228 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, 6229 info->length + 1)) 6230 break; 6231 6232 if (info->length <= max_adv_len(hdev)) { 6233 rssi = info->data[info->length]; 6234 process_adv_report(hdev, info->type, &info->bdaddr, 6235 info->bdaddr_type, NULL, 0, 6236 HCI_ADV_PHY_1M, 0, rssi, 6237 info->data, info->length, false, 6238 false, instant); 6239 } else { 6240 bt_dev_err(hdev, "Dropping invalid advertising data"); 6241 } 6242 } 6243 6244 hci_dev_unlock(hdev); 6245 } 6246 6247 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 6248 { 6249 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 6250 switch (evt_type) { 6251 case LE_LEGACY_ADV_IND: 6252 return LE_ADV_IND; 6253 case LE_LEGACY_ADV_DIRECT_IND: 6254 return LE_ADV_DIRECT_IND; 6255 case LE_LEGACY_ADV_SCAN_IND: 6256 return LE_ADV_SCAN_IND; 6257 case LE_LEGACY_NONCONN_IND: 6258 return LE_ADV_NONCONN_IND; 6259 case LE_LEGACY_SCAN_RSP_ADV: 6260 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 6261 return LE_ADV_SCAN_RSP; 6262 } 6263 6264 goto invalid; 6265 } 6266 6267 if (evt_type & LE_EXT_ADV_CONN_IND) { 6268 if (evt_type & LE_EXT_ADV_DIRECT_IND) 6269 return LE_ADV_DIRECT_IND; 6270 6271 return LE_ADV_IND; 6272 } 6273 6274 if (evt_type & LE_EXT_ADV_SCAN_RSP) 6275 return LE_ADV_SCAN_RSP; 6276 6277 if (evt_type & LE_EXT_ADV_SCAN_IND) 6278 return LE_ADV_SCAN_IND; 6279 6280 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 6281 evt_type & LE_EXT_ADV_DIRECT_IND) 6282 return LE_ADV_NONCONN_IND; 6283 6284 invalid: 6285 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 6286 evt_type); 6287 6288 return LE_ADV_INVALID; 6289 } 6290 6291 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, 6292 struct sk_buff *skb) 6293 { 6294 struct hci_ev_le_ext_adv_report *ev = data; 6295 u64 instant = jiffies; 6296 6297 if (!ev->num) 6298 return; 6299 6300 hci_dev_lock(hdev); 6301 6302 while (ev->num--) { 6303 struct hci_ev_le_ext_adv_info *info; 6304 u8 legacy_evt_type; 6305 u16 evt_type; 6306 6307 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6308 sizeof(*info)); 6309 if (!info) 6310 break; 6311 6312 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6313 info->length)) 6314 break; 6315 6316 evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK; 6317 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); 6318 6319 if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, 6320 &hdev->quirks)) { 6321 info->primary_phy &= 0x1f; 6322 info->secondary_phy &= 0x1f; 6323 } 6324 6325 if (legacy_evt_type != LE_ADV_INVALID) { 6326 process_adv_report(hdev, legacy_evt_type, &info->bdaddr, 6327 info->bdaddr_type, NULL, 0, 6328 info->primary_phy, 6329 info->secondary_phy, 6330 info->rssi, info->data, info->length, 6331 !(evt_type & LE_EXT_ADV_LEGACY_PDU), 6332 false, instant); 6333 } 6334 } 6335 6336 hci_dev_unlock(hdev); 6337 } 6338 6339 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle) 6340 { 6341 struct hci_cp_le_pa_term_sync cp; 6342 6343 memset(&cp, 0, sizeof(cp)); 6344 cp.handle = handle; 6345 6346 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp); 6347 } 6348 6349 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, 6350 struct sk_buff *skb) 6351 { 6352 struct hci_ev_le_pa_sync_established *ev = data; 6353 int mask = hdev->link_mode; 6354 __u8 flags = 0; 6355 struct hci_conn *pa_sync, *conn; 6356 6357 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6358 6359 hci_dev_lock(hdev); 6360 6361 hci_dev_clear_flag(hdev, HCI_PA_SYNC); 6362 6363 conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr, 6364 ev->bdaddr_type); 6365 if (!conn) { 6366 bt_dev_err(hdev, 6367 "Unable to find connection for dst %pMR sid 0x%2.2x", 6368 &ev->bdaddr, ev->sid); 6369 goto unlock; 6370 } 6371 6372 clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); 6373 6374 conn->sync_handle = le16_to_cpu(ev->handle); 6375 conn->sid = HCI_SID_INVALID; 6376 6377 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags); 6378 if (!(mask & HCI_LM_ACCEPT)) { 6379 hci_le_pa_term_sync(hdev, ev->handle); 6380 goto unlock; 6381 } 6382 6383 if (!(flags & HCI_PROTO_DEFER)) 6384 goto unlock; 6385 6386 /* Add connection to indicate PA sync event */ 6387 pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY, 6388 HCI_ROLE_SLAVE); 6389 6390 if (IS_ERR(pa_sync)) 6391 goto unlock; 6392 6393 pa_sync->sync_handle = le16_to_cpu(ev->handle); 6394 6395 if (ev->status) { 6396 set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); 6397 6398 /* Notify iso layer */ 6399 hci_connect_cfm(pa_sync, ev->status); 6400 } 6401 6402 unlock: 6403 /* Handle any other pending PA sync command */ 6404 hci_pa_create_sync_pending(hdev); 6405 6406 hci_dev_unlock(hdev); 6407 } 6408 6409 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data, 6410 struct sk_buff *skb) 6411 { 6412 struct hci_ev_le_per_adv_report *ev = data; 6413 int mask = hdev->link_mode; 6414 __u8 flags = 0; 6415 struct hci_conn *pa_sync; 6416 6417 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); 6418 6419 hci_dev_lock(hdev); 6420 6421 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); 6422 if (!(mask & HCI_LM_ACCEPT)) 6423 goto unlock; 6424 6425 if (!(flags & HCI_PROTO_DEFER)) 6426 goto unlock; 6427 6428 pa_sync = hci_conn_hash_lookup_pa_sync_handle 6429 (hdev, 6430 le16_to_cpu(ev->sync_handle)); 6431 6432 if (!pa_sync) 6433 goto unlock; 6434 6435 if (ev->data_status == LE_PA_DATA_COMPLETE && 6436 !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) { 6437 /* Notify iso layer */ 6438 hci_connect_cfm(pa_sync, 0); 6439 6440 /* Notify MGMT layer */ 6441 mgmt_device_connected(hdev, pa_sync, NULL, 0); 6442 } 6443 6444 unlock: 6445 hci_dev_unlock(hdev); 6446 } 6447 6448 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, 6449 struct sk_buff *skb) 6450 { 6451 struct hci_ev_le_remote_feat_complete *ev = data; 6452 struct hci_conn *conn; 6453 6454 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6455 6456 hci_dev_lock(hdev); 6457 6458 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6459 if (conn) { 6460 if (!ev->status) 6461 memcpy(conn->features[0], ev->features, 8); 6462 6463 if (conn->state == BT_CONFIG) { 6464 __u8 status; 6465 6466 /* If the local controller supports peripheral-initiated 6467 * features exchange, but the remote controller does 6468 * not, then it is possible that the error code 0x1a 6469 * for unsupported remote feature gets returned. 6470 * 6471 * In this specific case, allow the connection to 6472 * transition into connected state and mark it as 6473 * successful. 6474 */ 6475 if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE && 6476 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) 6477 status = 0x00; 6478 else 6479 status = ev->status; 6480 6481 conn->state = BT_CONNECTED; 6482 hci_connect_cfm(conn, status); 6483 hci_conn_drop(conn); 6484 } 6485 } 6486 6487 hci_dev_unlock(hdev); 6488 } 6489 6490 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data, 6491 struct sk_buff *skb) 6492 { 6493 struct hci_ev_le_ltk_req *ev = data; 6494 struct hci_cp_le_ltk_reply cp; 6495 struct hci_cp_le_ltk_neg_reply neg; 6496 struct hci_conn *conn; 6497 struct smp_ltk *ltk; 6498 6499 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); 6500 6501 hci_dev_lock(hdev); 6502 6503 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6504 if (conn == NULL) 6505 goto not_found; 6506 6507 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); 6508 if (!ltk) 6509 goto not_found; 6510 6511 if (smp_ltk_is_sc(ltk)) { 6512 /* With SC both EDiv and Rand are set to zero */ 6513 if (ev->ediv || ev->rand) 6514 goto not_found; 6515 } else { 6516 /* For non-SC keys check that EDiv and Rand match */ 6517 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) 6518 goto not_found; 6519 } 6520 6521 memcpy(cp.ltk, ltk->val, ltk->enc_size); 6522 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); 6523 cp.handle = cpu_to_le16(conn->handle); 6524 6525 conn->pending_sec_level = smp_ltk_sec_level(ltk); 6526 6527 conn->enc_key_size = ltk->enc_size; 6528 6529 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 6530 6531 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 6532 * temporary key used to encrypt a connection following 6533 * pairing. It is used during the Encrypted Session Setup to 6534 * distribute the keys. Later, security can be re-established 6535 * using a distributed LTK. 6536 */ 6537 if (ltk->type == SMP_STK) { 6538 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6539 list_del_rcu(<k->list); 6540 kfree_rcu(ltk, rcu); 6541 } else { 6542 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6543 } 6544 6545 hci_dev_unlock(hdev); 6546 6547 return; 6548 6549 not_found: 6550 neg.handle = ev->handle; 6551 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 6552 hci_dev_unlock(hdev); 6553 } 6554 6555 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, 6556 u8 reason) 6557 { 6558 struct hci_cp_le_conn_param_req_neg_reply cp; 6559 6560 cp.handle = cpu_to_le16(handle); 6561 cp.reason = reason; 6562 6563 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), 6564 &cp); 6565 } 6566 6567 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, 6568 struct sk_buff *skb) 6569 { 6570 struct hci_ev_le_remote_conn_param_req *ev = data; 6571 struct hci_cp_le_conn_param_req_reply cp; 6572 struct hci_conn *hcon; 6573 u16 handle, min, max, latency, timeout; 6574 6575 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); 6576 6577 handle = le16_to_cpu(ev->handle); 6578 min = le16_to_cpu(ev->interval_min); 6579 max = le16_to_cpu(ev->interval_max); 6580 latency = le16_to_cpu(ev->latency); 6581 timeout = le16_to_cpu(ev->timeout); 6582 6583 hcon = hci_conn_hash_lookup_handle(hdev, handle); 6584 if (!hcon || hcon->state != BT_CONNECTED) 6585 return send_conn_param_neg_reply(hdev, handle, 6586 HCI_ERROR_UNKNOWN_CONN_ID); 6587 6588 if (max > hcon->le_conn_max_interval) 6589 return send_conn_param_neg_reply(hdev, handle, 6590 HCI_ERROR_INVALID_LL_PARAMS); 6591 6592 if (hci_check_conn_params(min, max, latency, timeout)) 6593 return send_conn_param_neg_reply(hdev, handle, 6594 HCI_ERROR_INVALID_LL_PARAMS); 6595 6596 if (hcon->role == HCI_ROLE_MASTER) { 6597 struct hci_conn_params *params; 6598 u8 store_hint; 6599 6600 hci_dev_lock(hdev); 6601 6602 params = hci_conn_params_lookup(hdev, &hcon->dst, 6603 hcon->dst_type); 6604 if (params) { 6605 params->conn_min_interval = min; 6606 params->conn_max_interval = max; 6607 params->conn_latency = latency; 6608 params->supervision_timeout = timeout; 6609 store_hint = 0x01; 6610 } else { 6611 store_hint = 0x00; 6612 } 6613 6614 hci_dev_unlock(hdev); 6615 6616 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 6617 store_hint, min, max, latency, timeout); 6618 } 6619 6620 cp.handle = ev->handle; 6621 cp.interval_min = ev->interval_min; 6622 cp.interval_max = ev->interval_max; 6623 cp.latency = ev->latency; 6624 cp.timeout = ev->timeout; 6625 cp.min_ce_len = 0; 6626 cp.max_ce_len = 0; 6627 6628 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); 6629 } 6630 6631 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, 6632 struct sk_buff *skb) 6633 { 6634 struct hci_ev_le_direct_adv_report *ev = data; 6635 u64 instant = jiffies; 6636 int i; 6637 6638 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, 6639 flex_array_size(ev, info, ev->num))) 6640 return; 6641 6642 if (!ev->num) 6643 return; 6644 6645 hci_dev_lock(hdev); 6646 6647 for (i = 0; i < ev->num; i++) { 6648 struct hci_ev_le_direct_adv_info *info = &ev->info[i]; 6649 6650 process_adv_report(hdev, info->type, &info->bdaddr, 6651 info->bdaddr_type, &info->direct_addr, 6652 info->direct_addr_type, HCI_ADV_PHY_1M, 0, 6653 info->rssi, NULL, 0, false, false, instant); 6654 } 6655 6656 hci_dev_unlock(hdev); 6657 } 6658 6659 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data, 6660 struct sk_buff *skb) 6661 { 6662 struct hci_ev_le_phy_update_complete *ev = data; 6663 struct hci_conn *conn; 6664 6665 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6666 6667 if (ev->status) 6668 return; 6669 6670 hci_dev_lock(hdev); 6671 6672 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6673 if (!conn) 6674 goto unlock; 6675 6676 conn->le_tx_phy = ev->tx_phy; 6677 conn->le_rx_phy = ev->rx_phy; 6678 6679 unlock: 6680 hci_dev_unlock(hdev); 6681 } 6682 6683 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, 6684 struct sk_buff *skb) 6685 { 6686 struct hci_evt_le_cis_established *ev = data; 6687 struct hci_conn *conn; 6688 struct bt_iso_qos *qos; 6689 bool pending = false; 6690 u16 handle = __le16_to_cpu(ev->handle); 6691 u32 c_sdu_interval, p_sdu_interval; 6692 6693 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6694 6695 hci_dev_lock(hdev); 6696 6697 conn = hci_conn_hash_lookup_handle(hdev, handle); 6698 if (!conn) { 6699 bt_dev_err(hdev, 6700 "Unable to find connection with handle 0x%4.4x", 6701 handle); 6702 goto unlock; 6703 } 6704 6705 if (conn->type != ISO_LINK) { 6706 bt_dev_err(hdev, 6707 "Invalid connection link type handle 0x%4.4x", 6708 handle); 6709 goto unlock; 6710 } 6711 6712 qos = &conn->iso_qos; 6713 6714 pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags); 6715 6716 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G 6717 * page 3075: 6718 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) × 6719 * ISO_Interval + SDU_Interval_C_To_P 6720 * ... 6721 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) - 6722 * Transport_Latency 6723 */ 6724 c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) + 6725 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) - 6726 get_unaligned_le24(ev->c_latency); 6727 p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) + 6728 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) - 6729 get_unaligned_le24(ev->p_latency); 6730 6731 switch (conn->role) { 6732 case HCI_ROLE_SLAVE: 6733 qos->ucast.in.interval = c_sdu_interval; 6734 qos->ucast.out.interval = p_sdu_interval; 6735 /* Convert Transport Latency (us) to Latency (msec) */ 6736 qos->ucast.in.latency = 6737 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), 6738 1000); 6739 qos->ucast.out.latency = 6740 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 6741 1000); 6742 qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu); 6743 qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu); 6744 qos->ucast.in.phy = ev->c_phy; 6745 qos->ucast.out.phy = ev->p_phy; 6746 break; 6747 case HCI_ROLE_MASTER: 6748 qos->ucast.in.interval = p_sdu_interval; 6749 qos->ucast.out.interval = c_sdu_interval; 6750 /* Convert Transport Latency (us) to Latency (msec) */ 6751 qos->ucast.out.latency = 6752 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), 6753 1000); 6754 qos->ucast.in.latency = 6755 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 6756 1000); 6757 qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu); 6758 qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu); 6759 qos->ucast.out.phy = ev->c_phy; 6760 qos->ucast.in.phy = ev->p_phy; 6761 break; 6762 } 6763 6764 if (!ev->status) { 6765 conn->state = BT_CONNECTED; 6766 hci_debugfs_create_conn(conn); 6767 hci_conn_add_sysfs(conn); 6768 hci_iso_setup_path(conn); 6769 goto unlock; 6770 } 6771 6772 conn->state = BT_CLOSED; 6773 hci_connect_cfm(conn, ev->status); 6774 hci_conn_del(conn); 6775 6776 unlock: 6777 if (pending) 6778 hci_le_create_cis_pending(hdev); 6779 6780 hci_dev_unlock(hdev); 6781 } 6782 6783 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) 6784 { 6785 struct hci_cp_le_reject_cis cp; 6786 6787 memset(&cp, 0, sizeof(cp)); 6788 cp.handle = handle; 6789 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 6790 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); 6791 } 6792 6793 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) 6794 { 6795 struct hci_cp_le_accept_cis cp; 6796 6797 memset(&cp, 0, sizeof(cp)); 6798 cp.handle = handle; 6799 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); 6800 } 6801 6802 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, 6803 struct sk_buff *skb) 6804 { 6805 struct hci_evt_le_cis_req *ev = data; 6806 u16 acl_handle, cis_handle; 6807 struct hci_conn *acl, *cis; 6808 int mask; 6809 __u8 flags = 0; 6810 6811 acl_handle = __le16_to_cpu(ev->acl_handle); 6812 cis_handle = __le16_to_cpu(ev->cis_handle); 6813 6814 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", 6815 acl_handle, cis_handle, ev->cig_id, ev->cis_id); 6816 6817 hci_dev_lock(hdev); 6818 6819 acl = hci_conn_hash_lookup_handle(hdev, acl_handle); 6820 if (!acl) 6821 goto unlock; 6822 6823 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags); 6824 if (!(mask & HCI_LM_ACCEPT)) { 6825 hci_le_reject_cis(hdev, ev->cis_handle); 6826 goto unlock; 6827 } 6828 6829 cis = hci_conn_hash_lookup_handle(hdev, cis_handle); 6830 if (!cis) { 6831 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE, 6832 cis_handle); 6833 if (IS_ERR(cis)) { 6834 hci_le_reject_cis(hdev, ev->cis_handle); 6835 goto unlock; 6836 } 6837 } 6838 6839 cis->iso_qos.ucast.cig = ev->cig_id; 6840 cis->iso_qos.ucast.cis = ev->cis_id; 6841 6842 if (!(flags & HCI_PROTO_DEFER)) { 6843 hci_le_accept_cis(hdev, ev->cis_handle); 6844 } else { 6845 cis->state = BT_CONNECT2; 6846 hci_connect_cfm(cis, 0); 6847 } 6848 6849 unlock: 6850 hci_dev_unlock(hdev); 6851 } 6852 6853 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data) 6854 { 6855 u8 handle = PTR_UINT(data); 6856 6857 return hci_le_terminate_big_sync(hdev, handle, 6858 HCI_ERROR_LOCAL_HOST_TERM); 6859 } 6860 6861 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data, 6862 struct sk_buff *skb) 6863 { 6864 struct hci_evt_le_create_big_complete *ev = data; 6865 struct hci_conn *conn; 6866 __u8 i = 0; 6867 6868 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 6869 6870 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE, 6871 flex_array_size(ev, bis_handle, ev->num_bis))) 6872 return; 6873 6874 hci_dev_lock(hdev); 6875 rcu_read_lock(); 6876 6877 /* Connect all BISes that are bound to the BIG */ 6878 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 6879 if (bacmp(&conn->dst, BDADDR_ANY) || 6880 conn->type != ISO_LINK || 6881 conn->iso_qos.bcast.big != ev->handle) 6882 continue; 6883 6884 if (hci_conn_set_handle(conn, 6885 __le16_to_cpu(ev->bis_handle[i++]))) 6886 continue; 6887 6888 if (!ev->status) { 6889 conn->state = BT_CONNECTED; 6890 set_bit(HCI_CONN_BIG_CREATED, &conn->flags); 6891 rcu_read_unlock(); 6892 hci_debugfs_create_conn(conn); 6893 hci_conn_add_sysfs(conn); 6894 hci_iso_setup_path(conn); 6895 rcu_read_lock(); 6896 continue; 6897 } 6898 6899 hci_connect_cfm(conn, ev->status); 6900 rcu_read_unlock(); 6901 hci_conn_del(conn); 6902 rcu_read_lock(); 6903 } 6904 6905 rcu_read_unlock(); 6906 6907 if (!ev->status && !i) 6908 /* If no BISes have been connected for the BIG, 6909 * terminate. This is in case all bound connections 6910 * have been closed before the BIG creation 6911 * has completed. 6912 */ 6913 hci_cmd_sync_queue(hdev, hci_iso_term_big_sync, 6914 UINT_PTR(ev->handle), NULL); 6915 6916 hci_dev_unlock(hdev); 6917 } 6918 6919 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, 6920 struct sk_buff *skb) 6921 { 6922 struct hci_evt_le_big_sync_estabilished *ev = data; 6923 struct hci_conn *bis, *conn; 6924 int i; 6925 6926 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6927 6928 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED, 6929 flex_array_size(ev, bis, ev->num_bis))) 6930 return; 6931 6932 hci_dev_lock(hdev); 6933 6934 conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle, 6935 ev->num_bis); 6936 if (!conn) { 6937 bt_dev_err(hdev, 6938 "Unable to find connection for big 0x%2.2x", 6939 ev->handle); 6940 goto unlock; 6941 } 6942 6943 clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); 6944 6945 conn->num_bis = 0; 6946 memset(conn->bis, 0, sizeof(conn->num_bis)); 6947 6948 for (i = 0; i < ev->num_bis; i++) { 6949 u16 handle = le16_to_cpu(ev->bis[i]); 6950 __le32 interval; 6951 6952 bis = hci_conn_hash_lookup_handle(hdev, handle); 6953 if (!bis) { 6954 if (handle > HCI_CONN_HANDLE_MAX) { 6955 bt_dev_dbg(hdev, "ignore too large handle %u", handle); 6956 continue; 6957 } 6958 bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, 6959 HCI_ROLE_SLAVE, handle); 6960 if (IS_ERR(bis)) 6961 continue; 6962 } 6963 6964 if (ev->status != 0x42) 6965 /* Mark PA sync as established */ 6966 set_bit(HCI_CONN_PA_SYNC, &bis->flags); 6967 6968 bis->sync_handle = conn->sync_handle; 6969 bis->iso_qos.bcast.big = ev->handle; 6970 memset(&interval, 0, sizeof(interval)); 6971 memcpy(&interval, ev->latency, sizeof(ev->latency)); 6972 bis->iso_qos.bcast.in.interval = le32_to_cpu(interval); 6973 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ 6974 bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100; 6975 bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); 6976 6977 if (!ev->status) { 6978 set_bit(HCI_CONN_BIG_SYNC, &bis->flags); 6979 hci_iso_setup_path(bis); 6980 } 6981 } 6982 6983 /* In case BIG sync failed, notify each failed connection to 6984 * the user after all hci connections have been added 6985 */ 6986 if (ev->status) 6987 for (i = 0; i < ev->num_bis; i++) { 6988 u16 handle = le16_to_cpu(ev->bis[i]); 6989 6990 bis = hci_conn_hash_lookup_handle(hdev, handle); 6991 if (!bis) 6992 continue; 6993 6994 set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags); 6995 hci_connect_cfm(bis, ev->status); 6996 } 6997 6998 unlock: 6999 /* Handle any other pending BIG sync command */ 7000 hci_le_big_create_sync_pending(hdev); 7001 7002 hci_dev_unlock(hdev); 7003 } 7004 7005 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, 7006 struct sk_buff *skb) 7007 { 7008 struct hci_evt_le_big_info_adv_report *ev = data; 7009 int mask = hdev->link_mode; 7010 __u8 flags = 0; 7011 struct hci_conn *pa_sync; 7012 7013 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); 7014 7015 hci_dev_lock(hdev); 7016 7017 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); 7018 if (!(mask & HCI_LM_ACCEPT)) 7019 goto unlock; 7020 7021 if (!(flags & HCI_PROTO_DEFER)) 7022 goto unlock; 7023 7024 pa_sync = hci_conn_hash_lookup_pa_sync_handle 7025 (hdev, 7026 le16_to_cpu(ev->sync_handle)); 7027 7028 if (!pa_sync) 7029 goto unlock; 7030 7031 pa_sync->iso_qos.bcast.encryption = ev->encryption; 7032 7033 /* Notify iso layer */ 7034 hci_connect_cfm(pa_sync, 0); 7035 7036 unlock: 7037 hci_dev_unlock(hdev); 7038 } 7039 7040 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \ 7041 [_op] = { \ 7042 .func = _func, \ 7043 .min_len = _min_len, \ 7044 .max_len = _max_len, \ 7045 } 7046 7047 #define HCI_LE_EV(_op, _func, _len) \ 7048 HCI_LE_EV_VL(_op, _func, _len, _len) 7049 7050 #define HCI_LE_EV_STATUS(_op, _func) \ 7051 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status)) 7052 7053 /* Entries in this table shall have their position according to the subevent 7054 * opcode they handle so the use of the macros above is recommend since it does 7055 * attempt to initialize at its proper index using Designated Initializers that 7056 * way events without a callback function can be ommited. 7057 */ 7058 static const struct hci_le_ev { 7059 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 7060 u16 min_len; 7061 u16 max_len; 7062 } hci_le_ev_table[U8_MAX + 1] = { 7063 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */ 7064 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt, 7065 sizeof(struct hci_ev_le_conn_complete)), 7066 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */ 7067 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt, 7068 sizeof(struct hci_ev_le_advertising_report), 7069 HCI_MAX_EVENT_SIZE), 7070 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */ 7071 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE, 7072 hci_le_conn_update_complete_evt, 7073 sizeof(struct hci_ev_le_conn_update_complete)), 7074 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */ 7075 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE, 7076 hci_le_remote_feat_complete_evt, 7077 sizeof(struct hci_ev_le_remote_feat_complete)), 7078 /* [0x05 = HCI_EV_LE_LTK_REQ] */ 7079 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, 7080 sizeof(struct hci_ev_le_ltk_req)), 7081 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */ 7082 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ, 7083 hci_le_remote_conn_param_req_evt, 7084 sizeof(struct hci_ev_le_remote_conn_param_req)), 7085 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */ 7086 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE, 7087 hci_le_enh_conn_complete_evt, 7088 sizeof(struct hci_ev_le_enh_conn_complete)), 7089 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */ 7090 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt, 7091 sizeof(struct hci_ev_le_direct_adv_report), 7092 HCI_MAX_EVENT_SIZE), 7093 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */ 7094 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt, 7095 sizeof(struct hci_ev_le_phy_update_complete)), 7096 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */ 7097 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, 7098 sizeof(struct hci_ev_le_ext_adv_report), 7099 HCI_MAX_EVENT_SIZE), 7100 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */ 7101 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED, 7102 hci_le_pa_sync_estabilished_evt, 7103 sizeof(struct hci_ev_le_pa_sync_established)), 7104 /* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */ 7105 HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT, 7106 hci_le_per_adv_report_evt, 7107 sizeof(struct hci_ev_le_per_adv_report), 7108 HCI_MAX_EVENT_SIZE), 7109 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ 7110 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, 7111 sizeof(struct hci_evt_le_ext_adv_set_term)), 7112 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */ 7113 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt, 7114 sizeof(struct hci_evt_le_cis_established)), 7115 /* [0x1a = HCI_EVT_LE_CIS_REQ] */ 7116 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt, 7117 sizeof(struct hci_evt_le_cis_req)), 7118 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */ 7119 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE, 7120 hci_le_create_big_complete_evt, 7121 sizeof(struct hci_evt_le_create_big_complete), 7122 HCI_MAX_EVENT_SIZE), 7123 /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */ 7124 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED, 7125 hci_le_big_sync_established_evt, 7126 sizeof(struct hci_evt_le_big_sync_estabilished), 7127 HCI_MAX_EVENT_SIZE), 7128 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */ 7129 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT, 7130 hci_le_big_info_adv_report_evt, 7131 sizeof(struct hci_evt_le_big_info_adv_report), 7132 HCI_MAX_EVENT_SIZE), 7133 }; 7134 7135 static void hci_le_meta_evt(struct hci_dev *hdev, void *data, 7136 struct sk_buff *skb, u16 *opcode, u8 *status, 7137 hci_req_complete_t *req_complete, 7138 hci_req_complete_skb_t *req_complete_skb) 7139 { 7140 struct hci_ev_le_meta *ev = data; 7141 const struct hci_le_ev *subev; 7142 7143 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent); 7144 7145 /* Only match event if command OGF is for LE */ 7146 if (hdev->req_skb && 7147 hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 && 7148 hci_skb_event(hdev->req_skb) == ev->subevent) { 7149 *opcode = hci_skb_opcode(hdev->req_skb); 7150 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, 7151 req_complete_skb); 7152 } 7153 7154 subev = &hci_le_ev_table[ev->subevent]; 7155 if (!subev->func) 7156 return; 7157 7158 if (skb->len < subev->min_len) { 7159 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u", 7160 ev->subevent, skb->len, subev->min_len); 7161 return; 7162 } 7163 7164 /* Just warn if the length is over max_len size it still be 7165 * possible to partially parse the event so leave to callback to 7166 * decide if that is acceptable. 7167 */ 7168 if (skb->len > subev->max_len) 7169 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u", 7170 ev->subevent, skb->len, subev->max_len); 7171 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len); 7172 if (!data) 7173 return; 7174 7175 subev->func(hdev, data, skb); 7176 } 7177 7178 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, 7179 u8 event, struct sk_buff *skb) 7180 { 7181 struct hci_ev_cmd_complete *ev; 7182 struct hci_event_hdr *hdr; 7183 7184 if (!skb) 7185 return false; 7186 7187 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr)); 7188 if (!hdr) 7189 return false; 7190 7191 if (event) { 7192 if (hdr->evt != event) 7193 return false; 7194 return true; 7195 } 7196 7197 /* Check if request ended in Command Status - no way to retrieve 7198 * any extra parameters in this case. 7199 */ 7200 if (hdr->evt == HCI_EV_CMD_STATUS) 7201 return false; 7202 7203 if (hdr->evt != HCI_EV_CMD_COMPLETE) { 7204 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", 7205 hdr->evt); 7206 return false; 7207 } 7208 7209 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev)); 7210 if (!ev) 7211 return false; 7212 7213 if (opcode != __le16_to_cpu(ev->opcode)) { 7214 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, 7215 __le16_to_cpu(ev->opcode)); 7216 return false; 7217 } 7218 7219 return true; 7220 } 7221 7222 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, 7223 struct sk_buff *skb) 7224 { 7225 struct hci_ev_le_advertising_info *adv; 7226 struct hci_ev_le_direct_adv_info *direct_adv; 7227 struct hci_ev_le_ext_adv_info *ext_adv; 7228 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data; 7229 const struct hci_ev_conn_request *conn_request = (void *)skb->data; 7230 7231 hci_dev_lock(hdev); 7232 7233 /* If we are currently suspended and this is the first BT event seen, 7234 * save the wake reason associated with the event. 7235 */ 7236 if (!hdev->suspended || hdev->wake_reason) 7237 goto unlock; 7238 7239 /* Default to remote wake. Values for wake_reason are documented in the 7240 * Bluez mgmt api docs. 7241 */ 7242 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE; 7243 7244 /* Once configured for remote wakeup, we should only wake up for 7245 * reconnections. It's useful to see which device is waking us up so 7246 * keep track of the bdaddr of the connection event that woke us up. 7247 */ 7248 if (event == HCI_EV_CONN_REQUEST) { 7249 bacpy(&hdev->wake_addr, &conn_request->bdaddr); 7250 hdev->wake_addr_type = BDADDR_BREDR; 7251 } else if (event == HCI_EV_CONN_COMPLETE) { 7252 bacpy(&hdev->wake_addr, &conn_complete->bdaddr); 7253 hdev->wake_addr_type = BDADDR_BREDR; 7254 } else if (event == HCI_EV_LE_META) { 7255 struct hci_ev_le_meta *le_ev = (void *)skb->data; 7256 u8 subevent = le_ev->subevent; 7257 u8 *ptr = &skb->data[sizeof(*le_ev)]; 7258 u8 num_reports = *ptr; 7259 7260 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT || 7261 subevent == HCI_EV_LE_DIRECT_ADV_REPORT || 7262 subevent == HCI_EV_LE_EXT_ADV_REPORT) && 7263 num_reports) { 7264 adv = (void *)(ptr + 1); 7265 direct_adv = (void *)(ptr + 1); 7266 ext_adv = (void *)(ptr + 1); 7267 7268 switch (subevent) { 7269 case HCI_EV_LE_ADVERTISING_REPORT: 7270 bacpy(&hdev->wake_addr, &adv->bdaddr); 7271 hdev->wake_addr_type = adv->bdaddr_type; 7272 break; 7273 case HCI_EV_LE_DIRECT_ADV_REPORT: 7274 bacpy(&hdev->wake_addr, &direct_adv->bdaddr); 7275 hdev->wake_addr_type = direct_adv->bdaddr_type; 7276 break; 7277 case HCI_EV_LE_EXT_ADV_REPORT: 7278 bacpy(&hdev->wake_addr, &ext_adv->bdaddr); 7279 hdev->wake_addr_type = ext_adv->bdaddr_type; 7280 break; 7281 } 7282 } 7283 } else { 7284 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED; 7285 } 7286 7287 unlock: 7288 hci_dev_unlock(hdev); 7289 } 7290 7291 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \ 7292 [_op] = { \ 7293 .req = false, \ 7294 .func = _func, \ 7295 .min_len = _min_len, \ 7296 .max_len = _max_len, \ 7297 } 7298 7299 #define HCI_EV(_op, _func, _len) \ 7300 HCI_EV_VL(_op, _func, _len, _len) 7301 7302 #define HCI_EV_STATUS(_op, _func) \ 7303 HCI_EV(_op, _func, sizeof(struct hci_ev_status)) 7304 7305 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \ 7306 [_op] = { \ 7307 .req = true, \ 7308 .func_req = _func, \ 7309 .min_len = _min_len, \ 7310 .max_len = _max_len, \ 7311 } 7312 7313 #define HCI_EV_REQ(_op, _func, _len) \ 7314 HCI_EV_REQ_VL(_op, _func, _len, _len) 7315 7316 /* Entries in this table shall have their position according to the event opcode 7317 * they handle so the use of the macros above is recommend since it does attempt 7318 * to initialize at its proper index using Designated Initializers that way 7319 * events without a callback function don't have entered. 7320 */ 7321 static const struct hci_ev { 7322 bool req; 7323 union { 7324 void (*func)(struct hci_dev *hdev, void *data, 7325 struct sk_buff *skb); 7326 void (*func_req)(struct hci_dev *hdev, void *data, 7327 struct sk_buff *skb, u16 *opcode, u8 *status, 7328 hci_req_complete_t *req_complete, 7329 hci_req_complete_skb_t *req_complete_skb); 7330 }; 7331 u16 min_len; 7332 u16 max_len; 7333 } hci_ev_table[U8_MAX + 1] = { 7334 /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */ 7335 HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt), 7336 /* [0x02 = HCI_EV_INQUIRY_RESULT] */ 7337 HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt, 7338 sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE), 7339 /* [0x03 = HCI_EV_CONN_COMPLETE] */ 7340 HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt, 7341 sizeof(struct hci_ev_conn_complete)), 7342 /* [0x04 = HCI_EV_CONN_REQUEST] */ 7343 HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt, 7344 sizeof(struct hci_ev_conn_request)), 7345 /* [0x05 = HCI_EV_DISCONN_COMPLETE] */ 7346 HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt, 7347 sizeof(struct hci_ev_disconn_complete)), 7348 /* [0x06 = HCI_EV_AUTH_COMPLETE] */ 7349 HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt, 7350 sizeof(struct hci_ev_auth_complete)), 7351 /* [0x07 = HCI_EV_REMOTE_NAME] */ 7352 HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt, 7353 sizeof(struct hci_ev_remote_name)), 7354 /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */ 7355 HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt, 7356 sizeof(struct hci_ev_encrypt_change)), 7357 /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */ 7358 HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE, 7359 hci_change_link_key_complete_evt, 7360 sizeof(struct hci_ev_change_link_key_complete)), 7361 /* [0x0b = HCI_EV_REMOTE_FEATURES] */ 7362 HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt, 7363 sizeof(struct hci_ev_remote_features)), 7364 /* [0x0e = HCI_EV_CMD_COMPLETE] */ 7365 HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt, 7366 sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE), 7367 /* [0x0f = HCI_EV_CMD_STATUS] */ 7368 HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt, 7369 sizeof(struct hci_ev_cmd_status)), 7370 /* [0x10 = HCI_EV_CMD_STATUS] */ 7371 HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt, 7372 sizeof(struct hci_ev_hardware_error)), 7373 /* [0x12 = HCI_EV_ROLE_CHANGE] */ 7374 HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt, 7375 sizeof(struct hci_ev_role_change)), 7376 /* [0x13 = HCI_EV_NUM_COMP_PKTS] */ 7377 HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt, 7378 sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE), 7379 /* [0x14 = HCI_EV_MODE_CHANGE] */ 7380 HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt, 7381 sizeof(struct hci_ev_mode_change)), 7382 /* [0x16 = HCI_EV_PIN_CODE_REQ] */ 7383 HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt, 7384 sizeof(struct hci_ev_pin_code_req)), 7385 /* [0x17 = HCI_EV_LINK_KEY_REQ] */ 7386 HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt, 7387 sizeof(struct hci_ev_link_key_req)), 7388 /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */ 7389 HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt, 7390 sizeof(struct hci_ev_link_key_notify)), 7391 /* [0x1c = HCI_EV_CLOCK_OFFSET] */ 7392 HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt, 7393 sizeof(struct hci_ev_clock_offset)), 7394 /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */ 7395 HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt, 7396 sizeof(struct hci_ev_pkt_type_change)), 7397 /* [0x20 = HCI_EV_PSCAN_REP_MODE] */ 7398 HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt, 7399 sizeof(struct hci_ev_pscan_rep_mode)), 7400 /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */ 7401 HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI, 7402 hci_inquiry_result_with_rssi_evt, 7403 sizeof(struct hci_ev_inquiry_result_rssi), 7404 HCI_MAX_EVENT_SIZE), 7405 /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */ 7406 HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt, 7407 sizeof(struct hci_ev_remote_ext_features)), 7408 /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */ 7409 HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt, 7410 sizeof(struct hci_ev_sync_conn_complete)), 7411 /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */ 7412 HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT, 7413 hci_extended_inquiry_result_evt, 7414 sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE), 7415 /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */ 7416 HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt, 7417 sizeof(struct hci_ev_key_refresh_complete)), 7418 /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */ 7419 HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt, 7420 sizeof(struct hci_ev_io_capa_request)), 7421 /* [0x32 = HCI_EV_IO_CAPA_REPLY] */ 7422 HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt, 7423 sizeof(struct hci_ev_io_capa_reply)), 7424 /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */ 7425 HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt, 7426 sizeof(struct hci_ev_user_confirm_req)), 7427 /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */ 7428 HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt, 7429 sizeof(struct hci_ev_user_passkey_req)), 7430 /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */ 7431 HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt, 7432 sizeof(struct hci_ev_remote_oob_data_request)), 7433 /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */ 7434 HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt, 7435 sizeof(struct hci_ev_simple_pair_complete)), 7436 /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */ 7437 HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt, 7438 sizeof(struct hci_ev_user_passkey_notify)), 7439 /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */ 7440 HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt, 7441 sizeof(struct hci_ev_keypress_notify)), 7442 /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */ 7443 HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt, 7444 sizeof(struct hci_ev_remote_host_features)), 7445 /* [0x3e = HCI_EV_LE_META] */ 7446 HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, 7447 sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), 7448 /* [0xff = HCI_EV_VENDOR] */ 7449 HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE), 7450 }; 7451 7452 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb, 7453 u16 *opcode, u8 *status, 7454 hci_req_complete_t *req_complete, 7455 hci_req_complete_skb_t *req_complete_skb) 7456 { 7457 const struct hci_ev *ev = &hci_ev_table[event]; 7458 void *data; 7459 7460 if (!ev->func) 7461 return; 7462 7463 if (skb->len < ev->min_len) { 7464 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u", 7465 event, skb->len, ev->min_len); 7466 return; 7467 } 7468 7469 /* Just warn if the length is over max_len size it still be 7470 * possible to partially parse the event so leave to callback to 7471 * decide if that is acceptable. 7472 */ 7473 if (skb->len > ev->max_len) 7474 bt_dev_warn_ratelimited(hdev, 7475 "unexpected event 0x%2.2x length: %u > %u", 7476 event, skb->len, ev->max_len); 7477 7478 data = hci_ev_skb_pull(hdev, skb, event, ev->min_len); 7479 if (!data) 7480 return; 7481 7482 if (ev->req) 7483 ev->func_req(hdev, data, skb, opcode, status, req_complete, 7484 req_complete_skb); 7485 else 7486 ev->func(hdev, data, skb); 7487 } 7488 7489 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 7490 { 7491 struct hci_event_hdr *hdr = (void *) skb->data; 7492 hci_req_complete_t req_complete = NULL; 7493 hci_req_complete_skb_t req_complete_skb = NULL; 7494 struct sk_buff *orig_skb = NULL; 7495 u8 status = 0, event, req_evt = 0; 7496 u16 opcode = HCI_OP_NOP; 7497 7498 if (skb->len < sizeof(*hdr)) { 7499 bt_dev_err(hdev, "Malformed HCI Event"); 7500 goto done; 7501 } 7502 7503 kfree_skb(hdev->recv_event); 7504 hdev->recv_event = skb_clone(skb, GFP_KERNEL); 7505 7506 event = hdr->evt; 7507 if (!event) { 7508 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x", 7509 event); 7510 goto done; 7511 } 7512 7513 /* Only match event if command OGF is not for LE */ 7514 if (hdev->req_skb && 7515 hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 && 7516 hci_skb_event(hdev->req_skb) == event) { 7517 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb), 7518 status, &req_complete, &req_complete_skb); 7519 req_evt = event; 7520 } 7521 7522 /* If it looks like we might end up having to call 7523 * req_complete_skb, store a pristine copy of the skb since the 7524 * various handlers may modify the original one through 7525 * skb_pull() calls, etc. 7526 */ 7527 if (req_complete_skb || event == HCI_EV_CMD_STATUS || 7528 event == HCI_EV_CMD_COMPLETE) 7529 orig_skb = skb_clone(skb, GFP_KERNEL); 7530 7531 skb_pull(skb, HCI_EVENT_HDR_SIZE); 7532 7533 /* Store wake reason if we're suspended */ 7534 hci_store_wake_reason(hdev, event, skb); 7535 7536 bt_dev_dbg(hdev, "event 0x%2.2x", event); 7537 7538 hci_event_func(hdev, event, skb, &opcode, &status, &req_complete, 7539 &req_complete_skb); 7540 7541 if (req_complete) { 7542 req_complete(hdev, status, opcode); 7543 } else if (req_complete_skb) { 7544 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) { 7545 kfree_skb(orig_skb); 7546 orig_skb = NULL; 7547 } 7548 req_complete_skb(hdev, status, opcode, orig_skb); 7549 } 7550 7551 done: 7552 kfree_skb(orig_skb); 7553 kfree_skb(skb); 7554 hdev->stat.evt_rx++; 7555 } 7556