/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}

static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed. This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED &&
	    !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
112 */ 113 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 114 hdev->le_scan_type != LE_SCAN_ACTIVE) 115 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 116 hci_dev_unlock(hdev); 117 118 return rp->status; 119 } 120 121 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data, 122 struct sk_buff *skb) 123 { 124 struct hci_ev_status *rp = data; 125 126 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 127 128 if (rp->status) 129 return rp->status; 130 131 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); 132 133 return rp->status; 134 } 135 136 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, 137 struct sk_buff *skb) 138 { 139 struct hci_ev_status *rp = data; 140 141 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 142 143 if (rp->status) 144 return rp->status; 145 146 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); 147 148 return rp->status; 149 } 150 151 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, 152 struct sk_buff *skb) 153 { 154 struct hci_rp_remote_name_req_cancel *rp = data; 155 156 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 157 158 return rp->status; 159 } 160 161 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data, 162 struct sk_buff *skb) 163 { 164 struct hci_rp_role_discovery *rp = data; 165 struct hci_conn *conn; 166 167 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 168 169 if (rp->status) 170 return rp->status; 171 172 hci_dev_lock(hdev); 173 174 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 175 if (conn) 176 conn->role = rp->role; 177 178 hci_dev_unlock(hdev); 179 180 return rp->status; 181 } 182 183 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data, 184 struct sk_buff *skb) 185 { 186 struct hci_rp_read_link_policy *rp = data; 187 struct hci_conn *conn; 188 189 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 190 191 if (rp->status) 192 return rp->status; 193 194 hci_dev_lock(hdev); 195 196 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 197 if (conn) 198 conn->link_policy = __le16_to_cpu(rp->policy); 199 200 hci_dev_unlock(hdev); 201 202 return rp->status; 203 } 204 205 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data, 206 struct sk_buff *skb) 207 { 208 struct hci_rp_write_link_policy *rp = data; 209 struct hci_conn *conn; 210 void *sent; 211 212 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 213 214 if (rp->status) 215 return rp->status; 216 217 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); 218 if (!sent) 219 return rp->status; 220 221 hci_dev_lock(hdev); 222 223 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 224 if (conn) 225 conn->link_policy = get_unaligned_le16(sent + 2); 226 227 hci_dev_unlock(hdev); 228 229 return rp->status; 230 } 231 232 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data, 233 struct sk_buff *skb) 234 { 235 struct hci_rp_read_def_link_policy *rp = data; 236 237 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 238 239 if (rp->status) 240 return rp->status; 241 242 hdev->link_policy = __le16_to_cpu(rp->policy); 243 244 return rp->status; 245 } 246 247 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data, 248 struct sk_buff *skb) 249 { 250 struct hci_ev_status *rp = data; 251 void *sent; 252 253 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 254 255 if (rp->status) 256 return rp->status; 257 258 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 259 if (!sent) 260 return rp->status; 261 262 hdev->link_policy = 
	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

0x%2.2x", rp->status); 558 559 if (rp->status) 560 return rp->status; 561 562 setting = __le16_to_cpu(rp->voice_setting); 563 564 if (hdev->voice_setting == setting) 565 return rp->status; 566 567 hdev->voice_setting = setting; 568 569 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 570 571 if (hdev->notify) 572 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 573 574 return rp->status; 575 } 576 577 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data, 578 struct sk_buff *skb) 579 { 580 struct hci_ev_status *rp = data; 581 __u16 setting; 582 void *sent; 583 584 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 585 586 if (rp->status) 587 return rp->status; 588 589 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 590 if (!sent) 591 return rp->status; 592 593 setting = get_unaligned_le16(sent); 594 595 if (hdev->voice_setting == setting) 596 return rp->status; 597 598 hdev->voice_setting = setting; 599 600 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 601 602 if (hdev->notify) 603 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 604 605 return rp->status; 606 } 607 608 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data, 609 struct sk_buff *skb) 610 { 611 struct hci_rp_read_num_supported_iac *rp = data; 612 613 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 614 615 if (rp->status) 616 return rp->status; 617 618 hdev->num_iac = rp->num_iac; 619 620 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac); 621 622 return rp->status; 623 } 624 625 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data, 626 struct sk_buff *skb) 627 { 628 struct hci_ev_status *rp = data; 629 struct hci_cp_write_ssp_mode *sent; 630 631 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 632 633 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 634 if (!sent) 635 return rp->status; 636 637 hci_dev_lock(hdev); 638 639 if (!rp->status) { 640 if (sent->mode) 641 hdev->features[1][0] |= LMP_HOST_SSP; 642 else 643 hdev->features[1][0] &= ~LMP_HOST_SSP; 644 } 645 646 if (!rp->status) { 647 if (sent->mode) 648 hci_dev_set_flag(hdev, HCI_SSP_ENABLED); 649 else 650 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); 651 } 652 653 hci_dev_unlock(hdev); 654 655 return rp->status; 656 } 657 658 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data, 659 struct sk_buff *skb) 660 { 661 struct hci_ev_status *rp = data; 662 struct hci_cp_write_sc_support *sent; 663 664 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 665 666 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); 667 if (!sent) 668 return rp->status; 669 670 hci_dev_lock(hdev); 671 672 if (!rp->status) { 673 if (sent->support) 674 hdev->features[1][0] |= LMP_HOST_SC; 675 else 676 hdev->features[1][0] &= ~LMP_HOST_SC; 677 } 678 679 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) { 680 if (sent->support) 681 hci_dev_set_flag(hdev, HCI_SC_ENABLED); 682 else 683 hci_dev_clear_flag(hdev, HCI_SC_ENABLED); 684 } 685 686 hci_dev_unlock(hdev); 687 688 return rp->status; 689 } 690 691 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data, 692 struct sk_buff *skb) 693 { 694 struct hci_rp_read_local_version *rp = data; 695 696 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 697 698 if (rp->status) 699 return rp->status; 700 701 if (hci_dev_test_flag(hdev, HCI_SETUP) || 702 hci_dev_test_flag(hdev, HCI_CONFIG)) { 703 hdev->hci_ver = rp->hci_ver; 704 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 705 hdev->lmp_ver = rp->lmp_ver; 706 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 707 
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		u8 *key_enc_size = hci_conn_key_enc_size(conn);

		conn->enc_key_size = rp->key_size;
		status = 0;

		/* Attempt to check if the key size is too small or if it has
		 * been downgraded from the last time it was stored as part of
		 * the link_key.
		 */
		if (conn->enc_key_size < hdev->min_enc_key_size ||
		    (key_enc_size && conn->enc_key_size < *key_enc_size)) {
			/* In the peripheral role, conn->state has already been
			 * set to BT_CONNECTED while the L2CAP connection
			 * request may not have been received yet; at this
			 * moment the L2CAP layer does almost nothing with a
			 * non-zero status. So also clear the encryption
			 * related bits, and the handler of the L2CAP
			 * connection request will then see the correct secure
			 * state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}

		/* Update the key encryption size with the connection one */
		if (key_enc_size && *key_enc_size != conn->enc_key_size)
			*key_enc_size = conn->enc_key_size;
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}
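
	/* The timeout value follows the 2-octet connection handle in the
	 * sent command parameters, hence the sent + 2 offset.
	 */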
	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (hci_test_quirk(hdev,
				   HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	if (!read_voice_setting_capable(hdev))
		hdev->sco_pkts = 0;

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	if (!hdev->acl_mtu || !hdev->acl_pkts)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}
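
/* The User Confirmation and User Passkey reply handlers below share the
 * same response layout (status followed by bdaddr), which is why struct
 * hci_rp_user_confirm_reply is reused for all of them.
 */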
static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

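	/* Cache the host's default TX/RX PHY preferences. */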
	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only for an adv instance, since handle 0x00 shall be using
	 * HCI_OP_LE_SET_RANDOM_ADDR instead, as that allows both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
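	/* WRITE_ONCE() pairs with lockless readers of params->privacy_mode. */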
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv && !adv->periodic)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instances enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;
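
	/* Only the scan type of the first configured PHY is cached here. */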
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
			clear_pending_adv_report(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		}
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore,
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}

static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;
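
	/* Type 0x00 reads the current transmit power level and 0x01 the
	 * maximum level (HCI Read_Transmit_Power_Level command).
	 */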
	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status)
		return;

	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
		set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  HCI_ROLE_MASTER);
			if (IS_ERR(conn))
				bt_dev_err(hdev, "connection err: %ld",
					   PTR_ERR(conn));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
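			/* Connection setup is still in progress; report the
			 * failure up the stack and drop the setup reference.
			 */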
hci_connect_cfm(conn, status); 2332 hci_conn_drop(conn); 2333 } 2334 } 2335 2336 hci_dev_unlock(hdev); 2337 } 2338 2339 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 2340 { 2341 struct hci_cp_set_conn_encrypt *cp; 2342 struct hci_conn *conn; 2343 2344 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2345 2346 if (!status) 2347 return; 2348 2349 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 2350 if (!cp) 2351 return; 2352 2353 hci_dev_lock(hdev); 2354 2355 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2356 if (conn) { 2357 if (conn->state == BT_CONFIG) { 2358 hci_connect_cfm(conn, status); 2359 hci_conn_drop(conn); 2360 } 2361 } 2362 2363 hci_dev_unlock(hdev); 2364 } 2365 2366 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 2367 struct hci_conn *conn) 2368 { 2369 if (conn->state != BT_CONFIG || !conn->out) 2370 return 0; 2371 2372 if (conn->pending_sec_level == BT_SECURITY_SDP) 2373 return 0; 2374 2375 /* Only request authentication for SSP connections or non-SSP 2376 * devices with sec_level MEDIUM or HIGH, or if MITM protection 2377 * is requested. 2378 */ 2379 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 2380 conn->pending_sec_level != BT_SECURITY_FIPS && 2381 conn->pending_sec_level != BT_SECURITY_HIGH && 2382 conn->pending_sec_level != BT_SECURITY_MEDIUM) 2383 return 0; 2384 2385 return 1; 2386 } 2387 2388 static int hci_resolve_name(struct hci_dev *hdev, 2389 struct inquiry_entry *e) 2390 { 2391 struct hci_cp_remote_name_req cp; 2392 2393 memset(&cp, 0, sizeof(cp)); 2394 2395 bacpy(&cp.bdaddr, &e->data.bdaddr); 2396 cp.pscan_rep_mode = e->data.pscan_rep_mode; 2397 cp.pscan_mode = e->data.pscan_mode; 2398 cp.clock_offset = e->data.clock_offset; 2399 2400 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2401 } 2402 2403 static bool hci_resolve_next_name(struct hci_dev *hdev) 2404 { 2405 struct discovery_state *discov = &hdev->discovery; 2406 struct inquiry_entry *e; 2407 2408 if (list_empty(&discov->resolve)) 2409 return false; 2410 2411 /* We should stop if we already spent too much time resolving names. */ 2412 if (time_after(jiffies, discov->name_resolve_timeout)) { 2413 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); 2414 return false; 2415 } 2416 2417 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2418 if (!e) 2419 return false; 2420 2421 if (hci_resolve_name(hdev, e) == 0) { 2422 e->name_state = NAME_PENDING; 2423 return true; 2424 } 2425 2426 return false; 2427 } 2428 2429 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2430 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2431 { 2432 struct discovery_state *discov = &hdev->discovery; 2433 struct inquiry_entry *e; 2434 2435 /* Update the mgmt connected state if necessary. Be careful with 2436 * conn objects that exist but are not (yet) connected, however. 2437 * Only those in BT_CONFIG or BT_CONNECTED states can be 2438 * considered connected. 2439 */ 2440 if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) 2441 mgmt_device_connected(hdev, conn, name, name_len); 2442 2443 if (discov->state == DISCOVERY_STOPPED) 2444 return; 2445 2446 if (discov->state == DISCOVERY_STOPPING) 2447 goto discov_complete; 2448 2449 if (discov->state != DISCOVERY_RESOLVING) 2450 return; 2451 2452 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2453 /* If the device was not found in the list of devices whose names are 2454 * still pending,
there is no need to continue resolving the next name, as that 2455 * will be done upon receiving another Remote Name Request Complete 2456 * event. */ 2457 if (!e) 2458 return; 2459 2460 list_del(&e->list); 2461 2462 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; 2463 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, 2464 name, name_len); 2465 2466 if (hci_resolve_next_name(hdev)) 2467 return; 2468 2469 discov_complete: 2470 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2471 } 2472 2473 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2474 { 2475 struct hci_cp_remote_name_req *cp; 2476 struct hci_conn *conn; 2477 2478 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2479 2480 /* If successful, wait for the name req complete event before 2481 * checking for the need to do authentication. */ 2482 if (!status) 2483 return; 2484 2485 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2486 if (!cp) 2487 return; 2488 2489 hci_dev_lock(hdev); 2490 2491 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2492 2493 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2494 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2495 2496 if (!conn) 2497 goto unlock; 2498 2499 if (!hci_outgoing_auth_needed(hdev, conn)) 2500 goto unlock; 2501 2502 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2503 struct hci_cp_auth_requested auth_cp; 2504 2505 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2506 2507 auth_cp.handle = __cpu_to_le16(conn->handle); 2508 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2509 sizeof(auth_cp), &auth_cp); 2510 } 2511 2512 unlock: 2513 hci_dev_unlock(hdev); 2514 } 2515 2516 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2517 { 2518 struct hci_cp_read_remote_features *cp; 2519 struct hci_conn *conn; 2520 2521 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2522 2523 if (!status) 2524 return; 2525 2526 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2527 if (!cp) 2528 return; 2529 2530 hci_dev_lock(hdev); 2531 2532 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2533 if (conn) { 2534 if (conn->state == BT_CONFIG) { 2535 hci_connect_cfm(conn, status); 2536 hci_conn_drop(conn); 2537 } 2538 } 2539 2540 hci_dev_unlock(hdev); 2541 } 2542 2543 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2544 { 2545 struct hci_cp_read_remote_ext_features *cp; 2546 struct hci_conn *conn; 2547 2548 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2549 2550 if (!status) 2551 return; 2552 2553 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2554 if (!cp) 2555 return; 2556 2557 hci_dev_lock(hdev); 2558 2559 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2560 if (conn) { 2561 if (conn->state == BT_CONFIG) { 2562 hci_connect_cfm(conn, status); 2563 hci_conn_drop(conn); 2564 } 2565 } 2566 2567 hci_dev_unlock(hdev); 2568 } 2569 2570 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle, 2571 __u8 status) 2572 { 2573 struct hci_conn *acl; 2574 struct hci_link *link; 2575 2576 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status); 2577 2578 hci_dev_lock(hdev); 2579 2580 acl = hci_conn_hash_lookup_handle(hdev, handle); 2581 if (acl) { 2582 link = list_first_entry_or_null(&acl->link_list, 2583 struct hci_link, list); 2584 if (link && link->conn) { 2585 link->conn->state = BT_CLOSED; 2586 2587 hci_connect_cfm(link->conn, status); 2588 hci_conn_del(link->conn); 2589 } 2590 } 2591 2592 hci_dev_unlock(hdev); 2593 } 2594 2595 static
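/* Both Setup Synchronous Connection command-status handlers below
 * funnel failures through hci_setup_sync_conn_status() above: the
 * pending SCO/eSCO leg is not looked up by its own handle but reached
 * through the parent ACL's link list.  A sketch of that traversal
 * (illustrative only; the code above uses list_first_entry_or_null):
 *
 *	struct hci_link *link;
 *
 *	list_for_each_entry(link, &acl->link_list, list)
 *		if (link->conn)
 *			break;
 */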
void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2596 { 2597 struct hci_cp_setup_sync_conn *cp; 2598 2599 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2600 2601 if (!status) 2602 return; 2603 2604 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2605 if (!cp) 2606 return; 2607 2608 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); 2609 } 2610 2611 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2612 { 2613 struct hci_cp_enhanced_setup_sync_conn *cp; 2614 2615 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2616 2617 if (!status) 2618 return; 2619 2620 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2621 if (!cp) 2622 return; 2623 2624 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); 2625 } 2626 2627 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2628 { 2629 struct hci_cp_sniff_mode *cp; 2630 struct hci_conn *conn; 2631 2632 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2633 2634 if (!status) 2635 return; 2636 2637 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2638 if (!cp) 2639 return; 2640 2641 hci_dev_lock(hdev); 2642 2643 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2644 if (conn) { 2645 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2646 2647 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2648 hci_sco_setup(conn, status); 2649 } 2650 2651 hci_dev_unlock(hdev); 2652 } 2653 2654 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2655 { 2656 struct hci_cp_exit_sniff_mode *cp; 2657 struct hci_conn *conn; 2658 2659 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2660 2661 if (!status) 2662 return; 2663 2664 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2665 if (!cp) 2666 return; 2667 2668 hci_dev_lock(hdev); 2669 2670 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2671 if (conn) { 2672 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2673 2674 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2675 hci_sco_setup(conn, status); 2676 } 2677 2678 hci_dev_unlock(hdev); 2679 } 2680 2681 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2682 { 2683 struct hci_cp_disconnect *cp; 2684 struct hci_conn_params *params; 2685 struct hci_conn *conn; 2686 bool mgmt_conn; 2687 2688 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2689 2690 /* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and the device 2691 * is not suspended; otherwise clean up the connection immediately.
2692 */ 2693 if (!status && !hdev->suspended) 2694 return; 2695 2696 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2697 if (!cp) 2698 return; 2699 2700 hci_dev_lock(hdev); 2701 2702 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2703 if (!conn) 2704 goto unlock; 2705 2706 if (status) { 2707 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2708 conn->dst_type, status); 2709 2710 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2711 hdev->cur_adv_instance = conn->adv_instance; 2712 hci_enable_advertising(hdev); 2713 } 2714 2715 /* Inform sockets conn is gone before we delete it */ 2716 hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED); 2717 2718 goto done; 2719 } 2720 2721 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2722 2723 if (conn->type == ACL_LINK) { 2724 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2725 hci_remove_link_key(hdev, &conn->dst); 2726 } 2727 2728 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2729 if (params) { 2730 switch (params->auto_connect) { 2731 case HCI_AUTO_CONN_LINK_LOSS: 2732 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2733 break; 2734 fallthrough; 2735 2736 case HCI_AUTO_CONN_DIRECT: 2737 case HCI_AUTO_CONN_ALWAYS: 2738 hci_pend_le_list_del_init(params); 2739 hci_pend_le_list_add(params, &hdev->pend_le_conns); 2740 break; 2741 2742 default: 2743 break; 2744 } 2745 } 2746 2747 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2748 cp->reason, mgmt_conn); 2749 2750 hci_disconn_cfm(conn, cp->reason); 2751 2752 done: 2753 /* If the disconnection failed for any reason, the upper layer 2754 * does not retry the disconnection in the current implementation. 2755 * Hence, we need to do some basic cleanup here and re-enable 2756 * advertising if necessary. 2757 */ 2758 hci_conn_del(conn); 2759 unlock: 2760 hci_dev_unlock(hdev); 2761 } 2762 2763 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2764 { 2765 /* When using controller-based address resolution, the new address 2766 * types 0x02 and 0x03 are used. These types need to be converted 2767 * back into either public or random address type. 2768 */ 2769 switch (type) { 2770 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2771 if (resolved) 2772 *resolved = true; 2773 return ADDR_LE_DEV_PUBLIC; 2774 case ADDR_LE_DEV_RANDOM_RESOLVED: 2775 if (resolved) 2776 *resolved = true; 2777 return ADDR_LE_DEV_RANDOM; 2778 } 2779 2780 if (resolved) 2781 *resolved = false; 2782 return type; 2783 } 2784 2785 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2786 u8 peer_addr_type, u8 own_address_type, 2787 u8 filter_policy) 2788 { 2789 struct hci_conn *conn; 2790 2791 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2792 peer_addr_type); 2793 if (!conn) 2794 return; 2795 2796 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2797 2798 /* Store the initiator and responder address information which 2799 * is needed for SMP. These values will not change during the 2800 * lifetime of the connection.
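 * For example, when own_address_type resolves to ADDR_LE_DEV_RANDOM the
 * local RPA in hdev->random_addr becomes conn->init_addr, while a
 * public own address copies hdev->bdaddr instead.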
2801 */ 2802 conn->init_addr_type = own_address_type; 2803 if (own_address_type == ADDR_LE_DEV_RANDOM) 2804 bacpy(&conn->init_addr, &hdev->random_addr); 2805 else 2806 bacpy(&conn->init_addr, &hdev->bdaddr); 2807 2808 conn->resp_addr_type = peer_addr_type; 2809 bacpy(&conn->resp_addr, peer_addr); 2810 } 2811 2812 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2813 { 2814 struct hci_cp_le_create_conn *cp; 2815 2816 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2817 2818 /* All connection failure handling is taken care of by the 2819 * hci_conn_failed function which is triggered by the HCI 2820 * request completion callbacks used for connecting. 2821 */ 2822 if (status) 2823 return; 2824 2825 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2826 if (!cp) 2827 return; 2828 2829 hci_dev_lock(hdev); 2830 2831 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2832 cp->own_address_type, cp->filter_policy); 2833 2834 hci_dev_unlock(hdev); 2835 } 2836 2837 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2838 { 2839 struct hci_cp_le_ext_create_conn *cp; 2840 2841 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2842 2843 /* All connection failure handling is taken care of by the 2844 * hci_conn_failed function which is triggered by the HCI 2845 * request completion callbacks used for connecting. 2846 */ 2847 if (status) 2848 return; 2849 2850 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2851 if (!cp) 2852 return; 2853 2854 hci_dev_lock(hdev); 2855 2856 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2857 cp->own_addr_type, cp->filter_policy); 2858 2859 hci_dev_unlock(hdev); 2860 } 2861 2862 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2863 { 2864 struct hci_cp_le_read_remote_features *cp; 2865 struct hci_conn *conn; 2866 2867 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2868 2869 if (!status) 2870 return; 2871 2872 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2873 if (!cp) 2874 return; 2875 2876 hci_dev_lock(hdev); 2877 2878 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2879 if (conn) { 2880 if (conn->state == BT_CONFIG) { 2881 hci_connect_cfm(conn, status); 2882 hci_conn_drop(conn); 2883 } 2884 } 2885 2886 hci_dev_unlock(hdev); 2887 } 2888 2889 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2890 { 2891 struct hci_cp_le_start_enc *cp; 2892 struct hci_conn *conn; 2893 2894 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2895 2896 if (!status) 2897 return; 2898 2899 hci_dev_lock(hdev); 2900 2901 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2902 if (!cp) 2903 goto unlock; 2904 2905 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2906 if (!conn) 2907 goto unlock; 2908 2909 if (conn->state != BT_CONNECTED) 2910 goto unlock; 2911 2912 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2913 hci_conn_drop(conn); 2914 2915 unlock: 2916 hci_dev_unlock(hdev); 2917 } 2918 2919 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2920 { 2921 struct hci_cp_switch_role *cp; 2922 struct hci_conn *conn; 2923 2924 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2925 2926 if (!status) 2927 return; 2928 2929 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2930 if (!cp) 2931 return; 2932 2933 hci_dev_lock(hdev); 2934 2935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2936 if (conn) 2937 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2938 2939 hci_dev_unlock(hdev); 2940 } 2941 2942 static void 
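/* ev_bdaddr_type() above undoes controller-based address resolution:
 * e.g. ADDR_LE_DEV_PUBLIC_RESOLVED is reported onwards as
 * ADDR_LE_DEV_PUBLIC with *resolved set to true.  A caller interested
 * in the resolution state would use it as follows (illustrative only;
 * ev stands for any LE event carrying an address type):
 *
 *	bool resolved;
 *	u8 bdaddr_type = ev_bdaddr_type(hdev, ev->bdaddr_type, &resolved);
 */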
hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, 2943 struct sk_buff *skb) 2944 { 2945 struct hci_ev_status *ev = data; 2946 struct discovery_state *discov = &hdev->discovery; 2947 struct inquiry_entry *e; 2948 2949 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 2950 2951 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2952 return; 2953 2954 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2955 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2956 2957 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2958 return; 2959 2960 hci_dev_lock(hdev); 2961 2962 if (discov->state != DISCOVERY_FINDING) 2963 goto unlock; 2964 2965 if (list_empty(&discov->resolve)) { 2966 /* When BR/EDR inquiry is active and no LE scanning is in 2967 * progress, then change discovery state to indicate completion. 2968 * 2969 * When running LE scanning and BR/EDR inquiry simultaneously 2970 * and the LE scan already finished, then change the discovery 2971 * state to indicate completion. 2972 */ 2973 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2974 !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) 2975 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2976 goto unlock; 2977 } 2978 2979 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2980 if (e && hci_resolve_name(hdev, e) == 0) { 2981 e->name_state = NAME_PENDING; 2982 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 2983 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; 2984 } else { 2985 /* When BR/EDR inquiry is active and no LE scanning is in 2986 * progress, then change discovery state to indicate completion. 2987 * 2988 * When running LE scanning and BR/EDR inquiry simultaneously 2989 * and the LE scan already finished, then change the discovery 2990 * state to indicate completion. 
2991 */ 2992 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2993 !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) 2994 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2995 } 2996 2997 unlock: 2998 hci_dev_unlock(hdev); 2999 } 3000 3001 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, 3002 struct sk_buff *skb) 3003 { 3004 struct hci_ev_inquiry_result *ev = edata; 3005 struct inquiry_data data; 3006 int i; 3007 3008 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, 3009 flex_array_size(ev, info, ev->num))) 3010 return; 3011 3012 bt_dev_dbg(hdev, "num %d", ev->num); 3013 3014 if (!ev->num) 3015 return; 3016 3017 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3018 return; 3019 3020 hci_dev_lock(hdev); 3021 3022 for (i = 0; i < ev->num; i++) { 3023 struct inquiry_info *info = &ev->info[i]; 3024 u32 flags; 3025 3026 bacpy(&data.bdaddr, &info->bdaddr); 3027 data.pscan_rep_mode = info->pscan_rep_mode; 3028 data.pscan_period_mode = info->pscan_period_mode; 3029 data.pscan_mode = info->pscan_mode; 3030 memcpy(data.dev_class, info->dev_class, 3); 3031 data.clock_offset = info->clock_offset; 3032 data.rssi = HCI_RSSI_INVALID; 3033 data.ssp_mode = 0x00; 3034 3035 flags = hci_inquiry_cache_update(hdev, &data, false); 3036 3037 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3038 info->dev_class, HCI_RSSI_INVALID, 3039 flags, NULL, 0, NULL, 0, 0); 3040 } 3041 3042 hci_dev_unlock(hdev); 3043 } 3044 3045 static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn) 3046 { 3047 struct hci_cp_read_enc_key_size cp; 3048 u8 *key_enc_size = hci_conn_key_enc_size(conn); 3049 3050 if (!read_key_size_capable(hdev)) { 3051 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3052 return -EOPNOTSUPP; 3053 } 3054 3055 bt_dev_dbg(hdev, "hcon %p", conn); 3056 3057 memset(&cp, 0, sizeof(cp)); 3058 cp.handle = cpu_to_le16(conn->handle); 3059 3060 /* If the key enc_size is already known, use it as conn->enc_key_size, 3061 * otherwise use hdev->min_enc_key_size so the likes of 3062 * l2cap_check_enc_key_size don't fail while waiting for 3063 * HCI_OP_READ_ENC_KEY_SIZE response. 3064 */ 3065 if (key_enc_size && *key_enc_size) 3066 conn->enc_key_size = *key_enc_size; 3067 else 3068 conn->enc_key_size = hdev->min_enc_key_size; 3069 3070 return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); 3071 } 3072 3073 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, 3074 struct sk_buff *skb) 3075 { 3076 struct hci_ev_conn_complete *ev = data; 3077 struct hci_conn *conn; 3078 u8 status = ev->status; 3079 3080 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3081 3082 hci_dev_lock(hdev); 3083 3084 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 3085 if (!conn) { 3086 /* In case of an error status with no connection pending, just 3087 * unlock, as there is nothing to clean up. 3088 */ 3089 if (ev->status) 3090 goto unlock; 3091 3092 /* Connection may not exist if auto-connected. Check the bredr 3093 * allowlist to see if this device is allowed to auto connect. 3094 * If the link is an ACL type, create a connection object 3095 * automatically. 3096 * 3097 * Auto-connect will only occur if the event filter is 3098 * programmed with a given address. Right now, the event filter is 3099 * only used during suspend.
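 * (For example, a wake-capable peripheral on the accept list can bring
 * the link back up while the host is suspended, in which case the
 * Connection Complete event arrives without a pending hci_conn.)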
3100 */ 3101 if (ev->link_type == ACL_LINK && 3102 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 3103 &ev->bdaddr, 3104 BDADDR_BREDR)) { 3105 conn = hci_conn_add_unset(hdev, ev->link_type, 3106 &ev->bdaddr, HCI_ROLE_SLAVE); 3107 if (IS_ERR(conn)) { 3108 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); 3109 goto unlock; 3110 } 3111 } else { 3112 if (ev->link_type != SCO_LINK) 3113 goto unlock; 3114 3115 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 3116 &ev->bdaddr); 3117 if (!conn) 3118 goto unlock; 3119 3120 conn->type = SCO_LINK; 3121 } 3122 } 3123 3124 /* The HCI_Connection_Complete event is only sent once per connection. 3125 * Processing it more than once per connection can corrupt kernel memory. 3126 * 3127 * As the connection handle is set here for the first time, it indicates 3128 * whether the connection is already set up. 3129 */ 3130 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { 3131 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 3132 goto unlock; 3133 } 3134 3135 if (!status) { 3136 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); 3137 if (status) 3138 goto done; 3139 3140 if (conn->type == ACL_LINK) { 3141 conn->state = BT_CONFIG; 3142 hci_conn_hold(conn); 3143 3144 if (!conn->out && !hci_conn_ssp_enabled(conn) && 3145 !hci_find_link_key(hdev, &ev->bdaddr)) 3146 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3147 else 3148 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3149 } else 3150 conn->state = BT_CONNECTED; 3151 3152 hci_debugfs_create_conn(conn); 3153 hci_conn_add_sysfs(conn); 3154 3155 if (test_bit(HCI_AUTH, &hdev->flags)) 3156 set_bit(HCI_CONN_AUTH, &conn->flags); 3157 3158 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 3159 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3160 3161 /* "Link key request" completed ahead of "connect request" completes */ 3162 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3163 ev->link_type == ACL_LINK) { 3164 struct link_key *key; 3165 3166 key = hci_find_link_key(hdev, &ev->bdaddr); 3167 if (key) { 3168 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3169 hci_read_enc_key_size(hdev, conn); 3170 hci_encrypt_cfm(conn, ev->status); 3171 } 3172 } 3173 3174 /* Get remote features */ 3175 if (conn->type == ACL_LINK) { 3176 struct hci_cp_read_remote_features cp; 3177 cp.handle = ev->handle; 3178 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 3179 sizeof(cp), &cp); 3180 3181 hci_update_scan(hdev); 3182 } 3183 3184 /* Set packet type for incoming connection */ 3185 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 3186 struct hci_cp_change_conn_ptype cp; 3187 cp.handle = ev->handle; 3188 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3189 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 3190 &cp); 3191 } 3192 } 3193 3194 if (conn->type == ACL_LINK) 3195 hci_sco_setup(conn, ev->status); 3196 3197 done: 3198 if (status) { 3199 hci_conn_failed(conn, status); 3200 } else if (ev->link_type == SCO_LINK) { 3201 switch (conn->setting & SCO_AIRMODE_MASK) { 3202 case SCO_AIRMODE_CVSD: 3203 if (hdev->notify) 3204 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 3205 break; 3206 } 3207 3208 hci_connect_cfm(conn, status); 3209 } 3210 3211 unlock: 3212 hci_dev_unlock(hdev); 3213 } 3214 3215 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 3216 { 3217 struct hci_cp_reject_conn_req cp; 3218 3219 bacpy(&cp.bdaddr, bdaddr); 3220 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 3221 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 3222 } 3223 3224 static void 
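/* The incoming connection handler below screens a request in order:
 * the same-address spoof check, the reject list, and then the
 * HCI_CONNECTABLE or accept-list requirement when mgmt is in control.
 * Only after all checks pass is an hci_conn created or reused and the
 * request accepted or deferred to the protocol layer.
 */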
hci_conn_request_evt(struct hci_dev *hdev, void *data, 3225 struct sk_buff *skb) 3226 { 3227 struct hci_ev_conn_request *ev = data; 3228 int mask = hdev->link_mode; 3229 struct inquiry_entry *ie; 3230 struct hci_conn *conn; 3231 __u8 flags = 0; 3232 3233 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); 3234 3235 /* Reject an incoming connection from a device with the same BD_ADDR 3236 * to guard against CVE-2020-26555. 3237 */ 3238 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) { 3239 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR", 3240 &ev->bdaddr); 3241 hci_reject_conn(hdev, &ev->bdaddr); 3242 return; 3243 } 3244 3245 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 3246 &flags); 3247 3248 if (!(mask & HCI_LM_ACCEPT)) { 3249 hci_reject_conn(hdev, &ev->bdaddr); 3250 return; 3251 } 3252 3253 hci_dev_lock(hdev); 3254 3255 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, 3256 BDADDR_BREDR)) { 3257 hci_reject_conn(hdev, &ev->bdaddr); 3258 goto unlock; 3259 } 3260 3261 /* Require HCI_CONNECTABLE or an accept list entry to accept the 3262 * connection. These features are only touched through mgmt so 3263 * only do the checks if HCI_MGMT is set. 3264 */ 3265 if (hci_dev_test_flag(hdev, HCI_MGMT) && 3266 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 3267 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 3268 BDADDR_BREDR)) { 3269 hci_reject_conn(hdev, &ev->bdaddr); 3270 goto unlock; 3271 } 3272 3273 /* Connection accepted */ 3274 3275 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3276 if (ie) 3277 memcpy(ie->data.dev_class, ev->dev_class, 3); 3278 3279 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 3280 &ev->bdaddr); 3281 if (!conn) { 3282 conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, 3283 HCI_ROLE_SLAVE); 3284 if (IS_ERR(conn)) { 3285 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); 3286 goto unlock; 3287 } 3288 } 3289 3290 memcpy(conn->dev_class, ev->dev_class, 3); 3291 3292 hci_dev_unlock(hdev); 3293 3294 if (ev->link_type == ACL_LINK || 3295 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 3296 struct hci_cp_accept_conn_req cp; 3297 conn->state = BT_CONNECT; 3298 3299 bacpy(&cp.bdaddr, &ev->bdaddr); 3300 3301 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 3302 cp.role = 0x00; /* Become central */ 3303 else 3304 cp.role = 0x01; /* Remain peripheral */ 3305 3306 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 3307 } else if (!(flags & HCI_PROTO_DEFER)) { 3308 struct hci_cp_accept_sync_conn_req cp; 3309 conn->state = BT_CONNECT; 3310 3311 bacpy(&cp.bdaddr, &ev->bdaddr); 3312 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3313 3314 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 3315 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 3316 cp.max_latency = cpu_to_le16(0xffff); 3317 cp.content_format = cpu_to_le16(hdev->voice_setting); 3318 cp.retrans_effort = 0xff; 3319 3320 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 3321 &cp); 3322 } else { 3323 conn->state = BT_CONNECT2; 3324 hci_connect_cfm(conn, 0); 3325 } 3326 3327 return; 3328 unlock: 3329 hci_dev_unlock(hdev); 3330 } 3331 3332 static u8 hci_to_mgmt_reason(u8 err) 3333 { 3334 switch (err) { 3335 case HCI_ERROR_CONNECTION_TIMEOUT: 3336 return MGMT_DEV_DISCONN_TIMEOUT; 3337 case HCI_ERROR_REMOTE_USER_TERM: 3338 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3339 case HCI_ERROR_REMOTE_POWER_OFF: 3340 return MGMT_DEV_DISCONN_REMOTE; 3341 case HCI_ERROR_LOCAL_HOST_TERM: 3342 return MGMT_DEV_DISCONN_LOCAL_HOST; 3343 default: 3344
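/* Any other HCI error has no dedicated mgmt translation. */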
return MGMT_DEV_DISCONN_UNKNOWN; 3345 } 3346 } 3347 3348 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, 3349 struct sk_buff *skb) 3350 { 3351 struct hci_ev_disconn_complete *ev = data; 3352 u8 reason; 3353 struct hci_conn_params *params; 3354 struct hci_conn *conn; 3355 bool mgmt_connected; 3356 3357 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3358 3359 hci_dev_lock(hdev); 3360 3361 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3362 if (!conn) 3363 goto unlock; 3364 3365 if (ev->status) { 3366 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3367 conn->dst_type, ev->status); 3368 goto unlock; 3369 } 3370 3371 conn->state = BT_CLOSED; 3372 3373 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3374 3375 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3376 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3377 else 3378 reason = hci_to_mgmt_reason(ev->reason); 3379 3380 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3381 reason, mgmt_connected); 3382 3383 if (conn->type == ACL_LINK) { 3384 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3385 hci_remove_link_key(hdev, &conn->dst); 3386 3387 hci_update_scan(hdev); 3388 } 3389 3390 /* Re-enable passive scanning if disconnected device is marked 3391 * as auto-connectable. 3392 */ 3393 if (conn->type == LE_LINK) { 3394 params = hci_conn_params_lookup(hdev, &conn->dst, 3395 conn->dst_type); 3396 if (params) { 3397 switch (params->auto_connect) { 3398 case HCI_AUTO_CONN_LINK_LOSS: 3399 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3400 break; 3401 fallthrough; 3402 3403 case HCI_AUTO_CONN_DIRECT: 3404 case HCI_AUTO_CONN_ALWAYS: 3405 hci_pend_le_list_del_init(params); 3406 hci_pend_le_list_add(params, 3407 &hdev->pend_le_conns); 3408 hci_update_passive_scan(hdev); 3409 break; 3410 3411 default: 3412 break; 3413 } 3414 } 3415 } 3416 3417 hci_disconn_cfm(conn, ev->reason); 3418 3419 /* Re-enable advertising if necessary, since it might 3420 * have been disabled by the connection. From the 3421 * HCI_LE_Set_Advertise_Enable command description in 3422 * the core specification (v4.0): 3423 * "The Controller shall continue advertising until the Host 3424 * issues an LE_Set_Advertise_Enable command with 3425 * Advertising_Enable set to 0x00 (Advertising is disabled) 3426 * or until a connection is created or until the Advertising 3427 * is timed out due to Directed Advertising." 
3428 */ 3429 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3430 hdev->cur_adv_instance = conn->adv_instance; 3431 hci_enable_advertising(hdev); 3432 } 3433 3434 hci_conn_del(conn); 3435 3436 unlock: 3437 hci_dev_unlock(hdev); 3438 } 3439 3440 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, 3441 struct sk_buff *skb) 3442 { 3443 struct hci_ev_auth_complete *ev = data; 3444 struct hci_conn *conn; 3445 3446 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3447 3448 hci_dev_lock(hdev); 3449 3450 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3451 if (!conn) 3452 goto unlock; 3453 3454 if (!ev->status) { 3455 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3456 set_bit(HCI_CONN_AUTH, &conn->flags); 3457 conn->sec_level = conn->pending_sec_level; 3458 } else { 3459 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3460 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3461 3462 mgmt_auth_failed(conn, ev->status); 3463 } 3464 3465 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3466 3467 if (conn->state == BT_CONFIG) { 3468 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3469 struct hci_cp_set_conn_encrypt cp; 3470 cp.handle = ev->handle; 3471 cp.encrypt = 0x01; 3472 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3473 &cp); 3474 } else { 3475 conn->state = BT_CONNECTED; 3476 hci_connect_cfm(conn, ev->status); 3477 hci_conn_drop(conn); 3478 } 3479 } else { 3480 hci_auth_cfm(conn, ev->status); 3481 3482 hci_conn_hold(conn); 3483 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3484 hci_conn_drop(conn); 3485 } 3486 3487 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3488 if (!ev->status) { 3489 struct hci_cp_set_conn_encrypt cp; 3490 cp.handle = ev->handle; 3491 cp.encrypt = 0x01; 3492 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3493 &cp); 3494 } else { 3495 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3496 hci_encrypt_cfm(conn, ev->status); 3497 } 3498 } 3499 3500 unlock: 3501 hci_dev_unlock(hdev); 3502 } 3503 3504 static void hci_remote_name_evt(struct hci_dev *hdev, void *data, 3505 struct sk_buff *skb) 3506 { 3507 struct hci_ev_remote_name *ev = data; 3508 struct hci_conn *conn; 3509 3510 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3511 3512 hci_dev_lock(hdev); 3513 3514 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3515 3516 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3517 goto check_auth; 3518 3519 if (ev->status == 0) 3520 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3521 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3522 else 3523 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3524 3525 check_auth: 3526 if (!conn) 3527 goto unlock; 3528 3529 if (!hci_outgoing_auth_needed(hdev, conn)) 3530 goto unlock; 3531 3532 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3533 struct hci_cp_auth_requested cp; 3534 3535 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3536 3537 cp.handle = __cpu_to_le16(conn->handle); 3538 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3539 } 3540 3541 unlock: 3542 hci_dev_unlock(hdev); 3543 } 3544 3545 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, 3546 struct sk_buff *skb) 3547 { 3548 struct hci_ev_encrypt_change *ev = data; 3549 struct hci_conn *conn; 3550 3551 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3552 3553 hci_dev_lock(hdev); 3554 3555 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3556 if (!conn) 3557 goto unlock; 3558 3559 if (!ev->status) { 3560 if (ev->encrypt) { 3561 /* Encryption 
implies authentication */ 3562 set_bit(HCI_CONN_AUTH, &conn->flags); 3563 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3564 conn->sec_level = conn->pending_sec_level; 3565 3566 /* P-256 authentication key implies FIPS */ 3567 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3568 set_bit(HCI_CONN_FIPS, &conn->flags); 3569 3570 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3571 conn->type == LE_LINK) 3572 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3573 } else { 3574 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3575 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3576 } 3577 } 3578 3579 /* We should disregard the current RPA and generate a new one 3580 * whenever the encryption procedure fails. 3581 */ 3582 if (ev->status && conn->type == LE_LINK) { 3583 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3584 hci_adv_instances_set_rpa_expired(hdev, true); 3585 } 3586 3587 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3588 3589 /* Check link security requirements are met */ 3590 if (!hci_conn_check_link_mode(conn)) 3591 ev->status = HCI_ERROR_AUTH_FAILURE; 3592 3593 if (ev->status && conn->state == BT_CONNECTED) { 3594 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3595 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3596 3597 /* Notify upper layers so they can clean up before 3598 * disconnecting. 3599 */ 3600 hci_encrypt_cfm(conn, ev->status); 3601 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3602 hci_conn_drop(conn); 3603 goto unlock; 3604 } 3605 3606 /* Try reading the encryption key size for encrypted ACL links */ 3607 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3608 if (hci_read_enc_key_size(hdev, conn)) 3609 goto notify; 3610 3611 goto unlock; 3612 } 3613 3614 /* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers 3615 * to avoid unexpected SMP command errors when pairing. 3616 */ 3617 if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT)) 3618 goto notify; 3619 3620 /* Set the default Authenticated Payload Timeout after an LE link is 3621 * established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3, the 3622 * WRITE_AUTH_PAYLOAD_TIMEOUT command should be sent when the link is 3623 * active and encryption is enabled. The conn type can be either LE or 3624 * ACL, the controller must support LMP Ping, and AES-CCM encryption 3625 * must be in use.
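 * In other words, the command is only sent when both HCI_CONN_ENCRYPT
 * and HCI_CONN_AES_CCM are set and the link is either an ACL with LMP
 * Ping support or an LE link with the LE Ping feature.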
3626 */ 3627 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3628 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3629 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3630 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3631 struct hci_cp_write_auth_payload_to cp; 3632 3633 cp.handle = cpu_to_le16(conn->handle); 3634 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3635 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3636 sizeof(cp), &cp)) 3637 bt_dev_err(hdev, "write auth payload timeout failed"); 3638 } 3639 3640 notify: 3641 hci_encrypt_cfm(conn, ev->status); 3642 3643 unlock: 3644 hci_dev_unlock(hdev); 3645 } 3646 3647 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, 3648 struct sk_buff *skb) 3649 { 3650 struct hci_ev_change_link_key_complete *ev = data; 3651 struct hci_conn *conn; 3652 3653 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3654 3655 hci_dev_lock(hdev); 3656 3657 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3658 if (conn) { 3659 if (!ev->status) 3660 set_bit(HCI_CONN_SECURE, &conn->flags); 3661 3662 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3663 3664 hci_key_change_cfm(conn, ev->status); 3665 } 3666 3667 hci_dev_unlock(hdev); 3668 } 3669 3670 static void hci_remote_features_evt(struct hci_dev *hdev, void *data, 3671 struct sk_buff *skb) 3672 { 3673 struct hci_ev_remote_features *ev = data; 3674 struct hci_conn *conn; 3675 3676 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3677 3678 hci_dev_lock(hdev); 3679 3680 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3681 if (!conn) 3682 goto unlock; 3683 3684 if (!ev->status) 3685 memcpy(conn->features[0], ev->features, 8); 3686 3687 if (conn->state != BT_CONFIG) 3688 goto unlock; 3689 3690 if (!ev->status && lmp_ext_feat_capable(hdev) && 3691 lmp_ext_feat_capable(conn)) { 3692 struct hci_cp_read_remote_ext_features cp; 3693 cp.handle = ev->handle; 3694 cp.page = 0x01; 3695 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3696 sizeof(cp), &cp); 3697 goto unlock; 3698 } 3699 3700 if (!ev->status) { 3701 struct hci_cp_remote_name_req cp; 3702 memset(&cp, 0, sizeof(cp)); 3703 bacpy(&cp.bdaddr, &conn->dst); 3704 cp.pscan_rep_mode = 0x02; 3705 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3706 } else { 3707 mgmt_device_connected(hdev, conn, NULL, 0); 3708 } 3709 3710 if (!hci_outgoing_auth_needed(hdev, conn)) { 3711 conn->state = BT_CONNECTED; 3712 hci_connect_cfm(conn, ev->status); 3713 hci_conn_drop(conn); 3714 } 3715 3716 unlock: 3717 hci_dev_unlock(hdev); 3718 } 3719 3720 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3721 { 3722 cancel_delayed_work(&hdev->cmd_timer); 3723 3724 rcu_read_lock(); 3725 if (!test_bit(HCI_RESET, &hdev->flags)) { 3726 if (ncmd) { 3727 cancel_delayed_work(&hdev->ncmd_timer); 3728 atomic_set(&hdev->cmd_cnt, 1); 3729 } else { 3730 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 3731 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, 3732 HCI_NCMD_TIMEOUT); 3733 } 3734 } 3735 rcu_read_unlock(); 3736 } 3737 3738 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, 3739 struct sk_buff *skb) 3740 { 3741 struct hci_rp_le_read_buffer_size_v2 *rp = data; 3742 3743 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3744 3745 if (rp->status) 3746 return rp->status; 3747 3748 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); 3749 hdev->le_pkts = rp->acl_max_pkt; 3750 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); 3751 hdev->iso_pkts = 
rp->iso_max_pkt; 3752 3753 hdev->le_cnt = hdev->le_pkts; 3754 hdev->iso_cnt = hdev->iso_pkts; 3755 3756 BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu, 3757 hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts); 3758 3759 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) 3760 return HCI_ERROR_INVALID_PARAMETERS; 3761 3762 return rp->status; 3763 } 3764 3765 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status) 3766 { 3767 struct hci_conn *conn, *tmp; 3768 3769 lockdep_assert_held(&hdev->lock); 3770 3771 list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { 3772 if (conn->type != CIS_LINK || 3773 conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig) 3774 continue; 3775 3776 if (HCI_CONN_HANDLE_UNSET(conn->handle)) 3777 hci_conn_failed(conn, status); 3778 } 3779 } 3780 3781 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, 3782 struct sk_buff *skb) 3783 { 3784 struct hci_rp_le_set_cig_params *rp = data; 3785 struct hci_cp_le_set_cig_params *cp; 3786 struct hci_conn *conn; 3787 u8 status = rp->status; 3788 bool pending = false; 3789 int i; 3790 3791 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3792 3793 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS); 3794 if (!rp->status && (!cp || rp->num_handles != cp->num_cis || 3795 rp->cig_id != cp->cig_id)) { 3796 bt_dev_err(hdev, "unexpected Set CIG Parameters response data"); 3797 status = HCI_ERROR_UNSPECIFIED; 3798 } 3799 3800 hci_dev_lock(hdev); 3801 3802 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554 3803 * 3804 * If the Status return parameter is non-zero, then the state of the CIG 3805 * and its CIS configurations shall not be changed by the command. If 3806 * the CIG did not already exist, it shall not be created. 3807 */ 3808 if (status) { 3809 /* Keep current configuration, fail only the unbound CIS */ 3810 hci_unbound_cis_failed(hdev, rp->cig_id, status); 3811 goto unlock; 3812 } 3813 3814 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553 3815 * 3816 * If the Status return parameter is zero, then the Controller shall 3817 * set the Connection_Handle arrayed return parameter to the connection 3818 * handle(s) corresponding to the CIS configurations specified in 3819 * the CIS_IDs command parameter, in the same order.
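 * For example, with cp->num_cis == 2 the controller returns
 * rp->handle[0] for cp->cis[0] and rp->handle[1] for cp->cis[1]; the
 * loop below relies on that ordering when binding each handle to the
 * matching hci_conn.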
3820 */ 3821 for (i = 0; i < rp->num_handles; ++i) { 3822 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id, 3823 cp->cis[i].cis_id); 3824 if (!conn || !bacmp(&conn->dst, BDADDR_ANY)) 3825 continue; 3826 3827 if (conn->state != BT_BOUND && conn->state != BT_CONNECT) 3828 continue; 3829 3830 if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i]))) 3831 continue; 3832 3833 if (conn->state == BT_CONNECT) 3834 pending = true; 3835 } 3836 3837 unlock: 3838 if (pending) 3839 hci_le_create_cis_pending(hdev); 3840 3841 hci_dev_unlock(hdev); 3842 3843 return rp->status; 3844 } 3845 3846 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, 3847 struct sk_buff *skb) 3848 { 3849 struct hci_rp_le_setup_iso_path *rp = data; 3850 struct hci_cp_le_setup_iso_path *cp; 3851 struct hci_conn *conn; 3852 3853 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3854 3855 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); 3856 if (!cp) 3857 return rp->status; 3858 3859 hci_dev_lock(hdev); 3860 3861 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 3862 if (!conn) 3863 goto unlock; 3864 3865 if (rp->status) { 3866 hci_connect_cfm(conn, rp->status); 3867 hci_conn_del(conn); 3868 goto unlock; 3869 } 3870 3871 switch (cp->direction) { 3872 /* Input (Host to Controller) */ 3873 case 0x00: 3874 /* Only confirm connection if output only */ 3875 if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu) 3876 hci_connect_cfm(conn, rp->status); 3877 break; 3878 /* Output (Controller to Host) */ 3879 case 0x01: 3880 /* Confirm connection since conn->iso_qos is always configured 3881 * last. 3882 */ 3883 hci_connect_cfm(conn, rp->status); 3884 3885 /* Notify device connected in case it is a BIG Sync */ 3886 if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) 3887 mgmt_device_connected(hdev, conn, NULL, 0); 3888 3889 break; 3890 } 3891 3892 unlock: 3893 hci_dev_unlock(hdev); 3894 return rp->status; 3895 } 3896 3897 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) 3898 { 3899 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3900 } 3901 3902 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, 3903 struct sk_buff *skb) 3904 { 3905 struct hci_ev_status *rp = data; 3906 struct hci_cp_le_set_per_adv_params *cp; 3907 3908 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3909 3910 if (rp->status) 3911 return rp->status; 3912 3913 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); 3914 if (!cp) 3915 return rp->status; 3916 3917 /* TODO: set the conn state */ 3918 return rp->status; 3919 } 3920 3921 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, 3922 struct sk_buff *skb) 3923 { 3924 struct hci_ev_status *rp = data; 3925 struct hci_cp_le_set_per_adv_enable *cp; 3926 struct adv_info *adv = NULL, *n; 3927 u8 per_adv_cnt = 0; 3928 3929 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3930 3931 if (rp->status) 3932 return rp->status; 3933 3934 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); 3935 if (!cp) 3936 return rp->status; 3937 3938 hci_dev_lock(hdev); 3939 3940 adv = hci_find_adv_instance(hdev, cp->handle); 3941 3942 if (cp->enable) { 3943 hci_dev_set_flag(hdev, HCI_LE_PER_ADV); 3944 3945 if (adv) 3946 adv->enabled = true; 3947 } else { 3948 /* If just one instance was disabled check if there are 3949 * any other instance enabled before clearing HCI_LE_PER_ADV. 3950 * The current periodic adv instance will be marked as 3951 * disabled once extended advertising is also disabled. 
3952 */ 3953 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 3954 list) { 3955 if (adv->periodic && adv->enabled) 3956 per_adv_cnt++; 3957 } 3958 3959 if (per_adv_cnt > 1) 3960 goto unlock; 3961 3962 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); 3963 } 3964 3965 unlock: 3966 hci_dev_unlock(hdev); 3967 3968 return rp->status; 3969 } 3970 3971 #define HCI_CC_VL(_op, _func, _min, _max) \ 3972 { \ 3973 .op = _op, \ 3974 .func = _func, \ 3975 .min_len = _min, \ 3976 .max_len = _max, \ 3977 } 3978 3979 #define HCI_CC(_op, _func, _len) \ 3980 HCI_CC_VL(_op, _func, _len, _len) 3981 3982 #define HCI_CC_STATUS(_op, _func) \ 3983 HCI_CC(_op, _func, sizeof(struct hci_ev_status)) 3984 3985 static const struct hci_cc { 3986 u16 op; 3987 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 3988 u16 min_len; 3989 u16 max_len; 3990 } hci_cc_table[] = { 3991 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), 3992 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), 3993 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), 3994 HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel, 3995 sizeof(struct hci_rp_remote_name_req_cancel)), 3996 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, 3997 sizeof(struct hci_rp_role_discovery)), 3998 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, 3999 sizeof(struct hci_rp_read_link_policy)), 4000 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, 4001 sizeof(struct hci_rp_write_link_policy)), 4002 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, 4003 sizeof(struct hci_rp_read_def_link_policy)), 4004 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, 4005 hci_cc_write_def_link_policy), 4006 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), 4007 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, 4008 sizeof(struct hci_rp_read_stored_link_key)), 4009 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, 4010 sizeof(struct hci_rp_delete_stored_link_key)), 4011 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), 4012 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, 4013 sizeof(struct hci_rp_read_local_name)), 4014 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), 4015 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), 4016 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), 4017 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), 4018 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 4019 sizeof(struct hci_rp_read_class_of_dev)), 4020 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), 4021 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, 4022 sizeof(struct hci_rp_read_voice_setting)), 4023 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), 4024 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, 4025 sizeof(struct hci_rp_read_num_supported_iac)), 4026 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), 4027 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), 4028 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, 4029 sizeof(struct hci_rp_read_auth_payload_to)), 4030 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, 4031 sizeof(struct hci_rp_write_auth_payload_to)), 4032 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, 4033 sizeof(struct hci_rp_read_local_version)), 4034 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, 4035 
sizeof(struct hci_rp_read_local_commands)), 4036 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, 4037 sizeof(struct hci_rp_read_local_features)), 4038 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, 4039 sizeof(struct hci_rp_read_local_ext_features)), 4040 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, 4041 sizeof(struct hci_rp_read_buffer_size)), 4042 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, 4043 sizeof(struct hci_rp_read_bd_addr)), 4044 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, 4045 sizeof(struct hci_rp_read_local_pairing_opts)), 4046 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, 4047 sizeof(struct hci_rp_read_page_scan_activity)), 4048 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 4049 hci_cc_write_page_scan_activity), 4050 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, 4051 sizeof(struct hci_rp_read_page_scan_type)), 4052 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), 4053 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, 4054 sizeof(struct hci_rp_read_clock)), 4055 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, 4056 sizeof(struct hci_rp_read_enc_key_size)), 4057 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, 4058 sizeof(struct hci_rp_read_inq_rsp_tx_power)), 4059 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, 4060 hci_cc_read_def_err_data_reporting, 4061 sizeof(struct hci_rp_read_def_err_data_reporting)), 4062 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 4063 hci_cc_write_def_err_data_reporting), 4064 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, 4065 sizeof(struct hci_rp_pin_code_reply)), 4066 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, 4067 sizeof(struct hci_rp_pin_code_neg_reply)), 4068 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, 4069 sizeof(struct hci_rp_read_local_oob_data)), 4070 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, 4071 sizeof(struct hci_rp_read_local_oob_ext_data)), 4072 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, 4073 sizeof(struct hci_rp_le_read_buffer_size)), 4074 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, 4075 sizeof(struct hci_rp_le_read_local_features)), 4076 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, 4077 sizeof(struct hci_rp_le_read_adv_tx_power)), 4078 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 4079 sizeof(struct hci_rp_user_confirm_reply)), 4080 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, 4081 sizeof(struct hci_rp_user_confirm_reply)), 4082 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, 4083 sizeof(struct hci_rp_user_confirm_reply)), 4084 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, 4085 sizeof(struct hci_rp_user_confirm_reply)), 4086 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), 4087 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), 4088 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), 4089 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), 4090 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 4091 hci_cc_le_read_accept_list_size, 4092 sizeof(struct hci_rp_le_read_accept_list_size)), 4093 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), 4094 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, 4095 hci_cc_le_add_to_accept_list), 4096 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, 4097 
hci_cc_le_del_from_accept_list), 4098 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, 4099 sizeof(struct hci_rp_le_read_supported_states)), 4100 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, 4101 sizeof(struct hci_rp_le_read_def_data_len)), 4102 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, 4103 hci_cc_le_write_def_data_len), 4104 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, 4105 hci_cc_le_add_to_resolv_list), 4106 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, 4107 hci_cc_le_del_from_resolv_list), 4108 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, 4109 hci_cc_le_clear_resolv_list), 4110 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, 4111 sizeof(struct hci_rp_le_read_resolv_list_size)), 4112 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 4113 hci_cc_le_set_addr_resolution_enable), 4114 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, 4115 sizeof(struct hci_rp_le_read_max_data_len)), 4116 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, 4117 hci_cc_write_le_host_supported), 4118 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), 4119 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, 4120 sizeof(struct hci_rp_read_rssi)), 4121 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, 4122 sizeof(struct hci_rp_read_tx_power)), 4123 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), 4124 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, 4125 hci_cc_le_set_ext_scan_param), 4126 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, 4127 hci_cc_le_set_ext_scan_enable), 4128 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), 4129 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 4130 hci_cc_le_read_num_adv_sets, 4131 sizeof(struct hci_rp_le_read_num_supported_adv_sets)), 4132 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, 4133 hci_cc_le_set_ext_adv_enable), 4134 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 4135 hci_cc_le_set_adv_set_random_addr), 4136 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), 4137 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), 4138 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), 4139 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, 4140 hci_cc_le_set_per_adv_enable), 4141 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, 4142 sizeof(struct hci_rp_le_read_transmit_power)), 4143 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), 4144 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, 4145 sizeof(struct hci_rp_le_read_buffer_size_v2)), 4146 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, 4147 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), 4148 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path, 4149 sizeof(struct hci_rp_le_setup_iso_path)), 4150 }; 4151 4152 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, 4153 struct sk_buff *skb) 4154 { 4155 void *data; 4156 4157 if (skb->len < cc->min_len) { 4158 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", 4159 cc->op, skb->len, cc->min_len); 4160 return HCI_ERROR_UNSPECIFIED; 4161 } 4162 4163 /* Just warn if the length is over max_len, since it may still be 4164 * possible to partially parse the cc, so leave it to the callback to 4165 * decide whether that is acceptable.
4166 */ 4167 if (skb->len > cc->max_len) 4168 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", 4169 cc->op, skb->len, cc->max_len); 4170 4171 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); 4172 if (!data) 4173 return HCI_ERROR_UNSPECIFIED; 4174 4175 return cc->func(hdev, data, skb); 4176 } 4177 4178 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, 4179 struct sk_buff *skb, u16 *opcode, u8 *status, 4180 hci_req_complete_t *req_complete, 4181 hci_req_complete_skb_t *req_complete_skb) 4182 { 4183 struct hci_ev_cmd_complete *ev = data; 4184 int i; 4185 4186 *opcode = __le16_to_cpu(ev->opcode); 4187 4188 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4189 4190 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { 4191 if (hci_cc_table[i].op == *opcode) { 4192 *status = hci_cc_func(hdev, &hci_cc_table[i], skb); 4193 break; 4194 } 4195 } 4196 4197 if (i == ARRAY_SIZE(hci_cc_table)) { 4198 /* Unknown opcode, assume byte 0 contains the status, so 4199 * that e.g. __hci_cmd_sync() properly returns errors 4200 * for vendor-specific commands sent by HCI drivers. 4201 * If a vendor doesn't actually follow this convention we may 4202 * need to introduce a vendor CC table in order to properly set 4203 * the status. 4204 */ 4205 *status = skb->data[0]; 4206 } 4207 4208 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4209 4210 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 4211 req_complete_skb); 4212 4213 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4214 bt_dev_err(hdev, 4215 "unexpected event for opcode 0x%4.4x", *opcode); 4216 return; 4217 } 4218 4219 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4220 queue_work(hdev->workqueue, &hdev->cmd_work); 4221 } 4222 4223 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) 4224 { 4225 struct hci_cp_le_create_cis *cp; 4226 bool pending = false; 4227 int i; 4228 4229 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4230 4231 if (!status) 4232 return; 4233 4234 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); 4235 if (!cp) 4236 return; 4237 4238 hci_dev_lock(hdev); 4239 4240 /* Remove connection if command failed */ 4241 for (i = 0; i < cp->num_cis; i++) { 4242 struct hci_conn *conn; 4243 u16 handle; 4244 4245 handle = __le16_to_cpu(cp->cis[i].cis_handle); 4246 4247 conn = hci_conn_hash_lookup_handle(hdev, handle); 4248 if (conn) { 4249 if (test_and_clear_bit(HCI_CONN_CREATE_CIS, 4250 &conn->flags)) 4251 pending = true; 4252 conn->state = BT_CLOSED; 4253 hci_connect_cfm(conn, status); 4254 hci_conn_del(conn); 4255 } 4256 } 4257 cp->num_cis = 0; 4258 4259 if (pending) 4260 hci_le_create_cis_pending(hdev); 4261 4262 hci_dev_unlock(hdev); 4263 } 4264 4265 #define HCI_CS(_op, _func) \ 4266 { \ 4267 .op = _op, \ 4268 .func = _func, \ 4269 } 4270 4271 static const struct hci_cs { 4272 u16 op; 4273 void (*func)(struct hci_dev *hdev, __u8 status); 4274 } hci_cs_table[] = { 4275 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), 4276 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 4277 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 4278 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 4279 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 4280 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 4281 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 4282 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 4283 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4284 hci_cs_read_remote_ext_features), 4285 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4286 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4287 hci_cs_enhanced_setup_sync_conn),
4288 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4289 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4290 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4291 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4292 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4293 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4294 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4295 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4296 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4297 };
4298
4299 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4300 struct sk_buff *skb, u16 *opcode, u8 *status,
4301 hci_req_complete_t *req_complete,
4302 hci_req_complete_skb_t *req_complete_skb)
4303 {
4304 struct hci_ev_cmd_status *ev = data;
4305 int i;
4306
4307 *opcode = __le16_to_cpu(ev->opcode);
4308 *status = ev->status;
4309
4310 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4311
4312 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4313 if (hci_cs_table[i].op == *opcode) {
4314 hci_cs_table[i].func(hdev, ev->status);
4315 break;
4316 }
4317 }
4318
4319 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4320
4321 /* Indicate request completion if the command failed. Also, if
4322 * we're not waiting for a special event and we get a success
4323 * command status we should try to flag the request as completed
4324 * (since for this kind of command there will not be a command
4325 * complete event).
4326 */
4327 if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4328 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4329 req_complete_skb);
4330 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4331 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4332 *opcode);
4333 return;
4334 }
4335 }
4336
4337 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4338 queue_work(hdev->workqueue, &hdev->cmd_work);
4339 }
4340
4341 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4342 struct sk_buff *skb)
4343 {
4344 struct hci_ev_hardware_error *ev = data;
4345
4346 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4347
4348 hdev->hw_error_code = ev->code;
4349
4350 queue_work(hdev->req_workqueue, &hdev->error_reset);
4351 }
4352
4353 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4354 struct sk_buff *skb)
4355 {
4356 struct hci_ev_role_change *ev = data;
4357 struct hci_conn *conn;
4358
4359 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4360
4361 hci_dev_lock(hdev);
4362
4363 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4364 if (conn) {
4365 if (!ev->status)
4366 conn->role = ev->role;
4367
4368 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4369
4370 hci_role_switch_cfm(conn, ev->status, ev->role);
4371 }
4372
4373 hci_dev_unlock(hdev);
4374 }
4375
4376 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4377 struct sk_buff *skb)
4378 {
4379 struct hci_ev_num_comp_pkts *ev = data;
4380 int i;
4381
4382 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4383 flex_array_size(ev, handles, ev->num)))
4384 return;
4385
4386 bt_dev_dbg(hdev, "num %d", ev->num);
4387
4388 for (i = 0; i < ev->num; i++) {
4389 struct hci_comp_pkts_info *info = &ev->handles[i];
4390 struct hci_conn *conn;
4391 __u16 handle, count;
4392 unsigned int i;
4393
4394 handle = __le16_to_cpu(info->handle);
4395 count = __le16_to_cpu(info->count);
4396
4397 conn = hci_conn_hash_lookup_handle(hdev, handle);
4398 if (!conn)
4399
continue; 4400 4401 conn->sent -= count; 4402 4403 for (i = 0; i < count; ++i) 4404 hci_conn_tx_dequeue(conn); 4405 4406 switch (conn->type) { 4407 case ACL_LINK: 4408 hdev->acl_cnt += count; 4409 if (hdev->acl_cnt > hdev->acl_pkts) 4410 hdev->acl_cnt = hdev->acl_pkts; 4411 break; 4412 4413 case LE_LINK: 4414 if (hdev->le_pkts) { 4415 hdev->le_cnt += count; 4416 if (hdev->le_cnt > hdev->le_pkts) 4417 hdev->le_cnt = hdev->le_pkts; 4418 } else { 4419 hdev->acl_cnt += count; 4420 if (hdev->acl_cnt > hdev->acl_pkts) 4421 hdev->acl_cnt = hdev->acl_pkts; 4422 } 4423 break; 4424 4425 case SCO_LINK: 4426 case ESCO_LINK: 4427 hdev->sco_cnt += count; 4428 if (hdev->sco_cnt > hdev->sco_pkts) 4429 hdev->sco_cnt = hdev->sco_pkts; 4430 4431 break; 4432 4433 case CIS_LINK: 4434 case BIS_LINK: 4435 case PA_LINK: 4436 if (hdev->iso_pkts) { 4437 hdev->iso_cnt += count; 4438 if (hdev->iso_cnt > hdev->iso_pkts) 4439 hdev->iso_cnt = hdev->iso_pkts; 4440 } else if (hdev->le_pkts) { 4441 hdev->le_cnt += count; 4442 if (hdev->le_cnt > hdev->le_pkts) 4443 hdev->le_cnt = hdev->le_pkts; 4444 } else { 4445 hdev->acl_cnt += count; 4446 if (hdev->acl_cnt > hdev->acl_pkts) 4447 hdev->acl_cnt = hdev->acl_pkts; 4448 } 4449 break; 4450 4451 default: 4452 bt_dev_err(hdev, "unknown type %d conn %p", 4453 conn->type, conn); 4454 break; 4455 } 4456 } 4457 4458 queue_work(hdev->workqueue, &hdev->tx_work); 4459 } 4460 4461 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4462 struct sk_buff *skb) 4463 { 4464 struct hci_ev_mode_change *ev = data; 4465 struct hci_conn *conn; 4466 4467 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4468 4469 hci_dev_lock(hdev); 4470 4471 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4472 if (conn) { 4473 conn->mode = ev->mode; 4474 4475 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4476 &conn->flags)) { 4477 if (conn->mode == HCI_CM_ACTIVE) 4478 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4479 else 4480 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4481 } 4482 4483 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4484 hci_sco_setup(conn, ev->status); 4485 } 4486 4487 hci_dev_unlock(hdev); 4488 } 4489 4490 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4491 struct sk_buff *skb) 4492 { 4493 struct hci_ev_pin_code_req *ev = data; 4494 struct hci_conn *conn; 4495 4496 bt_dev_dbg(hdev, ""); 4497 4498 hci_dev_lock(hdev); 4499 4500 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4501 if (!conn) 4502 goto unlock; 4503 4504 if (conn->state == BT_CONNECTED) { 4505 hci_conn_hold(conn); 4506 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4507 hci_conn_drop(conn); 4508 } 4509 4510 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4511 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4512 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4513 sizeof(ev->bdaddr), &ev->bdaddr); 4514 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4515 u8 secure; 4516 4517 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4518 secure = 1; 4519 else 4520 secure = 0; 4521 4522 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4523 } 4524 4525 unlock: 4526 hci_dev_unlock(hdev); 4527 } 4528 4529 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4530 { 4531 if (key_type == HCI_LK_CHANGED_COMBINATION) 4532 return; 4533 4534 conn->pin_length = pin_len; 4535 conn->key_type = key_type; 4536 4537 switch (key_type) { 4538 case HCI_LK_LOCAL_UNIT: 4539 case HCI_LK_REMOTE_UNIT: 4540 case HCI_LK_DEBUG_COMBINATION: 4541 return; 4542 case 
HCI_LK_COMBINATION: 4543 if (pin_len == 16) 4544 conn->pending_sec_level = BT_SECURITY_HIGH; 4545 else 4546 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4547 break; 4548 case HCI_LK_UNAUTH_COMBINATION_P192: 4549 case HCI_LK_UNAUTH_COMBINATION_P256: 4550 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4551 break; 4552 case HCI_LK_AUTH_COMBINATION_P192: 4553 conn->pending_sec_level = BT_SECURITY_HIGH; 4554 break; 4555 case HCI_LK_AUTH_COMBINATION_P256: 4556 conn->pending_sec_level = BT_SECURITY_FIPS; 4557 break; 4558 } 4559 } 4560 4561 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4562 struct sk_buff *skb) 4563 { 4564 struct hci_ev_link_key_req *ev = data; 4565 struct hci_cp_link_key_reply cp; 4566 struct hci_conn *conn; 4567 struct link_key *key; 4568 4569 bt_dev_dbg(hdev, ""); 4570 4571 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4572 return; 4573 4574 hci_dev_lock(hdev); 4575 4576 key = hci_find_link_key(hdev, &ev->bdaddr); 4577 if (!key) { 4578 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4579 goto not_found; 4580 } 4581 4582 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4583 4584 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4585 if (conn) { 4586 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4587 4588 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4589 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4590 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4591 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4592 goto not_found; 4593 } 4594 4595 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4596 (conn->pending_sec_level == BT_SECURITY_HIGH || 4597 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4598 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4599 goto not_found; 4600 } 4601 4602 conn_set_key(conn, key->type, key->pin_len); 4603 } 4604 4605 bacpy(&cp.bdaddr, &ev->bdaddr); 4606 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4607 4608 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4609 4610 hci_dev_unlock(hdev); 4611 4612 return; 4613 4614 not_found: 4615 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4616 hci_dev_unlock(hdev); 4617 } 4618 4619 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4620 struct sk_buff *skb) 4621 { 4622 struct hci_ev_link_key_notify *ev = data; 4623 struct hci_conn *conn; 4624 struct link_key *key; 4625 bool persistent; 4626 u8 pin_len = 0; 4627 4628 bt_dev_dbg(hdev, ""); 4629 4630 hci_dev_lock(hdev); 4631 4632 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4633 if (!conn) 4634 goto unlock; 4635 4636 /* Ignore NULL link key against CVE-2020-26555 */ 4637 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) { 4638 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR", 4639 &ev->bdaddr); 4640 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 4641 hci_conn_drop(conn); 4642 goto unlock; 4643 } 4644 4645 hci_conn_hold(conn); 4646 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4647 hci_conn_drop(conn); 4648 4649 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4650 conn_set_key(conn, ev->key_type, conn->pin_length); 4651 4652 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4653 goto unlock; 4654 4655 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4656 ev->key_type, pin_len, &persistent); 4657 if (!key) 4658 goto unlock; 4659 4660 /* Update connection information since adding the key will have 4661 * fixed up the type in the case of changed combination keys. 
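*
* conn_set_key() intentionally returns early for
* HCI_LK_CHANGED_COMBINATION, so the pending security level is
* derived from the real key type stored by hci_add_link_key().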
4662 */ 4663 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4664 conn_set_key(conn, key->type, key->pin_len); 4665 4666 mgmt_new_link_key(hdev, key, persistent); 4667 4668 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4669 * is set. If it's not set simply remove the key from the kernel 4670 * list (we've still notified user space about it but with 4671 * store_hint being 0). 4672 */ 4673 if (key->type == HCI_LK_DEBUG_COMBINATION && 4674 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4675 list_del_rcu(&key->list); 4676 kfree_rcu(key, rcu); 4677 goto unlock; 4678 } 4679 4680 if (persistent) 4681 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4682 else 4683 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4684 4685 unlock: 4686 hci_dev_unlock(hdev); 4687 } 4688 4689 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4690 struct sk_buff *skb) 4691 { 4692 struct hci_ev_clock_offset *ev = data; 4693 struct hci_conn *conn; 4694 4695 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4696 4697 hci_dev_lock(hdev); 4698 4699 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4700 if (conn && !ev->status) { 4701 struct inquiry_entry *ie; 4702 4703 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4704 if (ie) { 4705 ie->data.clock_offset = ev->clock_offset; 4706 ie->timestamp = jiffies; 4707 } 4708 } 4709 4710 hci_dev_unlock(hdev); 4711 } 4712 4713 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4714 struct sk_buff *skb) 4715 { 4716 struct hci_ev_pkt_type_change *ev = data; 4717 struct hci_conn *conn; 4718 4719 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4720 4721 hci_dev_lock(hdev); 4722 4723 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4724 if (conn && !ev->status) 4725 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4726 4727 hci_dev_unlock(hdev); 4728 } 4729 4730 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4731 struct sk_buff *skb) 4732 { 4733 struct hci_ev_pscan_rep_mode *ev = data; 4734 struct inquiry_entry *ie; 4735 4736 bt_dev_dbg(hdev, ""); 4737 4738 hci_dev_lock(hdev); 4739 4740 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4741 if (ie) { 4742 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4743 ie->timestamp = jiffies; 4744 } 4745 4746 hci_dev_unlock(hdev); 4747 } 4748 4749 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4750 struct sk_buff *skb) 4751 { 4752 struct hci_ev_inquiry_result_rssi *ev = edata; 4753 struct inquiry_data data; 4754 int i; 4755 4756 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4757 4758 if (!ev->num) 4759 return; 4760 4761 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4762 return; 4763 4764 hci_dev_lock(hdev); 4765 4766 if (skb->len == array_size(ev->num, 4767 sizeof(struct inquiry_info_rssi_pscan))) { 4768 struct inquiry_info_rssi_pscan *info; 4769 4770 for (i = 0; i < ev->num; i++) { 4771 u32 flags; 4772 4773 info = hci_ev_skb_pull(hdev, skb, 4774 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4775 sizeof(*info)); 4776 if (!info) { 4777 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4778 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4779 goto unlock; 4780 } 4781 4782 bacpy(&data.bdaddr, &info->bdaddr); 4783 data.pscan_rep_mode = info->pscan_rep_mode; 4784 data.pscan_period_mode = info->pscan_period_mode; 4785 data.pscan_mode = info->pscan_mode; 4786 memcpy(data.dev_class, info->dev_class, 3); 4787 data.clock_offset = info->clock_offset; 4788 data.rssi = info->rssi; 4789 data.ssp_mode = 0x00; 4790 4791 flags = hci_inquiry_cache_update(hdev, 
&data, false); 4792 4793 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4794 info->dev_class, info->rssi, 4795 flags, NULL, 0, NULL, 0, 0); 4796 } 4797 } else if (skb->len == array_size(ev->num, 4798 sizeof(struct inquiry_info_rssi))) { 4799 struct inquiry_info_rssi *info; 4800 4801 for (i = 0; i < ev->num; i++) { 4802 u32 flags; 4803 4804 info = hci_ev_skb_pull(hdev, skb, 4805 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4806 sizeof(*info)); 4807 if (!info) { 4808 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4809 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4810 goto unlock; 4811 } 4812 4813 bacpy(&data.bdaddr, &info->bdaddr); 4814 data.pscan_rep_mode = info->pscan_rep_mode; 4815 data.pscan_period_mode = info->pscan_period_mode; 4816 data.pscan_mode = 0x00; 4817 memcpy(data.dev_class, info->dev_class, 3); 4818 data.clock_offset = info->clock_offset; 4819 data.rssi = info->rssi; 4820 data.ssp_mode = 0x00; 4821 4822 flags = hci_inquiry_cache_update(hdev, &data, false); 4823 4824 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4825 info->dev_class, info->rssi, 4826 flags, NULL, 0, NULL, 0, 0); 4827 } 4828 } else { 4829 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4830 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4831 } 4832 unlock: 4833 hci_dev_unlock(hdev); 4834 } 4835 4836 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4837 struct sk_buff *skb) 4838 { 4839 struct hci_ev_remote_ext_features *ev = data; 4840 struct hci_conn *conn; 4841 4842 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4843 4844 hci_dev_lock(hdev); 4845 4846 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4847 if (!conn) 4848 goto unlock; 4849 4850 if (ev->page < HCI_MAX_PAGES) 4851 memcpy(conn->features[ev->page], ev->features, 8); 4852 4853 if (!ev->status && ev->page == 0x01) { 4854 struct inquiry_entry *ie; 4855 4856 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4857 if (ie) 4858 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4859 4860 if (ev->features[0] & LMP_HOST_SSP) { 4861 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4862 } else { 4863 /* It is mandatory by the Bluetooth specification that 4864 * Extended Inquiry Results are only used when Secure 4865 * Simple Pairing is enabled, but some devices violate 4866 * this. 
4867 * 4868 * To make these devices work, the internal SSP 4869 * enabled flag needs to be cleared if the remote host 4870 * features do not indicate SSP support */ 4871 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4872 } 4873 4874 if (ev->features[0] & LMP_HOST_SC) 4875 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4876 } 4877 4878 if (conn->state != BT_CONFIG) 4879 goto unlock; 4880 4881 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4882 struct hci_cp_remote_name_req cp; 4883 memset(&cp, 0, sizeof(cp)); 4884 bacpy(&cp.bdaddr, &conn->dst); 4885 cp.pscan_rep_mode = 0x02; 4886 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4887 } else { 4888 mgmt_device_connected(hdev, conn, NULL, 0); 4889 } 4890 4891 if (!hci_outgoing_auth_needed(hdev, conn)) { 4892 conn->state = BT_CONNECTED; 4893 hci_connect_cfm(conn, ev->status); 4894 hci_conn_drop(conn); 4895 } 4896 4897 unlock: 4898 hci_dev_unlock(hdev); 4899 } 4900 4901 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4902 struct sk_buff *skb) 4903 { 4904 struct hci_ev_sync_conn_complete *ev = data; 4905 struct hci_conn *conn; 4906 u8 status = ev->status; 4907 4908 switch (ev->link_type) { 4909 case SCO_LINK: 4910 case ESCO_LINK: 4911 break; 4912 default: 4913 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type 4914 * for HCI_Synchronous_Connection_Complete is limited to 4915 * either SCO or eSCO 4916 */ 4917 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); 4918 return; 4919 } 4920 4921 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4922 4923 hci_dev_lock(hdev); 4924 4925 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4926 if (!conn) { 4927 if (ev->link_type == ESCO_LINK) 4928 goto unlock; 4929 4930 /* When the link type in the event indicates SCO connection 4931 * and lookup of the connection object fails, then check 4932 * if an eSCO connection object exists. 4933 * 4934 * The core limits the synchronous connections to either 4935 * SCO or eSCO. The eSCO connection is preferred and tried 4936 * to be setup first and until successfully established, 4937 * the link type will be hinted as eSCO. 4938 */ 4939 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4940 if (!conn) 4941 goto unlock; 4942 } 4943 4944 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. 4945 * Processing it more than once per connection can corrupt kernel memory. 4946 * 4947 * As the connection handle is set here for the first time, it indicates 4948 * whether the connection is already set up. 
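*
* hci_conn_set_handle() below performs its own validation of the
* handle, so a failure there transitions the connection to BT_CLOSED.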
4949 */
4950 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
4951 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4952 goto unlock;
4953 }
4954
4955 switch (status) {
4956 case 0x00:
4957 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
4958 if (status) {
4959 conn->state = BT_CLOSED;
4960 break;
4961 }
4962
4963 conn->state = BT_CONNECTED;
4964 conn->type = ev->link_type;
4965
4966 hci_debugfs_create_conn(conn);
4967 hci_conn_add_sysfs(conn);
4968 break;
4969
4970 case 0x10: /* Connection Accept Timeout */
4971 case 0x0d: /* Connection Rejected due to Limited Resources */
4972 case 0x11: /* Unsupported Feature or Parameter Value */
4973 case 0x1c: /* SCO interval rejected */
4974 case 0x1a: /* Unsupported Remote Feature */
4975 case 0x1e: /* Invalid LMP Parameters */
4976 case 0x1f: /* Unspecified error */
4977 case 0x20: /* Unsupported LMP Parameter value */
4978 if (conn->out) {
4979 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4980 (hdev->esco_type & EDR_ESCO_MASK);
4981 if (hci_setup_sync(conn, conn->parent->handle))
4982 goto unlock;
4983 }
4984 fallthrough;
4985
4986 default:
4987 conn->state = BT_CLOSED;
4988 break;
4989 }
4990
4991 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4992 /* Notify only in case of SCO over the HCI transport data path (i.e.
4993 * data_path is zero); a non-zero value means a non-HCI transport data path
4994 */
4995 if (conn->codec.data_path == 0 && hdev->notify) {
4996 switch (ev->air_mode) {
4997 case 0x02:
4998 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4999 break;
5000 case 0x03:
5001 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5002 break;
5003 }
5004 }
5005
5006 hci_connect_cfm(conn, status);
5007 if (status)
5008 hci_conn_del(conn);
5009
5010 unlock:
5011 hci_dev_unlock(hdev);
5012 }
5013
5014 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5015 {
5016 size_t parsed = 0;
5017
5018 while (parsed < eir_len) {
5019 u8 field_len = eir[0];
5020
5021 if (field_len == 0)
5022 return parsed;
5023
5024 parsed += field_len + 1;
5025 eir += field_len + 1;
5026 }
5027
5028 return eir_len;
5029 }
5030
5031 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5032 struct sk_buff *skb)
5033 {
5034 struct hci_ev_ext_inquiry_result *ev = edata;
5035 struct inquiry_data data;
5036 size_t eir_len;
5037 int i;
5038
5039 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5040 flex_array_size(ev, info, ev->num)))
5041 return;
5042
5043 bt_dev_dbg(hdev, "num %d", ev->num);
5044
5045 if (!ev->num)
5046 return;
5047
5048 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5049 return;
5050
5051 hci_dev_lock(hdev);
5052
5053 for (i = 0; i < ev->num; i++) {
5054 struct extended_inquiry_info *info = &ev->info[i];
5055 u32 flags;
5056 bool name_known;
5057
5058 bacpy(&data.bdaddr, &info->bdaddr);
5059 data.pscan_rep_mode = info->pscan_rep_mode;
5060 data.pscan_period_mode = info->pscan_period_mode;
5061 data.pscan_mode = 0x00;
5062 memcpy(data.dev_class, info->dev_class, 3);
5063 data.clock_offset = info->clock_offset;
5064 data.rssi = info->rssi;
5065 data.ssp_mode = 0x01;
5066
5067 if (hci_dev_test_flag(hdev, HCI_MGMT))
5068 name_known = eir_get_data(info->data,
5069 sizeof(info->data),
5070 EIR_NAME_COMPLETE, NULL);
5071 else
5072 name_known = true;
5073
5074 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5075
5076 eir_len = eir_get_length(info->data, sizeof(info->data));
5077
5078 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5079
info->dev_class, info->rssi,
5080 flags, info->data, eir_len, NULL, 0, 0);
5081 }
5082
5083 hci_dev_unlock(hdev);
5084 }
5085
5086 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5087 struct sk_buff *skb)
5088 {
5089 struct hci_ev_key_refresh_complete *ev = data;
5090 struct hci_conn *conn;
5091
5092 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5093 __le16_to_cpu(ev->handle));
5094
5095 hci_dev_lock(hdev);
5096
5097 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5098 if (!conn)
5099 goto unlock;
5100
5101 /* For BR/EDR the necessary steps are taken through the
5102 * auth_complete event.
5103 */
5104 if (conn->type != LE_LINK)
5105 goto unlock;
5106
5107 if (!ev->status)
5108 conn->sec_level = conn->pending_sec_level;
5109
5110 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5111
5112 if (ev->status && conn->state == BT_CONNECTED) {
5113 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5114 hci_conn_drop(conn);
5115 goto unlock;
5116 }
5117
5118 if (conn->state == BT_CONFIG) {
5119 if (!ev->status)
5120 conn->state = BT_CONNECTED;
5121
5122 hci_connect_cfm(conn, ev->status);
5123 hci_conn_drop(conn);
5124 } else {
5125 hci_auth_cfm(conn, ev->status);
5126
5127 hci_conn_hold(conn);
5128 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5129 hci_conn_drop(conn);
5130 }
5131
5132 unlock:
5133 hci_dev_unlock(hdev);
5134 }
5135
5136 static u8 hci_get_auth_req(struct hci_conn *conn)
5137 {
5138 /* If remote requests no-bonding follow that lead */
5139 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5140 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5141 return conn->remote_auth | (conn->auth_type & 0x01);
5142
5143 /* If both remote and local have enough IO capabilities, require
5144 * MITM protection
5145 */
5146 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5147 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5148 return conn->remote_auth | 0x01;
5149
5150 /* No MITM protection possible so ignore remote requirement */
5151 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5152 }
5153
5154 static u8 bredr_oob_data_present(struct hci_conn *conn)
5155 {
5156 struct hci_dev *hdev = conn->hdev;
5157 struct oob_data *data;
5158
5159 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5160 if (!data)
5161 return 0x00;
5162
5163 if (bredr_sc_enabled(hdev)) {
5164 /* When Secure Connections is enabled, then just
5165 * return the present value stored with the OOB
5166 * data. The stored value contains the right present
5167 * information. However it can only be trusted when
5168 * not in Secure Connections Only mode.
5169 */
5170 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5171 return data->present;
5172
5173 /* When Secure Connections Only mode is enabled, then
5174 * the P-256 values are required. If they are not
5175 * available, then do not declare that OOB data is
5176 * present.
5177 */
5178 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5179 !crypto_memneq(data->hash256, ZERO_KEY, 16))
5180 return 0x00;
5181
5182 return 0x02;
5183 }
5184
5185 /* When Secure Connections is not enabled or actually
5186 * not supported by the hardware, then check whether the
5187 * P-192 data values are present.
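*
* The return value is used as the OOB_Data_Present field of the IO
* Capability Reply below: 0x00 no usable OOB data, 0x01 P-192 values
* present, 0x02 P-256 values present.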
5188 */
5189 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5190 !crypto_memneq(data->hash192, ZERO_KEY, 16))
5191 return 0x00;
5192
5193 return 0x01;
5194 }
5195
5196 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5197 struct sk_buff *skb)
5198 {
5199 struct hci_ev_io_capa_request *ev = data;
5200 struct hci_conn *conn;
5201
5202 bt_dev_dbg(hdev, "");
5203
5204 hci_dev_lock(hdev);
5205
5206 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5207 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5208 goto unlock;
5209
5210 /* Assume remote supports SSP since it has triggered this event */
5211 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5212
5213 hci_conn_hold(conn);
5214
5215 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5216 goto unlock;
5217
5218 /* Allow pairing if we're bondable, if we initiated the
5219 * pairing, or if the remote is not requesting bonding.
5220 */
5221 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5222 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5223 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5224 struct hci_cp_io_capability_reply cp;
5225
5226 bacpy(&cp.bdaddr, &ev->bdaddr);
5227 /* Change the IO capability from KeyboardDisplay to
5228 * DisplayYesNo, as KeyboardDisplay is not supported by the BT spec. */
5229 cp.capability = (conn->io_capability == 0x04) ?
5230 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5231
5232 /* If we are the initiator, there is no remote information yet */
5233 if (conn->remote_auth == 0xff) {
5234 /* Request MITM protection if our IO caps allow it
5235 * except for the no-bonding case.
5236 */
5237 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5238 conn->auth_type != HCI_AT_NO_BONDING)
5239 conn->auth_type |= 0x01;
5240 } else {
5241 conn->auth_type = hci_get_auth_req(conn);
5242 }
5243
5244 /* If we're not bondable, force one of the non-bondable
5245 * authentication requirement values.
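*
* Masking conn->auth_type with HCI_AT_NO_BONDING_MITM (0x01) below
* clears the bonding bits while keeping the MITM bit, leaving either
* HCI_AT_NO_BONDING or HCI_AT_NO_BONDING_MITM.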
5246 */
5247 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5248 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5249
5250 cp.authentication = conn->auth_type;
5251 cp.oob_data = bredr_oob_data_present(conn);
5252
5253 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5254 sizeof(cp), &cp);
5255 } else {
5256 struct hci_cp_io_capability_neg_reply cp;
5257
5258 bacpy(&cp.bdaddr, &ev->bdaddr);
5259 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5260
5261 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5262 sizeof(cp), &cp);
5263 }
5264
5265 unlock:
5266 hci_dev_unlock(hdev);
5267 }
5268
5269 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5270 struct sk_buff *skb)
5271 {
5272 struct hci_ev_io_capa_reply *ev = data;
5273 struct hci_conn *conn;
5274
5275 bt_dev_dbg(hdev, "");
5276
5277 hci_dev_lock(hdev);
5278
5279 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5280 if (!conn)
5281 goto unlock;
5282
5283 conn->remote_cap = ev->capability;
5284 conn->remote_auth = ev->authentication;
5285
5286 unlock:
5287 hci_dev_unlock(hdev);
5288 }
5289
5290 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5291 struct sk_buff *skb)
5292 {
5293 struct hci_ev_user_confirm_req *ev = data;
5294 int loc_mitm, rem_mitm, confirm_hint = 0;
5295 struct hci_conn *conn;
5296
5297 bt_dev_dbg(hdev, "");
5298
5299 hci_dev_lock(hdev);
5300
5301 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5302 goto unlock;
5303
5304 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5305 if (!conn)
5306 goto unlock;
5307
5308 loc_mitm = (conn->auth_type & 0x01);
5309 rem_mitm = (conn->remote_auth & 0x01);
5310
5311 /* If we require MITM but the remote device can't provide that
5312 * (it has NoInputNoOutput) then reject the confirmation
5313 * request. We check the security level here since it doesn't
5314 * necessarily match conn->auth_type.
5315 */
5316 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5317 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5318 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5319 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5320 sizeof(ev->bdaddr), &ev->bdaddr);
5321 goto unlock;
5322 }
5323
5324 /* If neither side requires MITM protection, use the JUST_CFM method */
5325 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5326 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5327
5328 /* If we're not the initiator of the authorization request and
5329 * the local IO capability is not NoInputNoOutput, use the
5330 * JUST_WORKS method (mgmt_user_confirm with confirm_hint set to 1).
5331 */
5332 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5333 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
5334 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5335 confirm_hint = 1;
5336 goto confirm;
5337 }
5338
5339 /* If a link key already exists in the local host, leave the
5340 * decision to user space, since the remote device could be
5341 * either legitimate or malicious.
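*
* A malicious device could otherwise spoof the address of a
* previously paired peer and have the stale key silently replaced,
* so prompting here is the safer default.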
5342 */ 5343 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5344 bt_dev_dbg(hdev, "Local host already has link key"); 5345 confirm_hint = 1; 5346 goto confirm; 5347 } 5348 5349 BT_DBG("Auto-accept of user confirmation with %ums delay", 5350 hdev->auto_accept_delay); 5351 5352 if (hdev->auto_accept_delay > 0) { 5353 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5354 queue_delayed_work(conn->hdev->workqueue, 5355 &conn->auto_accept_work, delay); 5356 goto unlock; 5357 } 5358 5359 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5360 sizeof(ev->bdaddr), &ev->bdaddr); 5361 goto unlock; 5362 } 5363 5364 confirm: 5365 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5366 le32_to_cpu(ev->passkey), confirm_hint); 5367 5368 unlock: 5369 hci_dev_unlock(hdev); 5370 } 5371 5372 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5373 struct sk_buff *skb) 5374 { 5375 struct hci_ev_user_passkey_req *ev = data; 5376 5377 bt_dev_dbg(hdev, ""); 5378 5379 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5380 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5381 } 5382 5383 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5384 struct sk_buff *skb) 5385 { 5386 struct hci_ev_user_passkey_notify *ev = data; 5387 struct hci_conn *conn; 5388 5389 bt_dev_dbg(hdev, ""); 5390 5391 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5392 if (!conn) 5393 return; 5394 5395 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5396 conn->passkey_entered = 0; 5397 5398 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5399 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5400 conn->dst_type, conn->passkey_notify, 5401 conn->passkey_entered); 5402 } 5403 5404 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5405 struct sk_buff *skb) 5406 { 5407 struct hci_ev_keypress_notify *ev = data; 5408 struct hci_conn *conn; 5409 5410 bt_dev_dbg(hdev, ""); 5411 5412 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5413 if (!conn) 5414 return; 5415 5416 switch (ev->type) { 5417 case HCI_KEYPRESS_STARTED: 5418 conn->passkey_entered = 0; 5419 return; 5420 5421 case HCI_KEYPRESS_ENTERED: 5422 conn->passkey_entered++; 5423 break; 5424 5425 case HCI_KEYPRESS_ERASED: 5426 conn->passkey_entered--; 5427 break; 5428 5429 case HCI_KEYPRESS_CLEARED: 5430 conn->passkey_entered = 0; 5431 break; 5432 5433 case HCI_KEYPRESS_COMPLETED: 5434 return; 5435 } 5436 5437 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5438 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5439 conn->dst_type, conn->passkey_notify, 5440 conn->passkey_entered); 5441 } 5442 5443 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5444 struct sk_buff *skb) 5445 { 5446 struct hci_ev_simple_pair_complete *ev = data; 5447 struct hci_conn *conn; 5448 5449 bt_dev_dbg(hdev, ""); 5450 5451 hci_dev_lock(hdev); 5452 5453 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5454 if (!conn || !hci_conn_ssp_enabled(conn)) 5455 goto unlock; 5456 5457 /* Reset the authentication requirement to unknown */ 5458 conn->remote_auth = 0xff; 5459 5460 /* To avoid duplicate auth_failed events to user space we check 5461 * the HCI_CONN_AUTH_PEND flag which will be set if we 5462 * initiated the authentication. 
A traditional auth_complete
5463 * event is always produced as the initiator and is also mapped to
5464 * the mgmt_auth_failed event */
5465 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5466 mgmt_auth_failed(conn, ev->status);
5467
5468 hci_conn_drop(conn);
5469
5470 unlock:
5471 hci_dev_unlock(hdev);
5472 }
5473
5474 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5475 struct sk_buff *skb)
5476 {
5477 struct hci_ev_remote_host_features *ev = data;
5478 struct inquiry_entry *ie;
5479 struct hci_conn *conn;
5480
5481 bt_dev_dbg(hdev, "");
5482
5483 hci_dev_lock(hdev);
5484
5485 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5486 if (conn)
5487 memcpy(conn->features[1], ev->features, 8);
5488
5489 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5490 if (ie)
5491 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5492
5493 hci_dev_unlock(hdev);
5494 }
5495
5496 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5497 struct sk_buff *skb)
5498 {
5499 struct hci_ev_remote_oob_data_request *ev = edata;
5500 struct oob_data *data;
5501
5502 bt_dev_dbg(hdev, "");
5503
5504 hci_dev_lock(hdev);
5505
5506 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5507 goto unlock;
5508
5509 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5510 if (!data) {
5511 struct hci_cp_remote_oob_data_neg_reply cp;
5512
5513 bacpy(&cp.bdaddr, &ev->bdaddr);
5514 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5515 sizeof(cp), &cp);
5516 goto unlock;
5517 }
5518
5519 if (bredr_sc_enabled(hdev)) {
5520 struct hci_cp_remote_oob_ext_data_reply cp;
5521
5522 bacpy(&cp.bdaddr, &ev->bdaddr);
5523 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5524 memset(cp.hash192, 0, sizeof(cp.hash192));
5525 memset(cp.rand192, 0, sizeof(cp.rand192));
5526 } else {
5527 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5528 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5529 }
5530 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5531 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5532
5533 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5534 sizeof(cp), &cp);
5535 } else {
5536 struct hci_cp_remote_oob_data_reply cp;
5537
5538 bacpy(&cp.bdaddr, &ev->bdaddr);
5539 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5540 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5541
5542 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5543 sizeof(cp), &cp);
5544 }
5545
5546 unlock:
5547 hci_dev_unlock(hdev);
5548 }
5549
5550 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5551 u8 bdaddr_type, bdaddr_t *local_rpa)
5552 {
5553 if (conn->out) {
5554 conn->dst_type = bdaddr_type;
5555 conn->resp_addr_type = bdaddr_type;
5556 bacpy(&conn->resp_addr, bdaddr);
5557
5558 /* If the controller has set a Local RPA, then it must be
5559 * used instead of hdev->rpa.
5560 */
5561 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5562 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5563 bacpy(&conn->init_addr, local_rpa);
5564 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5565 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5566 bacpy(&conn->init_addr, &conn->hdev->rpa);
5567 } else {
5568 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5569 &conn->init_addr_type);
5570 }
5571 } else {
5572 conn->resp_addr_type = conn->hdev->adv_addr_type;
5573 /* If the controller has set a Local RPA, then it must be
5574 * used instead of hdev->rpa.
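* The controller reports such an RPA in the local_rpa field of the
* LE Enhanced Connection Complete event.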
5575 */ 5576 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5577 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5578 bacpy(&conn->resp_addr, local_rpa); 5579 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5580 /* In case of ext adv, resp_addr will be updated in 5581 * Adv Terminated event. 5582 */ 5583 if (!ext_adv_capable(conn->hdev)) 5584 bacpy(&conn->resp_addr, 5585 &conn->hdev->random_addr); 5586 } else { 5587 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5588 } 5589 5590 conn->init_addr_type = bdaddr_type; 5591 bacpy(&conn->init_addr, bdaddr); 5592 5593 /* For incoming connections, set the default minimum 5594 * and maximum connection interval. They will be used 5595 * to check if the parameters are in range and if not 5596 * trigger the connection update procedure. 5597 */ 5598 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5599 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5600 } 5601 } 5602 5603 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5604 bdaddr_t *bdaddr, u8 bdaddr_type, 5605 bdaddr_t *local_rpa, u8 role, u16 handle, 5606 u16 interval, u16 latency, 5607 u16 supervision_timeout) 5608 { 5609 struct hci_conn_params *params; 5610 struct hci_conn *conn; 5611 struct smp_irk *irk; 5612 u8 addr_type; 5613 5614 hci_dev_lock(hdev); 5615 5616 /* All controllers implicitly stop advertising in the event of a 5617 * connection, so ensure that the state bit is cleared. 5618 */ 5619 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5620 5621 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 5622 if (!conn) { 5623 /* In case of error status and there is no connection pending 5624 * just unlock as there is nothing to cleanup. 5625 */ 5626 if (status) 5627 goto unlock; 5628 5629 conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role); 5630 if (IS_ERR(conn)) { 5631 bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); 5632 goto unlock; 5633 } 5634 5635 conn->dst_type = bdaddr_type; 5636 5637 /* If we didn't have a hci_conn object previously 5638 * but we're in central role this must be something 5639 * initiated using an accept list. Since accept list based 5640 * connections are not "first class citizens" we don't 5641 * have full tracking of them. Therefore, we go ahead 5642 * with a "best effort" approach of determining the 5643 * initiator address based on the HCI_PRIVACY flag. 5644 */ 5645 if (conn->out) { 5646 conn->resp_addr_type = bdaddr_type; 5647 bacpy(&conn->resp_addr, bdaddr); 5648 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5649 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5650 bacpy(&conn->init_addr, &hdev->rpa); 5651 } else { 5652 hci_copy_identity_address(hdev, 5653 &conn->init_addr, 5654 &conn->init_addr_type); 5655 } 5656 } 5657 } else { 5658 cancel_delayed_work(&conn->le_conn_timeout); 5659 } 5660 5661 /* The HCI_LE_Connection_Complete event is only sent once per connection. 5662 * Processing it more than once per connection can corrupt kernel memory. 5663 * 5664 * As the connection handle is set here for the first time, it indicates 5665 * whether the connection is already set up. 5666 */ 5667 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { 5668 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 5669 goto unlock; 5670 } 5671 5672 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5673 5674 /* Lookup the identity address from the stored connection 5675 * address and address type. 
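* The IRKs consulted here are the ones distributed by peers during
* SMP pairing or loaded from user space via mgmt.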
5676 *
5677 * When establishing connections to an identity address, the
5678 * connection procedure will store the resolvable random
5679 * address first. Now if it can be converted back into the
5680 * identity address, start using the identity address from
5681 * now on.
5682 */
5683 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5684 if (irk) {
5685 bacpy(&conn->dst, &irk->bdaddr);
5686 conn->dst_type = irk->addr_type;
5687 }
5688
5689 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5690
5691 /* All connection failure handling is taken care of by the
5692 * hci_conn_failed function which is triggered by the HCI
5693 * request completion callbacks used for connecting.
5694 */
5695 if (status || hci_conn_set_handle(conn, handle))
5696 goto unlock;
5697
5698 /* Drop the connection if it has been aborted */
5699 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5700 hci_conn_drop(conn);
5701 goto unlock;
5702 }
5703
5704 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5705 addr_type = BDADDR_LE_PUBLIC;
5706 else
5707 addr_type = BDADDR_LE_RANDOM;
5708
5709 /* Drop the connection if the device is blocked */
5710 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5711 hci_conn_drop(conn);
5712 goto unlock;
5713 }
5714
5715 mgmt_device_connected(hdev, conn, NULL, 0);
5716
5717 conn->sec_level = BT_SECURITY_LOW;
5718 conn->state = BT_CONFIG;
5719
5720 /* Store current advertising instance as connection advertising instance
5721 * when software rotation is in use so it can be re-enabled when
5722 * disconnected.
5723 */
5724 if (!ext_adv_capable(hdev))
5725 conn->adv_instance = hdev->cur_adv_instance;
5726
5727 conn->le_conn_interval = interval;
5728 conn->le_conn_latency = latency;
5729 conn->le_supv_timeout = supervision_timeout;
5730
5731 hci_debugfs_create_conn(conn);
5732 hci_conn_add_sysfs(conn);
5733
5734 /* The remote features procedure is defined for the central
5735 * role only, so the remote features are requested only for an
5736 * initiated (outgoing) connection.
5737 *
5738 * If the local controller supports peripheral-initiated features
5739 * exchange, then requesting the remote features in peripheral
5740 * role is possible. Otherwise just transition into the
5741 * connected state without requesting the remote features.
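*
* Peripheral-initiated exchange is advertised through the
* HCI_LE_PERIPHERAL_FEATURES bit of le_features[0], which is what
* the check below tests.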
5742 */
5743 if (conn->out ||
5744 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5745 struct hci_cp_le_read_remote_features cp;
5746
5747 cp.handle = __cpu_to_le16(conn->handle);
5748
5749 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5750 sizeof(cp), &cp);
5751
5752 hci_conn_hold(conn);
5753 } else {
5754 conn->state = BT_CONNECTED;
5755 hci_connect_cfm(conn, status);
5756 }
5757
5758 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5759 conn->dst_type);
5760 if (params) {
5761 hci_pend_le_list_del_init(params);
5762 if (params->conn) {
5763 hci_conn_drop(params->conn);
5764 hci_conn_put(params->conn);
5765 params->conn = NULL;
5766 }
5767 }
5768
5769 unlock:
5770 hci_update_passive_scan(hdev);
5771 hci_dev_unlock(hdev);
5772 }
5773
5774 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5775 struct sk_buff *skb)
5776 {
5777 struct hci_ev_le_conn_complete *ev = data;
5778
5779 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5780
5781 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5782 NULL, ev->role, le16_to_cpu(ev->handle),
5783 le16_to_cpu(ev->interval),
5784 le16_to_cpu(ev->latency),
5785 le16_to_cpu(ev->supervision_timeout));
5786 }
5787
5788 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5789 struct sk_buff *skb)
5790 {
5791 struct hci_ev_le_enh_conn_complete *ev = data;
5792
5793 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5794
5795 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5796 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5797 le16_to_cpu(ev->interval),
5798 le16_to_cpu(ev->latency),
5799 le16_to_cpu(ev->supervision_timeout));
5800 }
5801
5802 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5803 struct sk_buff *skb)
5804 {
5805 struct hci_evt_le_ext_adv_set_term *ev = data;
5806 struct hci_conn *conn;
5807 struct adv_info *adv, *n;
5808
5809 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5810
5811 /* The Bluetooth Core 5.3 specification clearly states that this event
5812 * shall not be sent when the Host disables the advertising set. So in
5813 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5814 *
5815 * When the Host disables an advertising set, all cleanup is done via
5816 * its command callback and does not need to be duplicated here.
5817 */
5818 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5819 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5820 return;
5821 }
5822
5823 hci_dev_lock(hdev);
5824
5825 adv = hci_find_adv_instance(hdev, ev->handle);
5826
5827 if (ev->status) {
5828 if (!adv)
5829 goto unlock;
5830
5831 /* Remove advertising as it has been terminated */
5832 hci_remove_adv_instance(hdev, ev->handle);
5833 mgmt_advertising_removed(NULL, hdev, ev->handle);
5834
5835 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5836 if (adv->enabled)
5837 goto unlock;
5838 }
5839
5840 /* We are no longer advertising, clear HCI_LE_ADV */
5841 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5842 goto unlock;
5843 }
5844
5845 if (adv)
5846 adv->enabled = false;
5847
5848 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5849 if (conn) {
5850 /* Store handle in the connection so the correct advertising
5851 * instance can be re-enabled when disconnected.
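*
* Handle 0 is the legacy instance, whose own address lives in
* hdev->random_addr rather than in an adv_info entry; that is why
* the !ev->handle case below copies from hdev->random_addr.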
5852 */
5853 conn->adv_instance = ev->handle;
5854
5855 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5856 bacmp(&conn->resp_addr, BDADDR_ANY))
5857 goto unlock;
5858
5859 if (!ev->handle) {
5860 bacpy(&conn->resp_addr, &hdev->random_addr);
5861 goto unlock;
5862 }
5863
5864 if (adv)
5865 bacpy(&conn->resp_addr, &adv->random_addr);
5866 }
5867
5868 unlock:
5869 hci_dev_unlock(hdev);
5870 }
5871
5872 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5873 struct sk_buff *skb)
5874 {
5875 struct hci_ev_le_conn_update_complete *ev = data;
5876 struct hci_conn *conn;
5877
5878 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5879
5880 if (ev->status)
5881 return;
5882
5883 hci_dev_lock(hdev);
5884
5885 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5886 if (conn) {
5887 conn->le_conn_interval = le16_to_cpu(ev->interval);
5888 conn->le_conn_latency = le16_to_cpu(ev->latency);
5889 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5890 }
5891
5892 hci_dev_unlock(hdev);
5893 }
5894
5895 /* This function requires the caller holds hdev->lock */
5896 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5897 bdaddr_t *addr,
5898 u8 addr_type, bool addr_resolved,
5899 u8 adv_type, u8 phy, u8 sec_phy)
5900 {
5901 struct hci_conn *conn;
5902 struct hci_conn_params *params;
5903
5904 /* If the event is not connectable, don't proceed further */
5905 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5906 return NULL;
5907
5908 /* Ignore if the device is blocked or hdev is suspended */
5909 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5910 hdev->suspended)
5911 return NULL;
5912
5913 /* Most controllers will fail if we try to create new connections
5914 * while we have an existing one in peripheral role.
5915 */
5916 if (hdev->conn_hash.le_num_peripheral > 0 &&
5917 (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) ||
5918 !(hdev->le_states[3] & 0x10)))
5919 return NULL;
5920
5921 /* If we're not connectable, only connect to devices that we have
5922 * in our pend_le_conns list.
5923 */
5924 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5925 addr_type);
5926 if (!params)
5927 return NULL;
5928
5929 if (!params->explicit_connect) {
5930 switch (params->auto_connect) {
5931 case HCI_AUTO_CONN_DIRECT:
5932 /* Only devices advertising with ADV_DIRECT_IND are
5933 * triggering a connection attempt. This allows
5934 * incoming connections from peripheral devices.
5935 */
5936 if (adv_type != LE_ADV_DIRECT_IND)
5937 return NULL;
5938 break;
5939 case HCI_AUTO_CONN_ALWAYS:
5940 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5941 * are triggering a connection attempt. This means
5942 * that incoming connections from peripheral devices are
5943 * accepted and also outgoing connections to peripheral
5944 * devices are established when found.
5945 */
5946 break;
5947 default:
5948 return NULL;
5949 }
5950 }
5951
5952 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5953 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5954 HCI_ROLE_MASTER, phy, sec_phy);
5955 if (!IS_ERR(conn)) {
5956 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5957 * by the higher layer that tried to connect; if not, then
5958 * store the pointer, since we don't really have any
5959 * other owner of the object besides the params that
5960 * triggered it.
This way we can abort the connection if
5961 * the parameters get removed and keep the reference
5962 * count consistent once the connection is established.
5963 */
5964
5965 if (!params->explicit_connect)
5966 params->conn = hci_conn_get(conn);
5967
5968 return conn;
5969 }
5970
5971 switch (PTR_ERR(conn)) {
5972 case -EBUSY:
5973 /* If hci_connect() returns -EBUSY it means there is already
5974 * an LE connection attempt going on. Since controllers don't
5975 * support more than one connection attempt at a time, we
5976 * don't consider this an error case.
5977 */
5978 break;
5979 default:
5980 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5981 return NULL;
5982 }
5983
5984 return NULL;
5985 }
5986
5987 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5988 u8 bdaddr_type, bdaddr_t *direct_addr,
5989 u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
5990 u8 *data, u8 len, bool ext_adv, bool ctl_time,
5991 u64 instant)
5992 {
5993 struct discovery_state *d = &hdev->discovery;
5994 struct smp_irk *irk;
5995 struct hci_conn *conn;
5996 bool match, bdaddr_resolved;
5997 u32 flags;
5998 u8 *ptr;
5999
6000 switch (type) {
6001 case LE_ADV_IND:
6002 case LE_ADV_DIRECT_IND:
6003 case LE_ADV_SCAN_IND:
6004 case LE_ADV_NONCONN_IND:
6005 case LE_ADV_SCAN_RSP:
6006 break;
6007 default:
6008 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6009 "type: 0x%02x", type);
6010 return;
6011 }
6012
6013 if (len > max_adv_len(hdev)) {
6014 bt_dev_err_ratelimited(hdev,
6015 "adv larger than maximum supported");
6016 return;
6017 }
6018
6019 /* Find the end of the data in case the report contains padded zero
6020 * bytes at the end causing an invalid length value.
6021 *
6022 * When data is NULL, len is 0 so there is no need for extra ptr
6023 * check as 'ptr < data + 0' is already false in that case.
6024 */
6025 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6026 if (ptr + 1 + *ptr > data + len)
6027 break;
6028 }
6029
6030 /* Adjust for actual length. This handles the case when the remote
6031 * device is advertising with an incorrect data length.
6032 */
6033 len = ptr - data;
6034
6035 /* If the direct address is present, then this report is from
6036 * a LE Direct Advertising Report event. In that case it is
6037 * important to see if the address is matching the local
6038 * controller address.
6039 *
6040 * If local privacy is not enabled the controller shall not be
6041 * generating such an event, since according to its documentation it is
6042 * only valid for filter_policy 0x02 and 0x03. But the fact that it did
6043 * generate an LE Direct Advertising Report means it is probably broken
6044 * and won't generate any other event, which can potentially break the
6045 * auto-connect logic. So in case local privacy is not enabled, this
6046 * ignores the direct_addr so the report is handled as a regular one.
6047 */
6048 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr &&
6049 hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6050 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6051 &bdaddr_resolved);
6052
6053 /* Only resolvable random addresses are valid for these
6054 * kinds of reports; others can be ignored.
6055 */
6056 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6057 return;
6058
6059 /* If the local IRK of the controller does not match
6060 * the resolvable random address provided, then
6061 * this report can be ignored.
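*
* An RPA is 24 bits of prand plus a 24-bit hash of the prand
* computed with the IRK; smp_irk_matches() recomputes that hash to
* check for a match.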
6062 */
6063 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6064 return;
6065 }
6066
6067 /* Check if we need to convert to identity address */
6068 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6069 if (irk) {
6070 bdaddr = &irk->bdaddr;
6071 bdaddr_type = irk->addr_type;
6072 }
6073
6074 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6075
6076 /* Check if we have been requested to connect to this device.
6077 *
6078 * direct_addr is set only for directed advertising reports (it is NULL
6079 * for advertising reports) and is already verified to be an RPA above.
6080 */
6081 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6082 type, phy, sec_phy);
6083 if (!ext_adv && conn && type == LE_ADV_IND &&
6084 len <= max_adv_len(hdev)) {
6085 /* Store report for later inclusion by
6086 * mgmt_device_connected
6087 */
6088 memcpy(conn->le_adv_data, data, len);
6089 conn->le_adv_data_len = len;
6090 }
6091
6092 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6093 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6094 else
6095 flags = 0;
6096
6097 /* All scan results should be sent up for Mesh systems */
6098 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6099 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6100 rssi, flags, data, len, NULL, 0, instant);
6101 return;
6102 }
6103
6104 /* Passive scanning shouldn't trigger any device found events,
6105 * except for devices marked as CONN_REPORT for which we do send
6106 * device found events, or when advertisement monitoring has been requested.
6107 */
6108 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6109 if (type == LE_ADV_DIRECT_IND)
6110 return;
6111
6112 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6113 bdaddr, bdaddr_type) &&
6114 idr_is_empty(&hdev->adv_monitors_idr))
6115 return;
6116
6117 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6118 rssi, flags, data, len, NULL, 0, 0);
6119 return;
6120 }
6121
6122 /* When receiving a scan response, there is no way to
6123 * know if the remote device is connectable or not. However
6124 * since scan responses are merged with a previously seen
6125 * advertising report, the flags field from that report
6126 * will be used.
6127 *
6128 * In the unlikely case that a controller just sends a scan
6129 * response event that doesn't match the pending report, then
6130 * it is marked as a standalone SCAN_RSP.
6131 */
6132 if (type == LE_ADV_SCAN_RSP)
6133 flags = MGMT_DEV_FOUND_SCAN_RSP;
6134
6135 /* If there's nothing pending, either store the data from this
6136 * event or send an immediate device found event if the data
6137 * should not be stored for later.
6138 */
6139 if (!has_pending_adv_report(hdev)) {
6140 /* If the report will trigger a SCAN_REQ store it for
6141 * later merging.
6142 */
6143 if (!ext_adv && (type == LE_ADV_IND ||
6144 type == LE_ADV_SCAN_IND)) {
6145 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6146 rssi, flags, data, len);
6147 return;
6148 }
6149
6150 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6151 rssi, flags, data, len, NULL, 0, 0);
6152 return;
6153 }
6154
6155 /* Check if the pending report is for the same device as the new one */
6156 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6157 bdaddr_type == d->last_adv_addr_type);
6158
6159 /* If the pending data doesn't match this report or this isn't a
6160 * scan response (e.g. we got a duplicate ADV_IND) then force
6161 * sending of the pending data.
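*
* "Force sending" here means flushing the cached report on its own
* and then either caching or sending the new one, as done below.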
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= max_adv_len(hdev)) {
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   HCI_ADV_PHY_1M, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}

static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;

	if (!pdu_type)
		return LE_ADV_NONCONN_IND;

	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}
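/* For reference (Core spec, LE Extended Advertising Report): the low bits
 * of the extended Event_Type field decoded by ext_evt_type_to_legacy()
 * above are
 *
 *   bit 0    connectable advertising
 *   bit 1    scannable advertising
 *   bit 2    directed advertising
 *   bit 3    scan response
 *   bit 4    legacy advertising PDU used
 *   bits 5-6 data status (complete / truncated / more to come)
 *
 * which the function folds back into the legacy LE_ADV_* report types.
 */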
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);

		if (hci_test_quirk(hdev,
				   HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) {
			info->primary_phy &= 0x1f;
			info->secondary_phy &= 0x1f;
		}

		/* Check if PA Sync is pending, and if the hci_conn SID has
		 * not been set yet, update it.
		 */
		if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
			struct hci_conn *conn;

			conn = hci_conn_hash_lookup_create_pa_sync(hdev);
			if (conn && conn->sid == HCI_SID_INVALID)
				conn->sid = info->sid;
		}

		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->primary_phy,
					   info->secondary_phy,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
}

static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync, *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	conn = hci_conn_hash_lookup_create_pa_sync(hdev);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection for dst %pMR sid 0x%2.2x",
			   &ev->bdaddr, ev->sid);
		goto unlock;
	}

	clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	conn->sync_handle = le16_to_cpu(ev->handle);
	conn->sid = HCI_SID_INVALID;

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
				      &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Add connection to indicate PA sync event */
	pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->handle);

	if (ev->status) {
		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}
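/* A note on the mask/flags handshake above: hci_proto_connect_ind() gives
 * the protocol that owns this link type (for PA_LINK this is assumed to be
 * the ISO layer) a chance to veto the event via HCI_LM_ACCEPT and, by
 * setting HCI_PROTO_DEFER, to request a placeholder hci_conn now and an
 * hci_connect_cfm() notification later, once the outcome is known. The
 * same pattern recurs in the periodic advertising, CIS request and BIG
 * info report handlers below.
 */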
static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_per_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		goto unlock;

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
						      le16_to_cpu(ev->sync_handle));
	if (!pa_sync)
		goto unlock;

	if (ev->data_status == LE_PA_DATA_COMPLETE &&
	    !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
		/* Notify iso layer */
		hci_connect_cfm(pa_sync, 0);

		/* Notify MGMT layer */
		mgmt_device_connected(hdev, pa_sync, NULL, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
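	/* Worked example for the padding above (illustrative values only):
	 * cp.ltk is always 16 octets on the wire, so a key negotiated with
	 * enc_size 7 is zero-padded before sending, i.e. k0..k6 go out as
	 *
	 *   k0 k1 k2 k3 k4 k5 k6 00 00 00 00 00 00 00 00 00
	 */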
	/* Ref. Bluetooth Core SPEC pages 1975 and 2004: STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (max > hcon->le_conn_max_interval)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_direct_adv_report *ev = data;
	u64 instant = jiffies;
	int i;

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
				flex_array_size(ev, info, ev->num)))
		return;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct hci_ev_le_direct_adv_info *info = &ev->info[i];

		process_adv_report(hdev, info->type, &info->bdaddr,
				   info->bdaddr_type, &info->direct_addr,
				   info->direct_addr_type, HCI_ADV_PHY_1M, 0,
				   info->rssi, NULL, 0, false, false, instant);
	}

	hci_dev_unlock(hdev);
}
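/* A note on the validation pattern above: flex_array_size(ev, info,
 * ev->num) computes ev->num * sizeof(ev->info[0]) with overflow checking,
 * so the single hci_le_ev_skb_pull() call verifies that the skb really
 * carries all ev->num report entries before the loop dereferences them.
 */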
static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);
	u32 c_sdu_interval, p_sdu_interval;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != CIS_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
	 * page 3075:
	 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) x
	 * ISO_Interval + SDU_Interval_C_To_P
	 * ...
	 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
	 * Transport_Latency
	 */
	c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
			  (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
			 get_unaligned_le24(ev->c_latency);
	p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
			  (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
			 get_unaligned_le24(ev->p_latency);
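	/* Worked example (illustrative numbers only): with CIG_Sync_Delay =
	 * 5000 us, FT = 2 and ISO_Interval = 8 (i.e. 8 * 1.25 ms, hence the
	 * * 1250 above to get microseconds) and a reported Transport_Latency
	 * of 15000 us, the derived SDU_Interval is
	 * 5000 + 2 * 10000 - 15000 = 10000 us.
	 */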
	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		qos->ucast.in.interval = c_sdu_interval;
		qos->ucast.out.interval = p_sdu_interval;
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
		qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
		qos->ucast.in.phy = ev->c_phy;
		qos->ucast.out.phy = ev->p_phy;
		break;
	case HCI_ROLE_MASTER:
		qos->ucast.in.interval = p_sdu_interval;
		qos->ucast.out.interval = c_sdu_interval;
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
		qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
		qos->ucast.out.phy = ev->c_phy;
		qos->ucast.in.phy = ev->p_phy;
		break;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}

static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_reject_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
}

static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_accept_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
}

static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, CIS_LINK, &acl->dst,
				   HCI_ROLE_SLAVE, cis_handle);
		if (IS_ERR(cis)) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}

static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_UINT(data);

	return hci_le_terminate_big_sync(hdev, handle,
					 HCI_ERROR_LOCAL_HOST_TERM);
}

static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	/* Connect all BISes that are bound to the BIG */
	while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
						      BT_BOUND,
						      HCI_ROLE_MASTER))) {
		if (ev->status) {
			hci_connect_cfm(conn, ev->status);
			hci_conn_del(conn);
			continue;
		}

		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		conn->state = BT_CONNECTED;
		set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
	}

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}

static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_established *ev = data;
	struct hci_conn *bis, *conn;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
						  ev->num_bis);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection for big 0x%2.2x",
			   ev->handle);
		goto unlock;
	}

	clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);

	conn->num_bis = 0;
	memset(conn->bis, 0, sizeof(conn->bis));

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			if (handle > HCI_CONN_HANDLE_MAX) {
				bt_dev_dbg(hdev, "ignore too large handle %u",
					   handle);
				continue;
			}
			bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE, handle);
			if (IS_ERR(bis))
				continue;
		}

		/* Mark PA sync as established unless the status is 0x42
		 * (Unknown Advertising Identifier), in which case the
		 * periodic advertising train itself was never found.
		 */
		if (ev->status != 0x42) {
			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
			/* Reset cleanup callback of PA Sync so it doesn't
			 * terminate the sync when deleting the connection.
			 */
			conn->cleanup = NULL;
		}

		bis->sync_handle = conn->sync_handle;
		bis->iso_qos.bcast.big = ev->handle;
		/* Read the little-endian latency bytes into a zero-padded
		 * __le32 before converting.
		 */
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);

		if (!ev->status) {
			bis->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
			hci_debugfs_create_conn(bis);
			hci_conn_add_sysfs(bis);
			hci_iso_setup_path(bis);
		}
	}

	/* In case BIG sync failed, notify each failed connection to
	 * the user after all hci connections have been added
	 */
	if (ev->status)
		for (i = 0; i < ev->num_bis; i++) {
			u16 handle = le16_to_cpu(ev->bis[i]);

			bis = hci_conn_hash_lookup_handle(hdev, handle);
			if (!bis)
				continue;

			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
			hci_connect_cfm(bis, ev->status);
		}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_lost *ev = data;
	struct hci_conn *bis, *conn;

	bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);

	hci_dev_lock(hdev);

	/* Delete the pa sync connection */
	bis = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
	if (bis) {
		conn = hci_conn_hash_lookup_pa_sync_handle(hdev,
							   bis->sync_handle);
		if (conn)
			hci_conn_del(conn);
	}

	/* Delete each bis connection */
	while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
						     BT_CONNECTED,
						     HCI_ROLE_SLAVE))) {
		clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
		hci_disconn_cfm(bis, ev->reason);
		hci_conn_del(bis);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		goto unlock;

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
						      le16_to_cpu(ev->sync_handle));
	if (!pa_sync)
		goto unlock;

	pa_sync->iso_qos.bcast.encryption = ev->encryption;

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0);

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table must have their position set according to the
 * subevent opcode they handle, so using the macros above is recommended:
 * they initialize each entry at its proper index via designated
 * initializers, and events without a callback function can simply be
 * omitted.
 */
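/* For instance (illustrative expansion), the HCI_EV_LE_CONN_COMPLETE
 * entry below expands roughly to
 *
 *   [0x01] = {
 *           .func = hci_le_conn_complete_evt,
 *           .min_len = sizeof(struct hci_ev_le_conn_complete),
 *           .max_len = sizeof(struct hci_ev_le_conn_complete),
 *   },
 *
 * so table slots track subevent opcodes without manual bookkeeping.
 */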
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_established_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
		     hci_le_per_adv_report_evt,
		     sizeof(struct hci_ev_le_per_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABLISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_established),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST,
		     hci_le_big_sync_lost_evt,
		     sizeof(struct hci_evt_le_big_sync_lost),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};

static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->req_skb &&
	    (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
	     hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
	    hci_skb_event(hdev->req_skb) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->req_skb);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);

	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}

static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in
	 * the BlueZ mgmt API docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)

/* Entries in this table must have their position set according to the
 * event opcode they handle, so using the macros above is recommended:
 * they initialize each entry at its proper index via designated
 * initializers, and events without a callback function don't have to be
 * entered at all.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;
	u16  max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	hci_dev_lock(hdev);
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
	hci_dev_unlock(hdev);

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
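/* Illustrative sketch (not part of the upstream file; the helper name is
 * hypothetical): how an event typically reaches hci_event_packet(). A
 * transport driver tags the skb as an HCI event packet and hands it to
 * the core with hci_recv_frame(); the core's RX work then dispatches
 * HCI_EVENT_PKT skbs to hci_event_packet() above.
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	/* Mark the skb as an HCI event so the core RX path routes it to
	 * hci_event_packet().
	 */
	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	/* Queue the skb for the core RX work */
	return hci_recv_frame(hdev, skb);
}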