1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 4 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License version 2 as 9 published by the Free Software Foundation; 10 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 22 SOFTWARE IS DISCLAIMED. 23 */ 24 25 /* Bluetooth HCI event handling. */ 26 27 #include <asm/unaligned.h> 28 29 #include <net/bluetooth/bluetooth.h> 30 #include <net/bluetooth/hci_core.h> 31 #include <net/bluetooth/mgmt.h> 32 33 #include "hci_request.h" 34 #include "hci_debugfs.h" 35 #include "a2mp.h" 36 #include "amp.h" 37 #include "smp.h" 38 #include "msft.h" 39 #include "eir.h" 40 41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ 42 "\x00\x00\x00\x00\x00\x00\x00\x00" 43 44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000) 45 46 /* Handle HCI Event packets */ 47 48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 49 u8 ev, size_t len) 50 { 51 void *data; 52 53 data = skb_pull_data(skb, len); 54 if (!data) 55 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev); 56 57 return data; 58 } 59 60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 61 u16 op, size_t len) 62 { 63 void *data; 64 65 data = skb_pull_data(skb, len); 66 if (!data) 67 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op); 68 69 return data; 70 } 71 72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 73 u8 ev, size_t len) 74 { 75 void *data; 76 77 data = skb_pull_data(skb, len); 78 if (!data) 79 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev); 80 81 return data; 82 } 83 84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, 85 struct sk_buff *skb) 86 { 87 struct hci_ev_status *rp = data; 88 89 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 90 91 /* It is possible that we receive Inquiry Complete event right 92 * before we receive Inquiry Cancel Command Complete event, in 93 * which case the latter event should have status of Command 94 * Disallowed (0x0c). This should not be treated as error, since 95 * we actually achieve what Inquiry Cancel wants to achieve, 96 * which is to end the last Inquiry session. 97 */ 98 if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { 99 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); 100 rp->status = 0x00; 101 } 102 103 if (rp->status) 104 return rp->status; 105 106 clear_bit(HCI_INQUIRY, &hdev->flags); 107 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 108 wake_up_bit(&hdev->flags, HCI_INQUIRY); 109 110 hci_dev_lock(hdev); 111 /* Set discovery state to stopped if we're not doing LE active 112 * scanning. 113 */ 114 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 115 hdev->le_scan_type != LE_SCAN_ACTIVE) 116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 117 hci_dev_unlock(hdev); 118 119 hci_conn_check_pending(hdev); 120 121 return rp->status; 122 } 123 124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data, 125 struct sk_buff *skb) 126 { 127 struct hci_ev_status *rp = data; 128 129 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 130 131 if (rp->status) 132 return rp->status; 133 134 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); 135 136 return rp->status; 137 } 138 139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, 140 struct sk_buff *skb) 141 { 142 struct hci_ev_status *rp = data; 143 144 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 145 146 if (rp->status) 147 return rp->status; 148 149 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); 150 151 hci_conn_check_pending(hdev); 152 153 return rp->status; 154 } 155 156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, 157 struct sk_buff *skb) 158 { 159 struct hci_ev_status *rp = data; 160 161 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 162 163 return rp->status; 164 } 165 166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data, 167 struct sk_buff *skb) 168 { 169 struct hci_rp_role_discovery *rp = data; 170 struct hci_conn *conn; 171 172 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 173 174 if (rp->status) 175 return rp->status; 176 177 hci_dev_lock(hdev); 178 179 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 180 if (conn) 181 conn->role = rp->role; 182 183 hci_dev_unlock(hdev); 184 185 return rp->status; 186 } 187 188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data, 189 struct sk_buff *skb) 190 { 191 struct hci_rp_read_link_policy *rp = data; 192 struct hci_conn *conn; 193 194 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 195 196 if (rp->status) 197 return rp->status; 198 199 hci_dev_lock(hdev); 200 201 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 202 if (conn) 203 conn->link_policy = __le16_to_cpu(rp->policy); 204 205 hci_dev_unlock(hdev); 206 207 return rp->status; 208 } 209 210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data, 211 struct sk_buff *skb) 212 { 213 struct hci_rp_write_link_policy *rp = data; 214 struct hci_conn *conn; 215 void *sent; 216 217 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 218 219 if (rp->status) 220 return rp->status; 221 222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); 223 if (!sent) 224 return rp->status; 225 226 hci_dev_lock(hdev); 227 228 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 229 if (conn) 230 conn->link_policy = get_unaligned_le16(sent + 2); 231 232 hci_dev_unlock(hdev); 233 234 return rp->status; 235 } 236 237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data, 238 struct sk_buff *skb) 239 { 240 struct hci_rp_read_def_link_policy *rp = data; 241 242 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 243 244 if (rp->status) 245 return rp->status; 246 247 hdev->link_policy = __le16_to_cpu(rp->policy); 248 249 return rp->status; 250 } 251 252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data, 253 struct sk_buff *skb) 254 { 255 struct hci_ev_status *rp = data; 256 void *sent; 257 258 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 259 260 if (rp->status) 261 return rp->status; 262 263 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 264 if (!sent) 265 return rp->status; 266 267 hdev->link_policy = get_unaligned_le16(sent); 268 269 return rp->status; 270 } 271 272 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb) 273 { 274 struct hci_ev_status *rp = data; 275 276 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 277 278 clear_bit(HCI_RESET, &hdev->flags); 279 280 if (rp->status) 281 return rp->status; 282 283 /* Reset all non-persistent flags */ 284 hci_dev_clear_volatile_flags(hdev); 285 286 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 287 288 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 289 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 290 291 memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); 292 hdev->adv_data_len = 0; 293 294 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); 295 hdev->scan_rsp_data_len = 0; 296 297 hdev->le_scan_type = LE_SCAN_PASSIVE; 298 299 hdev->ssp_debug_mode = 0; 300 301 hci_bdaddr_list_clear(&hdev->le_accept_list); 302 hci_bdaddr_list_clear(&hdev->le_resolv_list); 303 304 return rp->status; 305 } 306 307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data, 308 struct sk_buff *skb) 309 { 310 struct hci_rp_read_stored_link_key *rp = data; 311 struct hci_cp_read_stored_link_key *sent; 312 313 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 314 315 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY); 316 if (!sent) 317 return rp->status; 318 319 if (!rp->status && sent->read_all == 0x01) { 320 hdev->stored_max_keys = le16_to_cpu(rp->max_keys); 321 hdev->stored_num_keys = le16_to_cpu(rp->num_keys); 322 } 323 324 return rp->status; 325 } 326 327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data, 328 struct sk_buff *skb) 329 { 330 struct hci_rp_delete_stored_link_key *rp = data; 331 u16 num_keys; 332 333 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 334 335 if (rp->status) 336 return rp->status; 337 338 num_keys = le16_to_cpu(rp->num_keys); 339 340 if (num_keys <= hdev->stored_num_keys) 341 hdev->stored_num_keys -= num_keys; 342 else 343 hdev->stored_num_keys = 0; 344 345 return rp->status; 346 } 347 348 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data, 349 struct sk_buff *skb) 350 { 351 struct hci_ev_status *rp = data; 352 void *sent; 353 354 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 355 356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 357 if (!sent) 358 return rp->status; 359 360 hci_dev_lock(hdev); 361 362 if (hci_dev_test_flag(hdev, HCI_MGMT)) 363 mgmt_set_local_name_complete(hdev, sent, rp->status); 364 else if (!rp->status) 365 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 366 367 hci_dev_unlock(hdev); 368 369 return rp->status; 370 } 371 372 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data, 373 struct sk_buff *skb) 374 { 375 struct hci_rp_read_local_name *rp = data; 376 377 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 378 379 if (rp->status) 380 return rp->status; 381 382 if (hci_dev_test_flag(hdev, HCI_SETUP) || 383 hci_dev_test_flag(hdev, HCI_CONFIG)) 384 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 385 386 return rp->status; 387 } 388 389 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data, 390 struct sk_buff *skb) 391 { 392 struct hci_ev_status *rp = data; 393 void *sent; 394 395 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 396 397 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 398 if (!sent) 399 return rp->status; 400 401 hci_dev_lock(hdev); 402 403 if (!rp->status) { 404 __u8 param = *((__u8 *) sent); 405 406 if (param == AUTH_ENABLED) 407 set_bit(HCI_AUTH, &hdev->flags); 408 else 409 clear_bit(HCI_AUTH, &hdev->flags); 410 } 411 412 if (hci_dev_test_flag(hdev, HCI_MGMT)) 413 mgmt_auth_enable_complete(hdev, rp->status); 414 415 hci_dev_unlock(hdev); 416 417 return rp->status; 418 } 419 420 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data, 421 struct sk_buff *skb) 422 { 423 struct hci_ev_status *rp = data; 424 __u8 param; 425 void *sent; 426 427 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 428 429 if (rp->status) 430 return rp->status; 431 432 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 433 if (!sent) 434 return rp->status; 435 436 param = *((__u8 *) sent); 437 438 if (param) 439 set_bit(HCI_ENCRYPT, &hdev->flags); 440 else 441 clear_bit(HCI_ENCRYPT, &hdev->flags); 442 443 return rp->status; 444 } 445 446 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data, 447 struct sk_buff *skb) 448 { 449 struct hci_ev_status *rp = data; 450 __u8 param; 451 void *sent; 452 453 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 454 455 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 456 if (!sent) 457 return rp->status; 458 459 param = *((__u8 *) sent); 460 461 hci_dev_lock(hdev); 462 463 if (rp->status) { 464 hdev->discov_timeout = 0; 465 goto done; 466 } 467 468 if (param & SCAN_INQUIRY) 469 set_bit(HCI_ISCAN, &hdev->flags); 470 else 471 clear_bit(HCI_ISCAN, &hdev->flags); 472 473 if (param & SCAN_PAGE) 474 set_bit(HCI_PSCAN, &hdev->flags); 475 else 476 clear_bit(HCI_PSCAN, &hdev->flags); 477 478 done: 479 hci_dev_unlock(hdev); 480 481 return rp->status; 482 } 483 484 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data, 485 struct sk_buff *skb) 486 { 487 struct hci_ev_status *rp = data; 488 struct hci_cp_set_event_filter *cp; 489 void *sent; 490 491 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 492 493 if (rp->status) 494 return rp->status; 495 496 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT); 497 if (!sent) 498 return rp->status; 499 500 cp = (struct hci_cp_set_event_filter *)sent; 501 502 if (cp->flt_type == HCI_FLT_CLEAR_ALL) 503 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); 504 else 505 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); 506 507 return rp->status; 508 } 509 510 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data, 511 struct sk_buff *skb) 512 { 513 struct hci_rp_read_class_of_dev *rp = data; 514 515 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 516 517 if (rp->status) 518 return rp->status; 519 520 memcpy(hdev->dev_class, rp->dev_class, 3); 521 522 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2], 523 hdev->dev_class[1], hdev->dev_class[0]); 524 525 return rp->status; 526 } 527 528 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data, 529 struct sk_buff *skb) 530 { 531 struct hci_ev_status *rp = data; 532 void *sent; 533 534 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 535 536 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 537 if (!sent) 538 return rp->status; 539 540 hci_dev_lock(hdev); 541 542 if (!rp->status) 543 memcpy(hdev->dev_class, sent, 3); 544 545 if (hci_dev_test_flag(hdev, HCI_MGMT)) 546 mgmt_set_class_of_dev_complete(hdev, sent, rp->status); 547 548 hci_dev_unlock(hdev); 549 550 return rp->status; 551 } 552 553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data, 554 struct sk_buff *skb) 555 { 556 struct hci_rp_read_voice_setting *rp = data; 557 __u16 setting; 558 559 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 560 561 if (rp->status) 562 return rp->status; 563 564 setting = __le16_to_cpu(rp->voice_setting); 565 566 if (hdev->voice_setting == setting) 567 return rp->status; 568 569 hdev->voice_setting = setting; 570 571 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 572 573 if (hdev->notify) 574 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 575 576 return rp->status; 577 } 578 579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data, 580 struct sk_buff *skb) 581 { 582 struct hci_ev_status *rp = data; 583 __u16 setting; 584 void *sent; 585 586 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 587 588 if (rp->status) 589 return rp->status; 590 591 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 592 if (!sent) 593 return rp->status; 594 595 setting = get_unaligned_le16(sent); 596 597 if (hdev->voice_setting == setting) 598 return rp->status; 599 600 hdev->voice_setting = setting; 601 602 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 603 604 if (hdev->notify) 605 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 606 607 return rp->status; 608 } 609 610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data, 611 struct sk_buff *skb) 612 { 613 struct hci_rp_read_num_supported_iac *rp = data; 614 615 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 616 617 if (rp->status) 618 return rp->status; 619 620 hdev->num_iac = rp->num_iac; 621 622 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac); 623 624 return rp->status; 625 } 626 627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data, 628 struct sk_buff *skb) 629 { 630 struct hci_ev_status *rp = data; 631 struct hci_cp_write_ssp_mode *sent; 632 633 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 634 635 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 636 if (!sent) 637 return rp->status; 638 639 hci_dev_lock(hdev); 640 641 if (!rp->status) { 642 if (sent->mode) 643 hdev->features[1][0] |= LMP_HOST_SSP; 644 else 645 hdev->features[1][0] &= ~LMP_HOST_SSP; 646 } 647 648 if (!rp->status) { 649 if (sent->mode) 650 hci_dev_set_flag(hdev, HCI_SSP_ENABLED); 651 else 652 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); 653 } 654 655 hci_dev_unlock(hdev); 656 657 return rp->status; 658 } 659 660 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data, 661 struct sk_buff *skb) 662 { 663 struct hci_ev_status *rp = data; 664 struct hci_cp_write_sc_support *sent; 665 666 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 667 668 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); 669 if (!sent) 670 return rp->status; 671 672 hci_dev_lock(hdev); 673 674 if (!rp->status) { 675 if (sent->support) 676 hdev->features[1][0] |= LMP_HOST_SC; 677 else 678 hdev->features[1][0] &= ~LMP_HOST_SC; 679 } 680 681 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) { 682 if (sent->support) 683 hci_dev_set_flag(hdev, HCI_SC_ENABLED); 684 else 685 hci_dev_clear_flag(hdev, HCI_SC_ENABLED); 686 } 687 688 hci_dev_unlock(hdev); 689 690 return rp->status; 691 } 692 693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data, 694 struct sk_buff *skb) 695 { 696 struct hci_rp_read_local_version *rp = data; 697 698 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 699 700 if (rp->status) 701 return rp->status; 702 703 if (hci_dev_test_flag(hdev, HCI_SETUP) || 704 hci_dev_test_flag(hdev, HCI_CONFIG)) { 705 hdev->hci_ver = rp->hci_ver; 706 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 707 hdev->lmp_ver = rp->lmp_ver; 708 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 709 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 710 } 711 712 return rp->status; 713 } 714 715 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data, 716 struct sk_buff *skb) 717 { 718 struct hci_rp_read_enc_key_size *rp = data; 719 struct hci_conn *conn; 720 u16 handle; 721 u8 status = rp->status; 722 723 bt_dev_dbg(hdev, "status 0x%2.2x", status); 724 725 handle = le16_to_cpu(rp->handle); 726 727 hci_dev_lock(hdev); 728 729 conn = hci_conn_hash_lookup_handle(hdev, handle); 730 if (!conn) { 731 status = 0xFF; 732 goto done; 733 } 734 735 /* While unexpected, the read_enc_key_size command may fail. The most 736 * secure approach is to then assume the key size is 0 to force a 737 * disconnection. 738 */ 739 if (status) { 740 bt_dev_err(hdev, "failed to read key size for handle %u", 741 handle); 742 conn->enc_key_size = 0; 743 } else { 744 conn->enc_key_size = rp->key_size; 745 status = 0; 746 } 747 748 hci_encrypt_cfm(conn, 0); 749 750 done: 751 hci_dev_unlock(hdev); 752 753 return status; 754 } 755 756 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data, 757 struct sk_buff *skb) 758 { 759 struct hci_rp_read_local_commands *rp = data; 760 761 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 762 763 if (rp->status) 764 return rp->status; 765 766 if (hci_dev_test_flag(hdev, HCI_SETUP) || 767 hci_dev_test_flag(hdev, HCI_CONFIG)) 768 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 769 770 return rp->status; 771 } 772 773 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data, 774 struct sk_buff *skb) 775 { 776 struct hci_rp_read_auth_payload_to *rp = data; 777 struct hci_conn *conn; 778 779 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 780 781 if (rp->status) 782 return rp->status; 783 784 hci_dev_lock(hdev); 785 786 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 787 if (conn) 788 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout); 789 790 hci_dev_unlock(hdev); 791 792 return rp->status; 793 } 794 795 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data, 796 struct sk_buff *skb) 797 { 798 struct hci_rp_write_auth_payload_to *rp = data; 799 struct hci_conn *conn; 800 void *sent; 801 802 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 803 804 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO); 805 if (!sent) 806 return rp->status; 807 808 hci_dev_lock(hdev); 809 810 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 811 if (!conn) { 812 rp->status = 0xff; 813 goto unlock; 814 } 815 816 if (!rp->status) 817 conn->auth_payload_timeout = get_unaligned_le16(sent + 2); 818 819 hci_encrypt_cfm(conn, 0); 820 821 unlock: 822 hci_dev_unlock(hdev); 823 824 return rp->status; 825 } 826 827 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data, 828 struct sk_buff *skb) 829 { 830 struct hci_rp_read_local_features *rp = data; 831 832 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 833 834 if (rp->status) 835 return rp->status; 836 837 memcpy(hdev->features, rp->features, 8); 838 839 /* Adjust default settings according to features 840 * supported by device. */ 841 842 if (hdev->features[0][0] & LMP_3SLOT) 843 hdev->pkt_type |= (HCI_DM3 | HCI_DH3); 844 845 if (hdev->features[0][0] & LMP_5SLOT) 846 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 847 848 if (hdev->features[0][1] & LMP_HV2) { 849 hdev->pkt_type |= (HCI_HV2); 850 hdev->esco_type |= (ESCO_HV2); 851 } 852 853 if (hdev->features[0][1] & LMP_HV3) { 854 hdev->pkt_type |= (HCI_HV3); 855 hdev->esco_type |= (ESCO_HV3); 856 } 857 858 if (lmp_esco_capable(hdev)) 859 hdev->esco_type |= (ESCO_EV3); 860 861 if (hdev->features[0][4] & LMP_EV4) 862 hdev->esco_type |= (ESCO_EV4); 863 864 if (hdev->features[0][4] & LMP_EV5) 865 hdev->esco_type |= (ESCO_EV5); 866 867 if (hdev->features[0][5] & LMP_EDR_ESCO_2M) 868 hdev->esco_type |= (ESCO_2EV3); 869 870 if (hdev->features[0][5] & LMP_EDR_ESCO_3M) 871 hdev->esco_type |= (ESCO_3EV3); 872 873 if (hdev->features[0][5] & LMP_EDR_3S_ESCO) 874 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 875 876 return rp->status; 877 } 878 879 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, 880 struct sk_buff *skb) 881 { 882 struct hci_rp_read_local_ext_features *rp = data; 883 884 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 885 886 if (rp->status) 887 return rp->status; 888 889 if (hdev->max_page < rp->max_page) 890 hdev->max_page = rp->max_page; 891 892 if (rp->page < HCI_MAX_PAGES) 893 memcpy(hdev->features[rp->page], rp->features, 8); 894 895 return rp->status; 896 } 897 898 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data, 899 struct sk_buff *skb) 900 { 901 struct hci_rp_read_flow_control_mode *rp = data; 902 903 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 904 905 if (rp->status) 906 return rp->status; 907 908 hdev->flow_ctl_mode = rp->mode; 909 910 return rp->status; 911 } 912 913 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, 914 struct sk_buff *skb) 915 { 916 struct hci_rp_read_buffer_size *rp = data; 917 918 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 919 920 if (rp->status) 921 return rp->status; 922 923 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); 924 hdev->sco_mtu = rp->sco_mtu; 925 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); 926 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); 927 928 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 929 hdev->sco_mtu = 64; 930 hdev->sco_pkts = 8; 931 } 932 933 hdev->acl_cnt = hdev->acl_pkts; 934 hdev->sco_cnt = hdev->sco_pkts; 935 936 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, 937 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); 938 939 return rp->status; 940 } 941 942 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data, 943 struct sk_buff *skb) 944 { 945 struct hci_rp_read_bd_addr *rp = data; 946 947 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 948 949 if (rp->status) 950 return rp->status; 951 952 if (test_bit(HCI_INIT, &hdev->flags)) 953 bacpy(&hdev->bdaddr, &rp->bdaddr); 954 955 if (hci_dev_test_flag(hdev, HCI_SETUP)) 956 bacpy(&hdev->setup_addr, &rp->bdaddr); 957 958 return rp->status; 959 } 960 961 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data, 962 struct sk_buff *skb) 963 { 964 struct hci_rp_read_local_pairing_opts *rp = data; 965 966 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 967 968 if (rp->status) 969 return rp->status; 970 971 if (hci_dev_test_flag(hdev, HCI_SETUP) || 972 hci_dev_test_flag(hdev, HCI_CONFIG)) { 973 hdev->pairing_opts = rp->pairing_opts; 974 hdev->max_enc_key_size = rp->max_key_size; 975 } 976 977 return rp->status; 978 } 979 980 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data, 981 struct sk_buff *skb) 982 { 983 struct hci_rp_read_page_scan_activity *rp = data; 984 985 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 986 987 if (rp->status) 988 return rp->status; 989 990 if (test_bit(HCI_INIT, &hdev->flags)) { 991 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 992 hdev->page_scan_window = __le16_to_cpu(rp->window); 993 } 994 995 return rp->status; 996 } 997 998 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data, 999 struct sk_buff *skb) 1000 { 1001 struct hci_ev_status *rp = data; 1002 struct hci_cp_write_page_scan_activity *sent; 1003 1004 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1005 1006 if (rp->status) 1007 return rp->status; 1008 1009 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); 1010 if (!sent) 1011 return rp->status; 1012 1013 hdev->page_scan_interval = __le16_to_cpu(sent->interval); 1014 hdev->page_scan_window = __le16_to_cpu(sent->window); 1015 1016 return rp->status; 1017 } 1018 1019 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data, 1020 struct sk_buff *skb) 1021 { 1022 struct hci_rp_read_page_scan_type *rp = data; 1023 1024 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1025 1026 if (rp->status) 1027 return rp->status; 1028 1029 if (test_bit(HCI_INIT, &hdev->flags)) 1030 hdev->page_scan_type = rp->type; 1031 1032 return rp->status; 1033 } 1034 1035 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data, 1036 struct sk_buff *skb) 1037 { 1038 struct hci_ev_status *rp = data; 1039 u8 *type; 1040 1041 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1042 1043 if (rp->status) 1044 return rp->status; 1045 1046 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); 1047 if (type) 1048 hdev->page_scan_type = *type; 1049 1050 return rp->status; 1051 } 1052 1053 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data, 1054 struct sk_buff *skb) 1055 { 1056 struct hci_rp_read_data_block_size *rp = data; 1057 1058 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1059 1060 if (rp->status) 1061 return rp->status; 1062 1063 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); 1064 hdev->block_len = __le16_to_cpu(rp->block_len); 1065 hdev->num_blocks = __le16_to_cpu(rp->num_blocks); 1066 1067 hdev->block_cnt = hdev->num_blocks; 1068 1069 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 1070 hdev->block_cnt, hdev->block_len); 1071 1072 return rp->status; 1073 } 1074 1075 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data, 1076 struct sk_buff *skb) 1077 { 1078 struct hci_rp_read_clock *rp = data; 1079 struct hci_cp_read_clock *cp; 1080 struct hci_conn *conn; 1081 1082 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1083 1084 if (rp->status) 1085 return rp->status; 1086 1087 hci_dev_lock(hdev); 1088 1089 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); 1090 if (!cp) 1091 goto unlock; 1092 1093 if (cp->which == 0x00) { 1094 hdev->clock = le32_to_cpu(rp->clock); 1095 goto unlock; 1096 } 1097 1098 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 1099 if (conn) { 1100 conn->clock = le32_to_cpu(rp->clock); 1101 conn->clock_accuracy = le16_to_cpu(rp->accuracy); 1102 } 1103 1104 unlock: 1105 hci_dev_unlock(hdev); 1106 return rp->status; 1107 } 1108 1109 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data, 1110 struct sk_buff *skb) 1111 { 1112 struct hci_rp_read_local_amp_info *rp = data; 1113 1114 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1115 1116 if (rp->status) 1117 return rp->status; 1118 1119 hdev->amp_status = rp->amp_status; 1120 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); 1121 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); 1122 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); 1123 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); 1124 hdev->amp_type = rp->amp_type; 1125 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); 1126 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); 1127 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 1128 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 1129 1130 return rp->status; 1131 } 1132 1133 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data, 1134 struct sk_buff *skb) 1135 { 1136 struct hci_rp_read_inq_rsp_tx_power *rp = data; 1137 1138 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1139 1140 if (rp->status) 1141 return rp->status; 1142 1143 hdev->inq_tx_power = rp->tx_power; 1144 1145 return rp->status; 1146 } 1147 1148 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data, 1149 struct sk_buff *skb) 1150 { 1151 struct hci_rp_read_def_err_data_reporting *rp = data; 1152 1153 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1154 1155 if (rp->status) 1156 return rp->status; 1157 1158 hdev->err_data_reporting = rp->err_data_reporting; 1159 1160 return rp->status; 1161 } 1162 1163 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data, 1164 struct sk_buff *skb) 1165 { 1166 struct hci_ev_status *rp = data; 1167 struct hci_cp_write_def_err_data_reporting *cp; 1168 1169 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1170 1171 if (rp->status) 1172 return rp->status; 1173 1174 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING); 1175 if (!cp) 1176 return rp->status; 1177 1178 hdev->err_data_reporting = cp->err_data_reporting; 1179 1180 return rp->status; 1181 } 1182 1183 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data, 1184 struct sk_buff *skb) 1185 { 1186 struct hci_rp_pin_code_reply *rp = data; 1187 struct hci_cp_pin_code_reply *cp; 1188 struct hci_conn *conn; 1189 1190 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1191 1192 hci_dev_lock(hdev); 1193 1194 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1195 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 1196 1197 if (rp->status) 1198 goto unlock; 1199 1200 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 1201 if (!cp) 1202 goto unlock; 1203 1204 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1205 if (conn) 1206 conn->pin_length = cp->pin_len; 1207 1208 unlock: 1209 hci_dev_unlock(hdev); 1210 return rp->status; 1211 } 1212 1213 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data, 1214 struct sk_buff *skb) 1215 { 1216 struct hci_rp_pin_code_neg_reply *rp = data; 1217 1218 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1219 1220 hci_dev_lock(hdev); 1221 1222 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1223 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 1224 rp->status); 1225 1226 hci_dev_unlock(hdev); 1227 1228 return rp->status; 1229 } 1230 1231 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data, 1232 struct sk_buff *skb) 1233 { 1234 struct hci_rp_le_read_buffer_size *rp = data; 1235 1236 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1237 1238 if (rp->status) 1239 return rp->status; 1240 1241 hdev->le_mtu = __le16_to_cpu(rp->le_mtu); 1242 hdev->le_pkts = rp->le_max_pkt; 1243 1244 hdev->le_cnt = hdev->le_pkts; 1245 1246 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 1247 1248 return rp->status; 1249 } 1250 1251 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data, 1252 struct sk_buff *skb) 1253 { 1254 struct hci_rp_le_read_local_features *rp = data; 1255 1256 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1257 1258 if (rp->status) 1259 return rp->status; 1260 1261 memcpy(hdev->le_features, rp->features, 8); 1262 1263 return rp->status; 1264 } 1265 1266 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data, 1267 struct sk_buff *skb) 1268 { 1269 struct hci_rp_le_read_adv_tx_power *rp = data; 1270 1271 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1272 1273 if (rp->status) 1274 return rp->status; 1275 1276 hdev->adv_tx_power = rp->tx_power; 1277 1278 return rp->status; 1279 } 1280 1281 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data, 1282 struct sk_buff *skb) 1283 { 1284 struct hci_rp_user_confirm_reply *rp = data; 1285 1286 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1287 1288 hci_dev_lock(hdev); 1289 1290 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1291 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, 1292 rp->status); 1293 1294 hci_dev_unlock(hdev); 1295 1296 return rp->status; 1297 } 1298 1299 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data, 1300 struct sk_buff *skb) 1301 { 1302 struct hci_rp_user_confirm_reply *rp = data; 1303 1304 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1305 1306 hci_dev_lock(hdev); 1307 1308 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1309 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 1310 ACL_LINK, 0, rp->status); 1311 1312 hci_dev_unlock(hdev); 1313 1314 return rp->status; 1315 } 1316 1317 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data, 1318 struct sk_buff *skb) 1319 { 1320 struct hci_rp_user_confirm_reply *rp = data; 1321 1322 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1323 1324 hci_dev_lock(hdev); 1325 1326 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1327 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 1328 0, rp->status); 1329 1330 hci_dev_unlock(hdev); 1331 1332 return rp->status; 1333 } 1334 1335 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data, 1336 struct sk_buff *skb) 1337 { 1338 struct hci_rp_user_confirm_reply *rp = data; 1339 1340 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1341 1342 hci_dev_lock(hdev); 1343 1344 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1345 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 1346 ACL_LINK, 0, rp->status); 1347 1348 hci_dev_unlock(hdev); 1349 1350 return rp->status; 1351 } 1352 1353 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data, 1354 struct sk_buff *skb) 1355 { 1356 struct hci_rp_read_local_oob_data *rp = data; 1357 1358 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1359 1360 return rp->status; 1361 } 1362 1363 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data, 1364 struct sk_buff *skb) 1365 { 1366 struct hci_rp_read_local_oob_ext_data *rp = data; 1367 1368 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1369 1370 return rp->status; 1371 } 1372 1373 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data, 1374 struct sk_buff *skb) 1375 { 1376 struct hci_ev_status *rp = data; 1377 bdaddr_t *sent; 1378 1379 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1380 1381 if (rp->status) 1382 return rp->status; 1383 1384 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 1385 if (!sent) 1386 return rp->status; 1387 1388 hci_dev_lock(hdev); 1389 1390 bacpy(&hdev->random_addr, sent); 1391 1392 if (!bacmp(&hdev->rpa, sent)) { 1393 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); 1394 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, 1395 secs_to_jiffies(hdev->rpa_timeout)); 1396 } 1397 1398 hci_dev_unlock(hdev); 1399 1400 return rp->status; 1401 } 1402 1403 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data, 1404 struct sk_buff *skb) 1405 { 1406 struct hci_ev_status *rp = data; 1407 struct hci_cp_le_set_default_phy *cp; 1408 1409 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1410 1411 if (rp->status) 1412 return rp->status; 1413 1414 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); 1415 if (!cp) 1416 return rp->status; 1417 1418 hci_dev_lock(hdev); 1419 1420 hdev->le_tx_def_phys = cp->tx_phys; 1421 hdev->le_rx_def_phys = cp->rx_phys; 1422 1423 hci_dev_unlock(hdev); 1424 1425 return rp->status; 1426 } 1427 1428 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data, 1429 struct sk_buff *skb) 1430 { 1431 struct hci_ev_status *rp = data; 1432 struct hci_cp_le_set_adv_set_rand_addr *cp; 1433 struct adv_info *adv; 1434 1435 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1436 1437 if (rp->status) 1438 return rp->status; 1439 1440 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); 1441 /* Update only in case the adv instance since handle 0x00 shall be using 1442 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and 1443 * non-extended adverting. 1444 */ 1445 if (!cp || !cp->handle) 1446 return rp->status; 1447 1448 hci_dev_lock(hdev); 1449 1450 adv = hci_find_adv_instance(hdev, cp->handle); 1451 if (adv) { 1452 bacpy(&adv->random_addr, &cp->bdaddr); 1453 if (!bacmp(&hdev->rpa, &cp->bdaddr)) { 1454 adv->rpa_expired = false; 1455 queue_delayed_work(hdev->workqueue, 1456 &adv->rpa_expired_cb, 1457 secs_to_jiffies(hdev->rpa_timeout)); 1458 } 1459 } 1460 1461 hci_dev_unlock(hdev); 1462 1463 return rp->status; 1464 } 1465 1466 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data, 1467 struct sk_buff *skb) 1468 { 1469 struct hci_ev_status *rp = data; 1470 u8 *instance; 1471 int err; 1472 1473 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1474 1475 if (rp->status) 1476 return rp->status; 1477 1478 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET); 1479 if (!instance) 1480 return rp->status; 1481 1482 hci_dev_lock(hdev); 1483 1484 err = hci_remove_adv_instance(hdev, *instance); 1485 if (!err) 1486 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, 1487 *instance); 1488 1489 hci_dev_unlock(hdev); 1490 1491 return rp->status; 1492 } 1493 1494 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data, 1495 struct sk_buff *skb) 1496 { 1497 struct hci_ev_status *rp = data; 1498 struct adv_info *adv, *n; 1499 int err; 1500 1501 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1502 1503 if (rp->status) 1504 return rp->status; 1505 1506 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS)) 1507 return rp->status; 1508 1509 hci_dev_lock(hdev); 1510 1511 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 1512 u8 instance = adv->instance; 1513 1514 err = hci_remove_adv_instance(hdev, instance); 1515 if (!err) 1516 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), 1517 hdev, instance); 1518 } 1519 1520 hci_dev_unlock(hdev); 1521 1522 return rp->status; 1523 } 1524 1525 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data, 1526 struct sk_buff *skb) 1527 { 1528 struct hci_rp_le_read_transmit_power *rp = data; 1529 1530 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1531 1532 if (rp->status) 1533 return rp->status; 1534 1535 hdev->min_le_tx_power = rp->min_le_tx_power; 1536 hdev->max_le_tx_power = rp->max_le_tx_power; 1537 1538 return rp->status; 1539 } 1540 1541 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data, 1542 struct sk_buff *skb) 1543 { 1544 struct hci_ev_status *rp = data; 1545 struct hci_cp_le_set_privacy_mode *cp; 1546 struct hci_conn_params *params; 1547 1548 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1549 1550 if (rp->status) 1551 return rp->status; 1552 1553 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE); 1554 if (!cp) 1555 return rp->status; 1556 1557 hci_dev_lock(hdev); 1558 1559 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type); 1560 if (params) 1561 params->privacy_mode = cp->mode; 1562 1563 hci_dev_unlock(hdev); 1564 1565 return rp->status; 1566 } 1567 1568 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data, 1569 struct sk_buff *skb) 1570 { 1571 struct hci_ev_status *rp = data; 1572 __u8 *sent; 1573 1574 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1575 1576 if (rp->status) 1577 return rp->status; 1578 1579 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); 1580 if (!sent) 1581 return rp->status; 1582 1583 hci_dev_lock(hdev); 1584 1585 /* If we're doing connection initiation as peripheral. Set a 1586 * timeout in case something goes wrong. 1587 */ 1588 if (*sent) { 1589 struct hci_conn *conn; 1590 1591 hci_dev_set_flag(hdev, HCI_LE_ADV); 1592 1593 conn = hci_lookup_le_connect(hdev); 1594 if (conn) 1595 queue_delayed_work(hdev->workqueue, 1596 &conn->le_conn_timeout, 1597 conn->conn_timeout); 1598 } else { 1599 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1600 } 1601 1602 hci_dev_unlock(hdev); 1603 1604 return rp->status; 1605 } 1606 1607 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, 1608 struct sk_buff *skb) 1609 { 1610 struct hci_cp_le_set_ext_adv_enable *cp; 1611 struct hci_cp_ext_adv_set *set; 1612 struct adv_info *adv = NULL, *n; 1613 struct hci_ev_status *rp = data; 1614 1615 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1616 1617 if (rp->status) 1618 return rp->status; 1619 1620 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); 1621 if (!cp) 1622 return rp->status; 1623 1624 set = (void *)cp->data; 1625 1626 hci_dev_lock(hdev); 1627 1628 if (cp->num_of_sets) 1629 adv = hci_find_adv_instance(hdev, set->handle); 1630 1631 if (cp->enable) { 1632 struct hci_conn *conn; 1633 1634 hci_dev_set_flag(hdev, HCI_LE_ADV); 1635 1636 if (adv) 1637 adv->enabled = true; 1638 1639 conn = hci_lookup_le_connect(hdev); 1640 if (conn) 1641 queue_delayed_work(hdev->workqueue, 1642 &conn->le_conn_timeout, 1643 conn->conn_timeout); 1644 } else { 1645 if (cp->num_of_sets) { 1646 if (adv) 1647 adv->enabled = false; 1648 1649 /* If just one instance was disabled check if there are 1650 * any other instance enabled before clearing HCI_LE_ADV 1651 */ 1652 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 1653 list) { 1654 if (adv->enabled) 1655 goto unlock; 1656 } 1657 } else { 1658 /* All instances shall be considered disabled */ 1659 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 1660 list) 1661 adv->enabled = false; 1662 } 1663 1664 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1665 } 1666 1667 unlock: 1668 hci_dev_unlock(hdev); 1669 return rp->status; 1670 } 1671 1672 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data, 1673 struct sk_buff *skb) 1674 { 1675 struct hci_cp_le_set_scan_param *cp; 1676 struct hci_ev_status *rp = data; 1677 1678 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1679 1680 if (rp->status) 1681 return rp->status; 1682 1683 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); 1684 if (!cp) 1685 return rp->status; 1686 1687 hci_dev_lock(hdev); 1688 1689 hdev->le_scan_type = cp->type; 1690 1691 hci_dev_unlock(hdev); 1692 1693 return rp->status; 1694 } 1695 1696 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data, 1697 struct sk_buff *skb) 1698 { 1699 struct hci_cp_le_set_ext_scan_params *cp; 1700 struct hci_ev_status *rp = data; 1701 struct hci_cp_le_scan_phy_params *phy_param; 1702 1703 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1704 1705 if (rp->status) 1706 return rp->status; 1707 1708 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); 1709 if (!cp) 1710 return rp->status; 1711 1712 phy_param = (void *)cp->data; 1713 1714 hci_dev_lock(hdev); 1715 1716 hdev->le_scan_type = phy_param->type; 1717 1718 hci_dev_unlock(hdev); 1719 1720 return rp->status; 1721 } 1722 1723 static bool has_pending_adv_report(struct hci_dev *hdev) 1724 { 1725 struct discovery_state *d = &hdev->discovery; 1726 1727 return bacmp(&d->last_adv_addr, BDADDR_ANY); 1728 } 1729 1730 static void clear_pending_adv_report(struct hci_dev *hdev) 1731 { 1732 struct discovery_state *d = &hdev->discovery; 1733 1734 bacpy(&d->last_adv_addr, BDADDR_ANY); 1735 d->last_adv_data_len = 0; 1736 } 1737 1738 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, 1739 u8 bdaddr_type, s8 rssi, u32 flags, 1740 u8 *data, u8 len) 1741 { 1742 struct discovery_state *d = &hdev->discovery; 1743 1744 if (len > HCI_MAX_AD_LENGTH) 1745 return; 1746 1747 bacpy(&d->last_adv_addr, bdaddr); 1748 d->last_adv_addr_type = bdaddr_type; 1749 d->last_adv_rssi = rssi; 1750 d->last_adv_flags = flags; 1751 memcpy(d->last_adv_data, data, len); 1752 d->last_adv_data_len = len; 1753 } 1754 1755 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) 1756 { 1757 hci_dev_lock(hdev); 1758 1759 switch (enable) { 1760 case LE_SCAN_ENABLE: 1761 hci_dev_set_flag(hdev, HCI_LE_SCAN); 1762 if (hdev->le_scan_type == LE_SCAN_ACTIVE) 1763 clear_pending_adv_report(hdev); 1764 if (hci_dev_test_flag(hdev, HCI_MESH)) 1765 hci_discovery_set_state(hdev, DISCOVERY_FINDING); 1766 break; 1767 1768 case LE_SCAN_DISABLE: 1769 /* We do this here instead of when setting DISCOVERY_STOPPED 1770 * since the latter would potentially require waiting for 1771 * inquiry to stop too. 1772 */ 1773 if (has_pending_adv_report(hdev)) { 1774 struct discovery_state *d = &hdev->discovery; 1775 1776 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 1777 d->last_adv_addr_type, NULL, 1778 d->last_adv_rssi, d->last_adv_flags, 1779 d->last_adv_data, 1780 d->last_adv_data_len, NULL, 0, 0); 1781 } 1782 1783 /* Cancel this timer so that we don't try to disable scanning 1784 * when it's already disabled. 1785 */ 1786 cancel_delayed_work(&hdev->le_scan_disable); 1787 1788 hci_dev_clear_flag(hdev, HCI_LE_SCAN); 1789 1790 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we 1791 * interrupted scanning due to a connect request. Mark 1792 * therefore discovery as stopped. 1793 */ 1794 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) 1795 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1796 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && 1797 hdev->discovery.state == DISCOVERY_FINDING) 1798 queue_work(hdev->workqueue, &hdev->reenable_adv_work); 1799 1800 break; 1801 1802 default: 1803 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", 1804 enable); 1805 break; 1806 } 1807 1808 hci_dev_unlock(hdev); 1809 } 1810 1811 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data, 1812 struct sk_buff *skb) 1813 { 1814 struct hci_cp_le_set_scan_enable *cp; 1815 struct hci_ev_status *rp = data; 1816 1817 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1818 1819 if (rp->status) 1820 return rp->status; 1821 1822 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1823 if (!cp) 1824 return rp->status; 1825 1826 le_set_scan_enable_complete(hdev, cp->enable); 1827 1828 return rp->status; 1829 } 1830 1831 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data, 1832 struct sk_buff *skb) 1833 { 1834 struct hci_cp_le_set_ext_scan_enable *cp; 1835 struct hci_ev_status *rp = data; 1836 1837 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1838 1839 if (rp->status) 1840 return rp->status; 1841 1842 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); 1843 if (!cp) 1844 return rp->status; 1845 1846 le_set_scan_enable_complete(hdev, cp->enable); 1847 1848 return rp->status; 1849 } 1850 1851 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data, 1852 struct sk_buff *skb) 1853 { 1854 struct hci_rp_le_read_num_supported_adv_sets *rp = data; 1855 1856 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status, 1857 rp->num_of_sets); 1858 1859 if (rp->status) 1860 return rp->status; 1861 1862 hdev->le_num_of_adv_sets = rp->num_of_sets; 1863 1864 return rp->status; 1865 } 1866 1867 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data, 1868 struct sk_buff *skb) 1869 { 1870 struct hci_rp_le_read_accept_list_size *rp = data; 1871 1872 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); 1873 1874 if (rp->status) 1875 return rp->status; 1876 1877 hdev->le_accept_list_size = rp->size; 1878 1879 return rp->status; 1880 } 1881 1882 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data, 1883 struct sk_buff *skb) 1884 { 1885 struct hci_ev_status *rp = data; 1886 1887 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1888 1889 if (rp->status) 1890 return rp->status; 1891 1892 hci_dev_lock(hdev); 1893 hci_bdaddr_list_clear(&hdev->le_accept_list); 1894 hci_dev_unlock(hdev); 1895 1896 return rp->status; 1897 } 1898 1899 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data, 1900 struct sk_buff *skb) 1901 { 1902 struct hci_cp_le_add_to_accept_list *sent; 1903 struct hci_ev_status *rp = data; 1904 1905 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1906 1907 if (rp->status) 1908 return rp->status; 1909 1910 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); 1911 if (!sent) 1912 return rp->status; 1913 1914 hci_dev_lock(hdev); 1915 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr, 1916 sent->bdaddr_type); 1917 hci_dev_unlock(hdev); 1918 1919 return rp->status; 1920 } 1921 1922 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data, 1923 struct sk_buff *skb) 1924 { 1925 struct hci_cp_le_del_from_accept_list *sent; 1926 struct hci_ev_status *rp = data; 1927 1928 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1929 1930 if (rp->status) 1931 return rp->status; 1932 1933 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST); 1934 if (!sent) 1935 return rp->status; 1936 1937 hci_dev_lock(hdev); 1938 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr, 1939 sent->bdaddr_type); 1940 hci_dev_unlock(hdev); 1941 1942 return rp->status; 1943 } 1944 1945 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data, 1946 struct sk_buff *skb) 1947 { 1948 struct hci_rp_le_read_supported_states *rp = data; 1949 1950 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1951 1952 if (rp->status) 1953 return rp->status; 1954 1955 memcpy(hdev->le_states, rp->le_states, 8); 1956 1957 return rp->status; 1958 } 1959 1960 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data, 1961 struct sk_buff *skb) 1962 { 1963 struct hci_rp_le_read_def_data_len *rp = data; 1964 1965 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1966 1967 if (rp->status) 1968 return rp->status; 1969 1970 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); 1971 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); 1972 1973 return rp->status; 1974 } 1975 1976 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data, 1977 struct sk_buff *skb) 1978 { 1979 struct hci_cp_le_write_def_data_len *sent; 1980 struct hci_ev_status *rp = data; 1981 1982 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1983 1984 if (rp->status) 1985 return rp->status; 1986 1987 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); 1988 if (!sent) 1989 return rp->status; 1990 1991 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); 1992 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); 1993 1994 return rp->status; 1995 } 1996 1997 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data, 1998 struct sk_buff *skb) 1999 { 2000 struct hci_cp_le_add_to_resolv_list *sent; 2001 struct hci_ev_status *rp = data; 2002 2003 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2004 2005 if (rp->status) 2006 return rp->status; 2007 2008 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); 2009 if (!sent) 2010 return rp->status; 2011 2012 hci_dev_lock(hdev); 2013 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 2014 sent->bdaddr_type, sent->peer_irk, 2015 sent->local_irk); 2016 hci_dev_unlock(hdev); 2017 2018 return rp->status; 2019 } 2020 2021 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data, 2022 struct sk_buff *skb) 2023 { 2024 struct hci_cp_le_del_from_resolv_list *sent; 2025 struct hci_ev_status *rp = data; 2026 2027 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2028 2029 if (rp->status) 2030 return rp->status; 2031 2032 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); 2033 if (!sent) 2034 return rp->status; 2035 2036 hci_dev_lock(hdev); 2037 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 2038 sent->bdaddr_type); 2039 hci_dev_unlock(hdev); 2040 2041 return rp->status; 2042 } 2043 2044 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data, 2045 struct sk_buff *skb) 2046 { 2047 struct hci_ev_status *rp = data; 2048 2049 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2050 2051 if (rp->status) 2052 return rp->status; 2053 2054 hci_dev_lock(hdev); 2055 hci_bdaddr_list_clear(&hdev->le_resolv_list); 2056 hci_dev_unlock(hdev); 2057 2058 return rp->status; 2059 } 2060 2061 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data, 2062 struct sk_buff *skb) 2063 { 2064 struct hci_rp_le_read_resolv_list_size *rp = data; 2065 2066 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); 2067 2068 if (rp->status) 2069 return rp->status; 2070 2071 hdev->le_resolv_list_size = rp->size; 2072 2073 return rp->status; 2074 } 2075 2076 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data, 2077 struct sk_buff *skb) 2078 { 2079 struct hci_ev_status *rp = data; 2080 __u8 *sent; 2081 2082 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2083 2084 if (rp->status) 2085 return rp->status; 2086 2087 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); 2088 if (!sent) 2089 return rp->status; 2090 2091 hci_dev_lock(hdev); 2092 2093 if (*sent) 2094 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION); 2095 else 2096 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); 2097 2098 hci_dev_unlock(hdev); 2099 2100 return rp->status; 2101 } 2102 2103 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data, 2104 struct sk_buff *skb) 2105 { 2106 struct hci_rp_le_read_max_data_len *rp = data; 2107 2108 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2109 2110 if (rp->status) 2111 return rp->status; 2112 2113 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); 2114 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); 2115 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); 2116 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); 2117 2118 return rp->status; 2119 } 2120 2121 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data, 2122 struct sk_buff *skb) 2123 { 2124 struct hci_cp_write_le_host_supported *sent; 2125 struct hci_ev_status *rp = data; 2126 2127 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2128 2129 if (rp->status) 2130 return rp->status; 2131 2132 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 2133 if (!sent) 2134 return rp->status; 2135 2136 hci_dev_lock(hdev); 2137 2138 if (sent->le) { 2139 hdev->features[1][0] |= LMP_HOST_LE; 2140 hci_dev_set_flag(hdev, HCI_LE_ENABLED); 2141 } else { 2142 hdev->features[1][0] &= ~LMP_HOST_LE; 2143 hci_dev_clear_flag(hdev, HCI_LE_ENABLED); 2144 hci_dev_clear_flag(hdev, HCI_ADVERTISING); 2145 } 2146 2147 if (sent->simul) 2148 hdev->features[1][0] |= LMP_HOST_LE_BREDR; 2149 else 2150 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; 2151 2152 hci_dev_unlock(hdev); 2153 2154 return rp->status; 2155 } 2156 2157 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, 2158 struct sk_buff *skb) 2159 { 2160 struct hci_cp_le_set_adv_param *cp; 2161 struct hci_ev_status *rp = data; 2162 2163 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2164 2165 if (rp->status) 2166 return rp->status; 2167 2168 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); 2169 if (!cp) 2170 return rp->status; 2171 2172 hci_dev_lock(hdev); 2173 hdev->adv_addr_type = cp->own_address_type; 2174 hci_dev_unlock(hdev); 2175 2176 return rp->status; 2177 } 2178 2179 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, 2180 struct sk_buff *skb) 2181 { 2182 struct hci_rp_le_set_ext_adv_params *rp = data; 2183 struct hci_cp_le_set_ext_adv_params *cp; 2184 struct adv_info *adv_instance; 2185 2186 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2187 2188 if (rp->status) 2189 return rp->status; 2190 2191 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); 2192 if (!cp) 2193 return rp->status; 2194 2195 hci_dev_lock(hdev); 2196 hdev->adv_addr_type = cp->own_addr_type; 2197 if (!cp->handle) { 2198 /* Store in hdev for instance 0 */ 2199 hdev->adv_tx_power = rp->tx_power; 2200 } else { 2201 adv_instance = hci_find_adv_instance(hdev, cp->handle); 2202 if (adv_instance) 2203 adv_instance->tx_power = rp->tx_power; 2204 } 2205 /* Update adv data as tx power is known now */ 2206 hci_update_adv_data(hdev, cp->handle); 2207 2208 hci_dev_unlock(hdev); 2209 2210 return rp->status; 2211 } 2212 2213 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, 2214 struct sk_buff *skb) 2215 { 2216 struct hci_rp_read_rssi *rp = data; 2217 struct hci_conn *conn; 2218 2219 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2220 2221 if (rp->status) 2222 return rp->status; 2223 2224 hci_dev_lock(hdev); 2225 2226 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2227 if (conn) 2228 conn->rssi = rp->rssi; 2229 2230 hci_dev_unlock(hdev); 2231 2232 return rp->status; 2233 } 2234 2235 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data, 2236 struct sk_buff *skb) 2237 { 2238 struct hci_cp_read_tx_power *sent; 2239 struct hci_rp_read_tx_power *rp = data; 2240 struct hci_conn *conn; 2241 2242 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2243 2244 if (rp->status) 2245 return rp->status; 2246 2247 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); 2248 if (!sent) 2249 return rp->status; 2250 2251 hci_dev_lock(hdev); 2252 2253 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2254 if (!conn) 2255 goto unlock; 2256 2257 switch (sent->type) { 2258 case 0x00: 2259 conn->tx_power = rp->tx_power; 2260 break; 2261 case 0x01: 2262 conn->max_tx_power = rp->tx_power; 2263 break; 2264 } 2265 2266 unlock: 2267 hci_dev_unlock(hdev); 2268 return rp->status; 2269 } 2270 2271 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data, 2272 struct sk_buff *skb) 2273 { 2274 struct hci_ev_status *rp = data; 2275 u8 *mode; 2276 2277 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2278 2279 if (rp->status) 2280 return rp->status; 2281 2282 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); 2283 if (mode) 2284 hdev->ssp_debug_mode = *mode; 2285 2286 return rp->status; 2287 } 2288 2289 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 2290 { 2291 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2292 2293 if (status) { 2294 hci_conn_check_pending(hdev); 2295 return; 2296 } 2297 2298 set_bit(HCI_INQUIRY, &hdev->flags); 2299 } 2300 2301 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 2302 { 2303 struct hci_cp_create_conn *cp; 2304 struct hci_conn *conn; 2305 2306 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2307 2308 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 2309 if (!cp) 2310 return; 2311 2312 hci_dev_lock(hdev); 2313 2314 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2315 2316 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); 2317 2318 if (status) { 2319 if (conn && conn->state == BT_CONNECT) { 2320 if (status != 0x0c || conn->attempt > 2) { 2321 conn->state = BT_CLOSED; 2322 hci_connect_cfm(conn, status); 2323 hci_conn_del(conn); 2324 } else 2325 conn->state = BT_CONNECT2; 2326 } 2327 } else { 2328 if (!conn) { 2329 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, 2330 HCI_ROLE_MASTER); 2331 if (!conn) 2332 bt_dev_err(hdev, "no memory for new connection"); 2333 } 2334 } 2335 2336 hci_dev_unlock(hdev); 2337 } 2338 2339 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 2340 { 2341 struct hci_cp_add_sco *cp; 2342 struct hci_conn *acl, *sco; 2343 __u16 handle; 2344 2345 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2346 2347 if (!status) 2348 return; 2349 2350 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 2351 if (!cp) 2352 return; 2353 2354 handle = __le16_to_cpu(cp->handle); 2355 2356 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2357 2358 hci_dev_lock(hdev); 2359 2360 acl = hci_conn_hash_lookup_handle(hdev, handle); 2361 if (acl) { 2362 sco = acl->link; 2363 if (sco) { 2364 sco->state = BT_CLOSED; 2365 2366 hci_connect_cfm(sco, status); 2367 hci_conn_del(sco); 2368 } 2369 } 2370 2371 hci_dev_unlock(hdev); 2372 } 2373 2374 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 2375 { 2376 struct hci_cp_auth_requested *cp; 2377 struct hci_conn *conn; 2378 2379 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2380 2381 if (!status) 2382 return; 2383 2384 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 2385 if (!cp) 2386 return; 2387 2388 hci_dev_lock(hdev); 2389 2390 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2391 if (conn) { 2392 if (conn->state == BT_CONFIG) { 2393 hci_connect_cfm(conn, status); 2394 hci_conn_drop(conn); 2395 } 2396 } 2397 2398 hci_dev_unlock(hdev); 2399 } 2400 2401 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 2402 { 2403 struct hci_cp_set_conn_encrypt *cp; 2404 struct hci_conn *conn; 2405 2406 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2407 2408 if (!status) 2409 return; 2410 2411 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 2412 if (!cp) 2413 return; 2414 2415 hci_dev_lock(hdev); 2416 2417 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2418 if (conn) { 2419 if (conn->state == BT_CONFIG) { 2420 hci_connect_cfm(conn, status); 2421 hci_conn_drop(conn); 2422 } 2423 } 2424 2425 hci_dev_unlock(hdev); 2426 } 2427 2428 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 2429 struct hci_conn *conn) 2430 { 2431 if (conn->state != BT_CONFIG || !conn->out) 2432 return 0; 2433 2434 if (conn->pending_sec_level == BT_SECURITY_SDP) 2435 return 0; 2436 2437 /* Only request authentication for SSP connections or non-SSP 2438 * devices with sec_level MEDIUM or HIGH or if MITM protection 2439 * is requested. 2440 */ 2441 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 2442 conn->pending_sec_level != BT_SECURITY_FIPS && 2443 conn->pending_sec_level != BT_SECURITY_HIGH && 2444 conn->pending_sec_level != BT_SECURITY_MEDIUM) 2445 return 0; 2446 2447 return 1; 2448 } 2449 2450 static int hci_resolve_name(struct hci_dev *hdev, 2451 struct inquiry_entry *e) 2452 { 2453 struct hci_cp_remote_name_req cp; 2454 2455 memset(&cp, 0, sizeof(cp)); 2456 2457 bacpy(&cp.bdaddr, &e->data.bdaddr); 2458 cp.pscan_rep_mode = e->data.pscan_rep_mode; 2459 cp.pscan_mode = e->data.pscan_mode; 2460 cp.clock_offset = e->data.clock_offset; 2461 2462 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2463 } 2464 2465 static bool hci_resolve_next_name(struct hci_dev *hdev) 2466 { 2467 struct discovery_state *discov = &hdev->discovery; 2468 struct inquiry_entry *e; 2469 2470 if (list_empty(&discov->resolve)) 2471 return false; 2472 2473 /* We should stop if we already spent too much time resolving names. */ 2474 if (time_after(jiffies, discov->name_resolve_timeout)) { 2475 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); 2476 return false; 2477 } 2478 2479 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2480 if (!e) 2481 return false; 2482 2483 if (hci_resolve_name(hdev, e) == 0) { 2484 e->name_state = NAME_PENDING; 2485 return true; 2486 } 2487 2488 return false; 2489 } 2490 2491 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2492 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2493 { 2494 struct discovery_state *discov = &hdev->discovery; 2495 struct inquiry_entry *e; 2496 2497 /* Update the mgmt connected state if necessary. Be careful with 2498 * conn objects that exist but are not (yet) connected however. 2499 * Only those in BT_CONFIG or BT_CONNECTED states can be 2500 * considered connected. 2501 */ 2502 if (conn && 2503 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && 2504 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2505 mgmt_device_connected(hdev, conn, name, name_len); 2506 2507 if (discov->state == DISCOVERY_STOPPED) 2508 return; 2509 2510 if (discov->state == DISCOVERY_STOPPING) 2511 goto discov_complete; 2512 2513 if (discov->state != DISCOVERY_RESOLVING) 2514 return; 2515 2516 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2517 /* If the device was not found in a list of found devices names of which 2518 * are pending. there is no need to continue resolving a next name as it 2519 * will be done upon receiving another Remote Name Request Complete 2520 * Event */ 2521 if (!e) 2522 return; 2523 2524 list_del(&e->list); 2525 2526 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; 2527 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, 2528 name, name_len); 2529 2530 if (hci_resolve_next_name(hdev)) 2531 return; 2532 2533 discov_complete: 2534 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2535 } 2536 2537 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2538 { 2539 struct hci_cp_remote_name_req *cp; 2540 struct hci_conn *conn; 2541 2542 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2543 2544 /* If successful wait for the name req complete event before 2545 * checking for the need to do authentication */ 2546 if (!status) 2547 return; 2548 2549 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2550 if (!cp) 2551 return; 2552 2553 hci_dev_lock(hdev); 2554 2555 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2556 2557 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2558 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2559 2560 if (!conn) 2561 goto unlock; 2562 2563 if (!hci_outgoing_auth_needed(hdev, conn)) 2564 goto unlock; 2565 2566 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2567 struct hci_cp_auth_requested auth_cp; 2568 2569 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2570 2571 auth_cp.handle = __cpu_to_le16(conn->handle); 2572 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2573 sizeof(auth_cp), &auth_cp); 2574 } 2575 2576 unlock: 2577 hci_dev_unlock(hdev); 2578 } 2579 2580 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2581 { 2582 struct hci_cp_read_remote_features *cp; 2583 struct hci_conn *conn; 2584 2585 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2586 2587 if (!status) 2588 return; 2589 2590 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2591 if (!cp) 2592 return; 2593 2594 hci_dev_lock(hdev); 2595 2596 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2597 if (conn) { 2598 if (conn->state == BT_CONFIG) { 2599 hci_connect_cfm(conn, status); 2600 hci_conn_drop(conn); 2601 } 2602 } 2603 2604 hci_dev_unlock(hdev); 2605 } 2606 2607 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2608 { 2609 struct hci_cp_read_remote_ext_features *cp; 2610 struct hci_conn *conn; 2611 2612 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2613 2614 if (!status) 2615 return; 2616 2617 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2618 if (!cp) 2619 return; 2620 2621 hci_dev_lock(hdev); 2622 2623 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2624 if (conn) { 2625 if (conn->state == BT_CONFIG) { 2626 hci_connect_cfm(conn, status); 2627 hci_conn_drop(conn); 2628 } 2629 } 2630 2631 hci_dev_unlock(hdev); 2632 } 2633 2634 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2635 { 2636 struct hci_cp_setup_sync_conn *cp; 2637 struct hci_conn *acl, *sco; 2638 __u16 handle; 2639 2640 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2641 2642 if (!status) 2643 return; 2644 2645 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2646 if (!cp) 2647 return; 2648 2649 handle = __le16_to_cpu(cp->handle); 2650 2651 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2652 2653 hci_dev_lock(hdev); 2654 2655 acl = hci_conn_hash_lookup_handle(hdev, handle); 2656 if (acl) { 2657 sco = acl->link; 2658 if (sco) { 2659 sco->state = BT_CLOSED; 2660 2661 hci_connect_cfm(sco, status); 2662 hci_conn_del(sco); 2663 } 2664 } 2665 2666 hci_dev_unlock(hdev); 2667 } 2668 2669 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2670 { 2671 struct hci_cp_enhanced_setup_sync_conn *cp; 2672 struct hci_conn *acl, *sco; 2673 __u16 handle; 2674 2675 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2676 2677 if (!status) 2678 return; 2679 2680 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2681 if (!cp) 2682 return; 2683 2684 handle = __le16_to_cpu(cp->handle); 2685 2686 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2687 2688 hci_dev_lock(hdev); 2689 2690 acl = hci_conn_hash_lookup_handle(hdev, handle); 2691 if (acl) { 2692 sco = acl->link; 2693 if (sco) { 2694 sco->state = BT_CLOSED; 2695 2696 hci_connect_cfm(sco, status); 2697 hci_conn_del(sco); 2698 } 2699 } 2700 2701 hci_dev_unlock(hdev); 2702 } 2703 2704 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2705 { 2706 struct hci_cp_sniff_mode *cp; 2707 struct hci_conn *conn; 2708 2709 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2710 2711 if (!status) 2712 return; 2713 2714 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2715 if (!cp) 2716 return; 2717 2718 hci_dev_lock(hdev); 2719 2720 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2721 if (conn) { 2722 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2723 2724 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2725 hci_sco_setup(conn, status); 2726 } 2727 2728 hci_dev_unlock(hdev); 2729 } 2730 2731 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2732 { 2733 struct hci_cp_exit_sniff_mode *cp; 2734 struct hci_conn *conn; 2735 2736 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2737 2738 if (!status) 2739 return; 2740 2741 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2742 if (!cp) 2743 return; 2744 2745 hci_dev_lock(hdev); 2746 2747 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2748 if (conn) { 2749 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2750 2751 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2752 hci_sco_setup(conn, status); 2753 } 2754 2755 hci_dev_unlock(hdev); 2756 } 2757 2758 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2759 { 2760 struct hci_cp_disconnect *cp; 2761 struct hci_conn_params *params; 2762 struct hci_conn *conn; 2763 bool mgmt_conn; 2764 2765 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2766 2767 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended 2768 * otherwise cleanup the connection immediately. 2769 */ 2770 if (!status && !hdev->suspended) 2771 return; 2772 2773 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2774 if (!cp) 2775 return; 2776 2777 hci_dev_lock(hdev); 2778 2779 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2780 if (!conn) 2781 goto unlock; 2782 2783 if (status) { 2784 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2785 conn->dst_type, status); 2786 2787 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2788 hdev->cur_adv_instance = conn->adv_instance; 2789 hci_enable_advertising(hdev); 2790 } 2791 2792 goto done; 2793 } 2794 2795 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2796 2797 if (conn->type == ACL_LINK) { 2798 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2799 hci_remove_link_key(hdev, &conn->dst); 2800 } 2801 2802 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2803 if (params) { 2804 switch (params->auto_connect) { 2805 case HCI_AUTO_CONN_LINK_LOSS: 2806 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2807 break; 2808 fallthrough; 2809 2810 case HCI_AUTO_CONN_DIRECT: 2811 case HCI_AUTO_CONN_ALWAYS: 2812 list_del_init(¶ms->action); 2813 list_add(¶ms->action, &hdev->pend_le_conns); 2814 break; 2815 2816 default: 2817 break; 2818 } 2819 } 2820 2821 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2822 cp->reason, mgmt_conn); 2823 2824 hci_disconn_cfm(conn, cp->reason); 2825 2826 done: 2827 /* If the disconnection failed for any reason, the upper layer 2828 * does not retry to disconnect in current implementation. 2829 * Hence, we need to do some basic cleanup here and re-enable 2830 * advertising if necessary. 2831 */ 2832 hci_conn_del(conn); 2833 unlock: 2834 hci_dev_unlock(hdev); 2835 } 2836 2837 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2838 { 2839 /* When using controller based address resolution, then the new 2840 * address types 0x02 and 0x03 are used. These types need to be 2841 * converted back into either public address or random address type 2842 */ 2843 switch (type) { 2844 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2845 if (resolved) 2846 *resolved = true; 2847 return ADDR_LE_DEV_PUBLIC; 2848 case ADDR_LE_DEV_RANDOM_RESOLVED: 2849 if (resolved) 2850 *resolved = true; 2851 return ADDR_LE_DEV_RANDOM; 2852 } 2853 2854 if (resolved) 2855 *resolved = false; 2856 return type; 2857 } 2858 2859 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2860 u8 peer_addr_type, u8 own_address_type, 2861 u8 filter_policy) 2862 { 2863 struct hci_conn *conn; 2864 2865 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2866 peer_addr_type); 2867 if (!conn) 2868 return; 2869 2870 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2871 2872 /* Store the initiator and responder address information which 2873 * is needed for SMP. These values will not change during the 2874 * lifetime of the connection. 2875 */ 2876 conn->init_addr_type = own_address_type; 2877 if (own_address_type == ADDR_LE_DEV_RANDOM) 2878 bacpy(&conn->init_addr, &hdev->random_addr); 2879 else 2880 bacpy(&conn->init_addr, &hdev->bdaddr); 2881 2882 conn->resp_addr_type = peer_addr_type; 2883 bacpy(&conn->resp_addr, peer_addr); 2884 2885 /* We don't want the connection attempt to stick around 2886 * indefinitely since LE doesn't have a page timeout concept 2887 * like BR/EDR. Set a timer for any connection that doesn't use 2888 * the accept list for connecting. 2889 */ 2890 if (filter_policy == HCI_LE_USE_PEER_ADDR) 2891 queue_delayed_work(conn->hdev->workqueue, 2892 &conn->le_conn_timeout, 2893 conn->conn_timeout); 2894 } 2895 2896 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2897 { 2898 struct hci_cp_le_create_conn *cp; 2899 2900 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2901 2902 /* All connection failure handling is taken care of by the 2903 * hci_conn_failed function which is triggered by the HCI 2904 * request completion callbacks used for connecting. 2905 */ 2906 if (status) 2907 return; 2908 2909 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2910 if (!cp) 2911 return; 2912 2913 hci_dev_lock(hdev); 2914 2915 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2916 cp->own_address_type, cp->filter_policy); 2917 2918 hci_dev_unlock(hdev); 2919 } 2920 2921 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2922 { 2923 struct hci_cp_le_ext_create_conn *cp; 2924 2925 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2926 2927 /* All connection failure handling is taken care of by the 2928 * hci_conn_failed function which is triggered by the HCI 2929 * request completion callbacks used for connecting. 2930 */ 2931 if (status) 2932 return; 2933 2934 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2935 if (!cp) 2936 return; 2937 2938 hci_dev_lock(hdev); 2939 2940 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2941 cp->own_addr_type, cp->filter_policy); 2942 2943 hci_dev_unlock(hdev); 2944 } 2945 2946 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2947 { 2948 struct hci_cp_le_read_remote_features *cp; 2949 struct hci_conn *conn; 2950 2951 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2952 2953 if (!status) 2954 return; 2955 2956 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2957 if (!cp) 2958 return; 2959 2960 hci_dev_lock(hdev); 2961 2962 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2963 if (conn) { 2964 if (conn->state == BT_CONFIG) { 2965 hci_connect_cfm(conn, status); 2966 hci_conn_drop(conn); 2967 } 2968 } 2969 2970 hci_dev_unlock(hdev); 2971 } 2972 2973 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2974 { 2975 struct hci_cp_le_start_enc *cp; 2976 struct hci_conn *conn; 2977 2978 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2979 2980 if (!status) 2981 return; 2982 2983 hci_dev_lock(hdev); 2984 2985 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2986 if (!cp) 2987 goto unlock; 2988 2989 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2990 if (!conn) 2991 goto unlock; 2992 2993 if (conn->state != BT_CONNECTED) 2994 goto unlock; 2995 2996 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2997 hci_conn_drop(conn); 2998 2999 unlock: 3000 hci_dev_unlock(hdev); 3001 } 3002 3003 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 3004 { 3005 struct hci_cp_switch_role *cp; 3006 struct hci_conn *conn; 3007 3008 BT_DBG("%s status 0x%2.2x", hdev->name, status); 3009 3010 if (!status) 3011 return; 3012 3013 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 3014 if (!cp) 3015 return; 3016 3017 hci_dev_lock(hdev); 3018 3019 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 3020 if (conn) 3021 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 3022 3023 hci_dev_unlock(hdev); 3024 } 3025 3026 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, 3027 struct sk_buff *skb) 3028 { 3029 struct hci_ev_status *ev = data; 3030 struct discovery_state *discov = &hdev->discovery; 3031 struct inquiry_entry *e; 3032 3033 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3034 3035 hci_conn_check_pending(hdev); 3036 3037 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 3038 return; 3039 3040 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 3041 wake_up_bit(&hdev->flags, HCI_INQUIRY); 3042 3043 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3044 return; 3045 3046 hci_dev_lock(hdev); 3047 3048 if (discov->state != DISCOVERY_FINDING) 3049 goto unlock; 3050 3051 if (list_empty(&discov->resolve)) { 3052 /* When BR/EDR inquiry is active and no LE scanning is in 3053 * progress, then change discovery state to indicate completion. 3054 * 3055 * When running LE scanning and BR/EDR inquiry simultaneously 3056 * and the LE scan already finished, then change the discovery 3057 * state to indicate completion. 3058 */ 3059 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3060 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3061 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3062 goto unlock; 3063 } 3064 3065 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 3066 if (e && hci_resolve_name(hdev, e) == 0) { 3067 e->name_state = NAME_PENDING; 3068 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 3069 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; 3070 } else { 3071 /* When BR/EDR inquiry is active and no LE scanning is in 3072 * progress, then change discovery state to indicate completion. 3073 * 3074 * When running LE scanning and BR/EDR inquiry simultaneously 3075 * and the LE scan already finished, then change the discovery 3076 * state to indicate completion. 3077 */ 3078 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3079 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3080 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3081 } 3082 3083 unlock: 3084 hci_dev_unlock(hdev); 3085 } 3086 3087 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, 3088 struct sk_buff *skb) 3089 { 3090 struct hci_ev_inquiry_result *ev = edata; 3091 struct inquiry_data data; 3092 int i; 3093 3094 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, 3095 flex_array_size(ev, info, ev->num))) 3096 return; 3097 3098 bt_dev_dbg(hdev, "num %d", ev->num); 3099 3100 if (!ev->num) 3101 return; 3102 3103 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3104 return; 3105 3106 hci_dev_lock(hdev); 3107 3108 for (i = 0; i < ev->num; i++) { 3109 struct inquiry_info *info = &ev->info[i]; 3110 u32 flags; 3111 3112 bacpy(&data.bdaddr, &info->bdaddr); 3113 data.pscan_rep_mode = info->pscan_rep_mode; 3114 data.pscan_period_mode = info->pscan_period_mode; 3115 data.pscan_mode = info->pscan_mode; 3116 memcpy(data.dev_class, info->dev_class, 3); 3117 data.clock_offset = info->clock_offset; 3118 data.rssi = HCI_RSSI_INVALID; 3119 data.ssp_mode = 0x00; 3120 3121 flags = hci_inquiry_cache_update(hdev, &data, false); 3122 3123 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3124 info->dev_class, HCI_RSSI_INVALID, 3125 flags, NULL, 0, NULL, 0, 0); 3126 } 3127 3128 hci_dev_unlock(hdev); 3129 } 3130 3131 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, 3132 struct sk_buff *skb) 3133 { 3134 struct hci_ev_conn_complete *ev = data; 3135 struct hci_conn *conn; 3136 u8 status = ev->status; 3137 3138 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3139 3140 hci_dev_lock(hdev); 3141 3142 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 3143 if (!conn) { 3144 /* In case of error status and there is no connection pending 3145 * just unlock as there is nothing to cleanup. 3146 */ 3147 if (ev->status) 3148 goto unlock; 3149 3150 /* Connection may not exist if auto-connected. Check the bredr 3151 * allowlist to see if this device is allowed to auto connect. 3152 * If link is an ACL type, create a connection class 3153 * automatically. 3154 * 3155 * Auto-connect will only occur if the event filter is 3156 * programmed with a given address. Right now, event filter is 3157 * only used during suspend. 3158 */ 3159 if (ev->link_type == ACL_LINK && 3160 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 3161 &ev->bdaddr, 3162 BDADDR_BREDR)) { 3163 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3164 HCI_ROLE_SLAVE); 3165 if (!conn) { 3166 bt_dev_err(hdev, "no memory for new conn"); 3167 goto unlock; 3168 } 3169 } else { 3170 if (ev->link_type != SCO_LINK) 3171 goto unlock; 3172 3173 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 3174 &ev->bdaddr); 3175 if (!conn) 3176 goto unlock; 3177 3178 conn->type = SCO_LINK; 3179 } 3180 } 3181 3182 /* The HCI_Connection_Complete event is only sent once per connection. 3183 * Processing it more than once per connection can corrupt kernel memory. 3184 * 3185 * As the connection handle is set here for the first time, it indicates 3186 * whether the connection is already set up. 3187 */ 3188 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 3189 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 3190 goto unlock; 3191 } 3192 3193 if (!status) { 3194 conn->handle = __le16_to_cpu(ev->handle); 3195 if (conn->handle > HCI_CONN_HANDLE_MAX) { 3196 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 3197 conn->handle, HCI_CONN_HANDLE_MAX); 3198 status = HCI_ERROR_INVALID_PARAMETERS; 3199 goto done; 3200 } 3201 3202 if (conn->type == ACL_LINK) { 3203 conn->state = BT_CONFIG; 3204 hci_conn_hold(conn); 3205 3206 if (!conn->out && !hci_conn_ssp_enabled(conn) && 3207 !hci_find_link_key(hdev, &ev->bdaddr)) 3208 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3209 else 3210 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3211 } else 3212 conn->state = BT_CONNECTED; 3213 3214 hci_debugfs_create_conn(conn); 3215 hci_conn_add_sysfs(conn); 3216 3217 if (test_bit(HCI_AUTH, &hdev->flags)) 3218 set_bit(HCI_CONN_AUTH, &conn->flags); 3219 3220 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 3221 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3222 3223 /* Get remote features */ 3224 if (conn->type == ACL_LINK) { 3225 struct hci_cp_read_remote_features cp; 3226 cp.handle = ev->handle; 3227 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 3228 sizeof(cp), &cp); 3229 3230 hci_update_scan(hdev); 3231 } 3232 3233 /* Set packet type for incoming connection */ 3234 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 3235 struct hci_cp_change_conn_ptype cp; 3236 cp.handle = ev->handle; 3237 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3238 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 3239 &cp); 3240 } 3241 } 3242 3243 if (conn->type == ACL_LINK) 3244 hci_sco_setup(conn, ev->status); 3245 3246 done: 3247 if (status) { 3248 hci_conn_failed(conn, status); 3249 } else if (ev->link_type == SCO_LINK) { 3250 switch (conn->setting & SCO_AIRMODE_MASK) { 3251 case SCO_AIRMODE_CVSD: 3252 if (hdev->notify) 3253 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 3254 break; 3255 } 3256 3257 hci_connect_cfm(conn, status); 3258 } 3259 3260 unlock: 3261 hci_dev_unlock(hdev); 3262 3263 hci_conn_check_pending(hdev); 3264 } 3265 3266 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 3267 { 3268 struct hci_cp_reject_conn_req cp; 3269 3270 bacpy(&cp.bdaddr, bdaddr); 3271 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 3272 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 3273 } 3274 3275 static void hci_conn_request_evt(struct hci_dev *hdev, void *data, 3276 struct sk_buff *skb) 3277 { 3278 struct hci_ev_conn_request *ev = data; 3279 int mask = hdev->link_mode; 3280 struct inquiry_entry *ie; 3281 struct hci_conn *conn; 3282 __u8 flags = 0; 3283 3284 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); 3285 3286 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 3287 &flags); 3288 3289 if (!(mask & HCI_LM_ACCEPT)) { 3290 hci_reject_conn(hdev, &ev->bdaddr); 3291 return; 3292 } 3293 3294 hci_dev_lock(hdev); 3295 3296 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, 3297 BDADDR_BREDR)) { 3298 hci_reject_conn(hdev, &ev->bdaddr); 3299 goto unlock; 3300 } 3301 3302 /* Require HCI_CONNECTABLE or an accept list entry to accept the 3303 * connection. These features are only touched through mgmt so 3304 * only do the checks if HCI_MGMT is set. 3305 */ 3306 if (hci_dev_test_flag(hdev, HCI_MGMT) && 3307 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 3308 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 3309 BDADDR_BREDR)) { 3310 hci_reject_conn(hdev, &ev->bdaddr); 3311 goto unlock; 3312 } 3313 3314 /* Connection accepted */ 3315 3316 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3317 if (ie) 3318 memcpy(ie->data.dev_class, ev->dev_class, 3); 3319 3320 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 3321 &ev->bdaddr); 3322 if (!conn) { 3323 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3324 HCI_ROLE_SLAVE); 3325 if (!conn) { 3326 bt_dev_err(hdev, "no memory for new connection"); 3327 goto unlock; 3328 } 3329 } 3330 3331 memcpy(conn->dev_class, ev->dev_class, 3); 3332 3333 hci_dev_unlock(hdev); 3334 3335 if (ev->link_type == ACL_LINK || 3336 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 3337 struct hci_cp_accept_conn_req cp; 3338 conn->state = BT_CONNECT; 3339 3340 bacpy(&cp.bdaddr, &ev->bdaddr); 3341 3342 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 3343 cp.role = 0x00; /* Become central */ 3344 else 3345 cp.role = 0x01; /* Remain peripheral */ 3346 3347 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 3348 } else if (!(flags & HCI_PROTO_DEFER)) { 3349 struct hci_cp_accept_sync_conn_req cp; 3350 conn->state = BT_CONNECT; 3351 3352 bacpy(&cp.bdaddr, &ev->bdaddr); 3353 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3354 3355 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 3356 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 3357 cp.max_latency = cpu_to_le16(0xffff); 3358 cp.content_format = cpu_to_le16(hdev->voice_setting); 3359 cp.retrans_effort = 0xff; 3360 3361 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 3362 &cp); 3363 } else { 3364 conn->state = BT_CONNECT2; 3365 hci_connect_cfm(conn, 0); 3366 } 3367 3368 return; 3369 unlock: 3370 hci_dev_unlock(hdev); 3371 } 3372 3373 static u8 hci_to_mgmt_reason(u8 err) 3374 { 3375 switch (err) { 3376 case HCI_ERROR_CONNECTION_TIMEOUT: 3377 return MGMT_DEV_DISCONN_TIMEOUT; 3378 case HCI_ERROR_REMOTE_USER_TERM: 3379 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3380 case HCI_ERROR_REMOTE_POWER_OFF: 3381 return MGMT_DEV_DISCONN_REMOTE; 3382 case HCI_ERROR_LOCAL_HOST_TERM: 3383 return MGMT_DEV_DISCONN_LOCAL_HOST; 3384 default: 3385 return MGMT_DEV_DISCONN_UNKNOWN; 3386 } 3387 } 3388 3389 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, 3390 struct sk_buff *skb) 3391 { 3392 struct hci_ev_disconn_complete *ev = data; 3393 u8 reason; 3394 struct hci_conn_params *params; 3395 struct hci_conn *conn; 3396 bool mgmt_connected; 3397 3398 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3399 3400 hci_dev_lock(hdev); 3401 3402 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3403 if (!conn) 3404 goto unlock; 3405 3406 if (ev->status) { 3407 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3408 conn->dst_type, ev->status); 3409 goto unlock; 3410 } 3411 3412 conn->state = BT_CLOSED; 3413 3414 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3415 3416 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3417 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3418 else 3419 reason = hci_to_mgmt_reason(ev->reason); 3420 3421 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3422 reason, mgmt_connected); 3423 3424 if (conn->type == ACL_LINK) { 3425 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3426 hci_remove_link_key(hdev, &conn->dst); 3427 3428 hci_update_scan(hdev); 3429 } 3430 3431 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 3432 if (params) { 3433 switch (params->auto_connect) { 3434 case HCI_AUTO_CONN_LINK_LOSS: 3435 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3436 break; 3437 fallthrough; 3438 3439 case HCI_AUTO_CONN_DIRECT: 3440 case HCI_AUTO_CONN_ALWAYS: 3441 list_del_init(¶ms->action); 3442 list_add(¶ms->action, &hdev->pend_le_conns); 3443 hci_update_passive_scan(hdev); 3444 break; 3445 3446 default: 3447 break; 3448 } 3449 } 3450 3451 hci_disconn_cfm(conn, ev->reason); 3452 3453 /* Re-enable advertising if necessary, since it might 3454 * have been disabled by the connection. From the 3455 * HCI_LE_Set_Advertise_Enable command description in 3456 * the core specification (v4.0): 3457 * "The Controller shall continue advertising until the Host 3458 * issues an LE_Set_Advertise_Enable command with 3459 * Advertising_Enable set to 0x00 (Advertising is disabled) 3460 * or until a connection is created or until the Advertising 3461 * is timed out due to Directed Advertising." 3462 */ 3463 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3464 hdev->cur_adv_instance = conn->adv_instance; 3465 hci_enable_advertising(hdev); 3466 } 3467 3468 hci_conn_del(conn); 3469 3470 unlock: 3471 hci_dev_unlock(hdev); 3472 } 3473 3474 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, 3475 struct sk_buff *skb) 3476 { 3477 struct hci_ev_auth_complete *ev = data; 3478 struct hci_conn *conn; 3479 3480 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3481 3482 hci_dev_lock(hdev); 3483 3484 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3485 if (!conn) 3486 goto unlock; 3487 3488 if (!ev->status) { 3489 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3490 3491 if (!hci_conn_ssp_enabled(conn) && 3492 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 3493 bt_dev_info(hdev, "re-auth of legacy device is not possible."); 3494 } else { 3495 set_bit(HCI_CONN_AUTH, &conn->flags); 3496 conn->sec_level = conn->pending_sec_level; 3497 } 3498 } else { 3499 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3500 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3501 3502 mgmt_auth_failed(conn, ev->status); 3503 } 3504 3505 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3506 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 3507 3508 if (conn->state == BT_CONFIG) { 3509 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3510 struct hci_cp_set_conn_encrypt cp; 3511 cp.handle = ev->handle; 3512 cp.encrypt = 0x01; 3513 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3514 &cp); 3515 } else { 3516 conn->state = BT_CONNECTED; 3517 hci_connect_cfm(conn, ev->status); 3518 hci_conn_drop(conn); 3519 } 3520 } else { 3521 hci_auth_cfm(conn, ev->status); 3522 3523 hci_conn_hold(conn); 3524 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3525 hci_conn_drop(conn); 3526 } 3527 3528 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3529 if (!ev->status) { 3530 struct hci_cp_set_conn_encrypt cp; 3531 cp.handle = ev->handle; 3532 cp.encrypt = 0x01; 3533 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3534 &cp); 3535 } else { 3536 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3537 hci_encrypt_cfm(conn, ev->status); 3538 } 3539 } 3540 3541 unlock: 3542 hci_dev_unlock(hdev); 3543 } 3544 3545 static void hci_remote_name_evt(struct hci_dev *hdev, void *data, 3546 struct sk_buff *skb) 3547 { 3548 struct hci_ev_remote_name *ev = data; 3549 struct hci_conn *conn; 3550 3551 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3552 3553 hci_conn_check_pending(hdev); 3554 3555 hci_dev_lock(hdev); 3556 3557 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3558 3559 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3560 goto check_auth; 3561 3562 if (ev->status == 0) 3563 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3564 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3565 else 3566 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3567 3568 check_auth: 3569 if (!conn) 3570 goto unlock; 3571 3572 if (!hci_outgoing_auth_needed(hdev, conn)) 3573 goto unlock; 3574 3575 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3576 struct hci_cp_auth_requested cp; 3577 3578 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3579 3580 cp.handle = __cpu_to_le16(conn->handle); 3581 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3582 } 3583 3584 unlock: 3585 hci_dev_unlock(hdev); 3586 } 3587 3588 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, 3589 struct sk_buff *skb) 3590 { 3591 struct hci_ev_encrypt_change *ev = data; 3592 struct hci_conn *conn; 3593 3594 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3595 3596 hci_dev_lock(hdev); 3597 3598 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3599 if (!conn) 3600 goto unlock; 3601 3602 if (!ev->status) { 3603 if (ev->encrypt) { 3604 /* Encryption implies authentication */ 3605 set_bit(HCI_CONN_AUTH, &conn->flags); 3606 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3607 conn->sec_level = conn->pending_sec_level; 3608 3609 /* P-256 authentication key implies FIPS */ 3610 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3611 set_bit(HCI_CONN_FIPS, &conn->flags); 3612 3613 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3614 conn->type == LE_LINK) 3615 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3616 } else { 3617 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3618 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3619 } 3620 } 3621 3622 /* We should disregard the current RPA and generate a new one 3623 * whenever the encryption procedure fails. 3624 */ 3625 if (ev->status && conn->type == LE_LINK) { 3626 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3627 hci_adv_instances_set_rpa_expired(hdev, true); 3628 } 3629 3630 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3631 3632 /* Check link security requirements are met */ 3633 if (!hci_conn_check_link_mode(conn)) 3634 ev->status = HCI_ERROR_AUTH_FAILURE; 3635 3636 if (ev->status && conn->state == BT_CONNECTED) { 3637 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3638 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3639 3640 /* Notify upper layers so they can cleanup before 3641 * disconnecting. 3642 */ 3643 hci_encrypt_cfm(conn, ev->status); 3644 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3645 hci_conn_drop(conn); 3646 goto unlock; 3647 } 3648 3649 /* Try reading the encryption key size for encrypted ACL links */ 3650 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3651 struct hci_cp_read_enc_key_size cp; 3652 3653 /* Only send HCI_Read_Encryption_Key_Size if the 3654 * controller really supports it. If it doesn't, assume 3655 * the default size (16). 3656 */ 3657 if (!(hdev->commands[20] & 0x10)) { 3658 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3659 goto notify; 3660 } 3661 3662 cp.handle = cpu_to_le16(conn->handle); 3663 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, 3664 sizeof(cp), &cp)) { 3665 bt_dev_err(hdev, "sending read key size failed"); 3666 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3667 goto notify; 3668 } 3669 3670 goto unlock; 3671 } 3672 3673 /* Set the default Authenticated Payload Timeout after 3674 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B 3675 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be 3676 * sent when the link is active and Encryption is enabled, the conn 3677 * type can be either LE or ACL and controller must support LMP Ping. 3678 * Ensure for AES-CCM encryption as well. 3679 */ 3680 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3681 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3682 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3683 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3684 struct hci_cp_write_auth_payload_to cp; 3685 3686 cp.handle = cpu_to_le16(conn->handle); 3687 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3688 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3689 sizeof(cp), &cp)) { 3690 bt_dev_err(hdev, "write auth payload timeout failed"); 3691 goto notify; 3692 } 3693 3694 goto unlock; 3695 } 3696 3697 notify: 3698 hci_encrypt_cfm(conn, ev->status); 3699 3700 unlock: 3701 hci_dev_unlock(hdev); 3702 } 3703 3704 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, 3705 struct sk_buff *skb) 3706 { 3707 struct hci_ev_change_link_key_complete *ev = data; 3708 struct hci_conn *conn; 3709 3710 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3711 3712 hci_dev_lock(hdev); 3713 3714 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3715 if (conn) { 3716 if (!ev->status) 3717 set_bit(HCI_CONN_SECURE, &conn->flags); 3718 3719 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3720 3721 hci_key_change_cfm(conn, ev->status); 3722 } 3723 3724 hci_dev_unlock(hdev); 3725 } 3726 3727 static void hci_remote_features_evt(struct hci_dev *hdev, void *data, 3728 struct sk_buff *skb) 3729 { 3730 struct hci_ev_remote_features *ev = data; 3731 struct hci_conn *conn; 3732 3733 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3734 3735 hci_dev_lock(hdev); 3736 3737 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3738 if (!conn) 3739 goto unlock; 3740 3741 if (!ev->status) 3742 memcpy(conn->features[0], ev->features, 8); 3743 3744 if (conn->state != BT_CONFIG) 3745 goto unlock; 3746 3747 if (!ev->status && lmp_ext_feat_capable(hdev) && 3748 lmp_ext_feat_capable(conn)) { 3749 struct hci_cp_read_remote_ext_features cp; 3750 cp.handle = ev->handle; 3751 cp.page = 0x01; 3752 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3753 sizeof(cp), &cp); 3754 goto unlock; 3755 } 3756 3757 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 3758 struct hci_cp_remote_name_req cp; 3759 memset(&cp, 0, sizeof(cp)); 3760 bacpy(&cp.bdaddr, &conn->dst); 3761 cp.pscan_rep_mode = 0x02; 3762 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3763 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3764 mgmt_device_connected(hdev, conn, NULL, 0); 3765 3766 if (!hci_outgoing_auth_needed(hdev, conn)) { 3767 conn->state = BT_CONNECTED; 3768 hci_connect_cfm(conn, ev->status); 3769 hci_conn_drop(conn); 3770 } 3771 3772 unlock: 3773 hci_dev_unlock(hdev); 3774 } 3775 3776 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3777 { 3778 cancel_delayed_work(&hdev->cmd_timer); 3779 3780 rcu_read_lock(); 3781 if (!test_bit(HCI_RESET, &hdev->flags)) { 3782 if (ncmd) { 3783 cancel_delayed_work(&hdev->ncmd_timer); 3784 atomic_set(&hdev->cmd_cnt, 1); 3785 } else { 3786 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 3787 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, 3788 HCI_NCMD_TIMEOUT); 3789 } 3790 } 3791 rcu_read_unlock(); 3792 } 3793 3794 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, 3795 struct sk_buff *skb) 3796 { 3797 struct hci_rp_le_read_buffer_size_v2 *rp = data; 3798 3799 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3800 3801 if (rp->status) 3802 return rp->status; 3803 3804 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); 3805 hdev->le_pkts = rp->acl_max_pkt; 3806 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); 3807 hdev->iso_pkts = rp->iso_max_pkt; 3808 3809 hdev->le_cnt = hdev->le_pkts; 3810 hdev->iso_cnt = hdev->iso_pkts; 3811 3812 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, 3813 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); 3814 3815 return rp->status; 3816 } 3817 3818 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, 3819 struct sk_buff *skb) 3820 { 3821 struct hci_rp_le_set_cig_params *rp = data; 3822 struct hci_conn *conn; 3823 int i = 0; 3824 3825 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3826 3827 hci_dev_lock(hdev); 3828 3829 if (rp->status) { 3830 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) { 3831 conn->state = BT_CLOSED; 3832 hci_connect_cfm(conn, rp->status); 3833 hci_conn_del(conn); 3834 } 3835 goto unlock; 3836 } 3837 3838 rcu_read_lock(); 3839 3840 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 3841 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id || 3842 conn->state == BT_CONNECTED) 3843 continue; 3844 3845 conn->handle = __le16_to_cpu(rp->handle[i++]); 3846 3847 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn, 3848 conn->handle, conn->link); 3849 3850 /* Create CIS if LE is already connected */ 3851 if (conn->link && conn->link->state == BT_CONNECTED) 3852 hci_le_create_cis(conn->link); 3853 3854 if (i == rp->num_handles) 3855 break; 3856 } 3857 3858 rcu_read_unlock(); 3859 3860 unlock: 3861 hci_dev_unlock(hdev); 3862 3863 return rp->status; 3864 } 3865 3866 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, 3867 struct sk_buff *skb) 3868 { 3869 struct hci_rp_le_setup_iso_path *rp = data; 3870 struct hci_cp_le_setup_iso_path *cp; 3871 struct hci_conn *conn; 3872 3873 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3874 3875 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); 3876 if (!cp) 3877 return rp->status; 3878 3879 hci_dev_lock(hdev); 3880 3881 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 3882 if (!conn) 3883 goto unlock; 3884 3885 if (rp->status) { 3886 hci_connect_cfm(conn, rp->status); 3887 hci_conn_del(conn); 3888 goto unlock; 3889 } 3890 3891 switch (cp->direction) { 3892 /* Input (Host to Controller) */ 3893 case 0x00: 3894 /* Only confirm connection if output only */ 3895 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu) 3896 hci_connect_cfm(conn, rp->status); 3897 break; 3898 /* Output (Controller to Host) */ 3899 case 0x01: 3900 /* Confirm connection since conn->iso_qos is always configured 3901 * last. 3902 */ 3903 hci_connect_cfm(conn, rp->status); 3904 break; 3905 } 3906 3907 unlock: 3908 hci_dev_unlock(hdev); 3909 return rp->status; 3910 } 3911 3912 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) 3913 { 3914 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3915 } 3916 3917 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, 3918 struct sk_buff *skb) 3919 { 3920 struct hci_ev_status *rp = data; 3921 struct hci_cp_le_set_per_adv_params *cp; 3922 3923 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3924 3925 if (rp->status) 3926 return rp->status; 3927 3928 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); 3929 if (!cp) 3930 return rp->status; 3931 3932 /* TODO: set the conn state */ 3933 return rp->status; 3934 } 3935 3936 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, 3937 struct sk_buff *skb) 3938 { 3939 struct hci_ev_status *rp = data; 3940 __u8 *sent; 3941 3942 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3943 3944 if (rp->status) 3945 return rp->status; 3946 3947 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); 3948 if (!sent) 3949 return rp->status; 3950 3951 hci_dev_lock(hdev); 3952 3953 if (*sent) 3954 hci_dev_set_flag(hdev, HCI_LE_PER_ADV); 3955 else 3956 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); 3957 3958 hci_dev_unlock(hdev); 3959 3960 return rp->status; 3961 } 3962 3963 #define HCI_CC_VL(_op, _func, _min, _max) \ 3964 { \ 3965 .op = _op, \ 3966 .func = _func, \ 3967 .min_len = _min, \ 3968 .max_len = _max, \ 3969 } 3970 3971 #define HCI_CC(_op, _func, _len) \ 3972 HCI_CC_VL(_op, _func, _len, _len) 3973 3974 #define HCI_CC_STATUS(_op, _func) \ 3975 HCI_CC(_op, _func, sizeof(struct hci_ev_status)) 3976 3977 static const struct hci_cc { 3978 u16 op; 3979 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 3980 u16 min_len; 3981 u16 max_len; 3982 } hci_cc_table[] = { 3983 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), 3984 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), 3985 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), 3986 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, 3987 hci_cc_remote_name_req_cancel), 3988 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, 3989 sizeof(struct hci_rp_role_discovery)), 3990 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, 3991 sizeof(struct hci_rp_read_link_policy)), 3992 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, 3993 sizeof(struct hci_rp_write_link_policy)), 3994 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, 3995 sizeof(struct hci_rp_read_def_link_policy)), 3996 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, 3997 hci_cc_write_def_link_policy), 3998 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), 3999 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, 4000 sizeof(struct hci_rp_read_stored_link_key)), 4001 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, 4002 sizeof(struct hci_rp_delete_stored_link_key)), 4003 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), 4004 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, 4005 sizeof(struct hci_rp_read_local_name)), 4006 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), 4007 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), 4008 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), 4009 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), 4010 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 4011 sizeof(struct hci_rp_read_class_of_dev)), 4012 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), 4013 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, 4014 sizeof(struct hci_rp_read_voice_setting)), 4015 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), 4016 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, 4017 sizeof(struct hci_rp_read_num_supported_iac)), 4018 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), 4019 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), 4020 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, 4021 sizeof(struct hci_rp_read_auth_payload_to)), 4022 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, 4023 sizeof(struct hci_rp_write_auth_payload_to)), 4024 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, 4025 sizeof(struct hci_rp_read_local_version)), 4026 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, 4027 sizeof(struct hci_rp_read_local_commands)), 4028 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, 4029 sizeof(struct hci_rp_read_local_features)), 4030 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, 4031 sizeof(struct hci_rp_read_local_ext_features)), 4032 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, 4033 sizeof(struct hci_rp_read_buffer_size)), 4034 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, 4035 sizeof(struct hci_rp_read_bd_addr)), 4036 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, 4037 sizeof(struct hci_rp_read_local_pairing_opts)), 4038 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, 4039 sizeof(struct hci_rp_read_page_scan_activity)), 4040 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 4041 hci_cc_write_page_scan_activity), 4042 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, 4043 sizeof(struct hci_rp_read_page_scan_type)), 4044 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), 4045 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, 4046 sizeof(struct hci_rp_read_data_block_size)), 4047 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, 4048 sizeof(struct hci_rp_read_flow_control_mode)), 4049 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, 4050 sizeof(struct hci_rp_read_local_amp_info)), 4051 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, 4052 sizeof(struct hci_rp_read_clock)), 4053 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, 4054 sizeof(struct hci_rp_read_enc_key_size)), 4055 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, 4056 sizeof(struct hci_rp_read_inq_rsp_tx_power)), 4057 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, 4058 hci_cc_read_def_err_data_reporting, 4059 sizeof(struct hci_rp_read_def_err_data_reporting)), 4060 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 4061 hci_cc_write_def_err_data_reporting), 4062 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, 4063 sizeof(struct hci_rp_pin_code_reply)), 4064 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, 4065 sizeof(struct hci_rp_pin_code_neg_reply)), 4066 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, 4067 sizeof(struct hci_rp_read_local_oob_data)), 4068 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, 4069 sizeof(struct hci_rp_read_local_oob_ext_data)), 4070 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, 4071 sizeof(struct hci_rp_le_read_buffer_size)), 4072 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, 4073 sizeof(struct hci_rp_le_read_local_features)), 4074 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, 4075 sizeof(struct hci_rp_le_read_adv_tx_power)), 4076 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 4077 sizeof(struct hci_rp_user_confirm_reply)), 4078 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, 4079 sizeof(struct hci_rp_user_confirm_reply)), 4080 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, 4081 sizeof(struct hci_rp_user_confirm_reply)), 4082 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, 4083 sizeof(struct hci_rp_user_confirm_reply)), 4084 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), 4085 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), 4086 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), 4087 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), 4088 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 4089 hci_cc_le_read_accept_list_size, 4090 sizeof(struct hci_rp_le_read_accept_list_size)), 4091 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), 4092 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, 4093 hci_cc_le_add_to_accept_list), 4094 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, 4095 hci_cc_le_del_from_accept_list), 4096 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, 4097 sizeof(struct hci_rp_le_read_supported_states)), 4098 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, 4099 sizeof(struct hci_rp_le_read_def_data_len)), 4100 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, 4101 hci_cc_le_write_def_data_len), 4102 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, 4103 hci_cc_le_add_to_resolv_list), 4104 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, 4105 hci_cc_le_del_from_resolv_list), 4106 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, 4107 hci_cc_le_clear_resolv_list), 4108 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, 4109 sizeof(struct hci_rp_le_read_resolv_list_size)), 4110 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 4111 hci_cc_le_set_addr_resolution_enable), 4112 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, 4113 sizeof(struct hci_rp_le_read_max_data_len)), 4114 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, 4115 hci_cc_write_le_host_supported), 4116 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), 4117 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, 4118 sizeof(struct hci_rp_read_rssi)), 4119 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, 4120 sizeof(struct hci_rp_read_tx_power)), 4121 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), 4122 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, 4123 hci_cc_le_set_ext_scan_param), 4124 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, 4125 hci_cc_le_set_ext_scan_enable), 4126 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), 4127 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 4128 hci_cc_le_read_num_adv_sets, 4129 sizeof(struct hci_rp_le_read_num_supported_adv_sets)), 4130 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, 4131 sizeof(struct hci_rp_le_set_ext_adv_params)), 4132 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, 4133 hci_cc_le_set_ext_adv_enable), 4134 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 4135 hci_cc_le_set_adv_set_random_addr), 4136 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), 4137 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), 4138 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), 4139 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, 4140 hci_cc_le_set_per_adv_enable), 4141 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, 4142 sizeof(struct hci_rp_le_read_transmit_power)), 4143 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), 4144 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, 4145 sizeof(struct hci_rp_le_read_buffer_size_v2)), 4146 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, 4147 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), 4148 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path, 4149 sizeof(struct hci_rp_le_setup_iso_path)), 4150 }; 4151 4152 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, 4153 struct sk_buff *skb) 4154 { 4155 void *data; 4156 4157 if (skb->len < cc->min_len) { 4158 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", 4159 cc->op, skb->len, cc->min_len); 4160 return HCI_ERROR_UNSPECIFIED; 4161 } 4162 4163 /* Just warn if the length is over max_len size it still be possible to 4164 * partially parse the cc so leave to callback to decide if that is 4165 * acceptable. 4166 */ 4167 if (skb->len > cc->max_len) 4168 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", 4169 cc->op, skb->len, cc->max_len); 4170 4171 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); 4172 if (!data) 4173 return HCI_ERROR_UNSPECIFIED; 4174 4175 return cc->func(hdev, data, skb); 4176 } 4177 4178 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, 4179 struct sk_buff *skb, u16 *opcode, u8 *status, 4180 hci_req_complete_t *req_complete, 4181 hci_req_complete_skb_t *req_complete_skb) 4182 { 4183 struct hci_ev_cmd_complete *ev = data; 4184 int i; 4185 4186 *opcode = __le16_to_cpu(ev->opcode); 4187 4188 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4189 4190 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { 4191 if (hci_cc_table[i].op == *opcode) { 4192 *status = hci_cc_func(hdev, &hci_cc_table[i], skb); 4193 break; 4194 } 4195 } 4196 4197 if (i == ARRAY_SIZE(hci_cc_table)) { 4198 /* Unknown opcode, assume byte 0 contains the status, so 4199 * that e.g. __hci_cmd_sync() properly returns errors 4200 * for vendor specific commands send by HCI drivers. 4201 * If a vendor doesn't actually follow this convention we may 4202 * need to introduce a vendor CC table in order to properly set 4203 * the status. 4204 */ 4205 *status = skb->data[0]; 4206 } 4207 4208 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4209 4210 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 4211 req_complete_skb); 4212 4213 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4214 bt_dev_err(hdev, 4215 "unexpected event for opcode 0x%4.4x", *opcode); 4216 return; 4217 } 4218 4219 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4220 queue_work(hdev->workqueue, &hdev->cmd_work); 4221 } 4222 4223 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) 4224 { 4225 struct hci_cp_le_create_cis *cp; 4226 int i; 4227 4228 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4229 4230 if (!status) 4231 return; 4232 4233 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); 4234 if (!cp) 4235 return; 4236 4237 hci_dev_lock(hdev); 4238 4239 /* Remove connection if command failed */ 4240 for (i = 0; cp->num_cis; cp->num_cis--, i++) { 4241 struct hci_conn *conn; 4242 u16 handle; 4243 4244 handle = __le16_to_cpu(cp->cis[i].cis_handle); 4245 4246 conn = hci_conn_hash_lookup_handle(hdev, handle); 4247 if (conn) { 4248 conn->state = BT_CLOSED; 4249 hci_connect_cfm(conn, status); 4250 hci_conn_del(conn); 4251 } 4252 } 4253 4254 hci_dev_unlock(hdev); 4255 } 4256 4257 #define HCI_CS(_op, _func) \ 4258 { \ 4259 .op = _op, \ 4260 .func = _func, \ 4261 } 4262 4263 static const struct hci_cs { 4264 u16 op; 4265 void (*func)(struct hci_dev *hdev, __u8 status); 4266 } hci_cs_table[] = { 4267 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), 4268 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 4269 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 4270 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 4271 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 4272 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 4273 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 4274 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 4275 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4276 hci_cs_read_remote_ext_features), 4277 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4278 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, 4279 hci_cs_enhanced_setup_sync_conn), 4280 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), 4281 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), 4282 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), 4283 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), 4284 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 4285 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), 4286 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), 4287 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), 4288 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), 4289 }; 4290 4291 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, 4292 struct sk_buff *skb, u16 *opcode, u8 *status, 4293 hci_req_complete_t *req_complete, 4294 hci_req_complete_skb_t *req_complete_skb) 4295 { 4296 struct hci_ev_cmd_status *ev = data; 4297 int i; 4298 4299 *opcode = __le16_to_cpu(ev->opcode); 4300 *status = ev->status; 4301 4302 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4303 4304 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { 4305 if (hci_cs_table[i].op == *opcode) { 4306 hci_cs_table[i].func(hdev, ev->status); 4307 break; 4308 } 4309 } 4310 4311 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4312 4313 /* Indicate request completion if the command failed. Also, if 4314 * we're not waiting for a special event and we get a success 4315 * command status we should try to flag the request as completed 4316 * (since for this kind of commands there will not be a command 4317 * complete event). 4318 */ 4319 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { 4320 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 4321 req_complete_skb); 4322 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4323 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", 4324 *opcode); 4325 return; 4326 } 4327 } 4328 4329 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4330 queue_work(hdev->workqueue, &hdev->cmd_work); 4331 } 4332 4333 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, 4334 struct sk_buff *skb) 4335 { 4336 struct hci_ev_hardware_error *ev = data; 4337 4338 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); 4339 4340 hdev->hw_error_code = ev->code; 4341 4342 queue_work(hdev->req_workqueue, &hdev->error_reset); 4343 } 4344 4345 static void hci_role_change_evt(struct hci_dev *hdev, void *data, 4346 struct sk_buff *skb) 4347 { 4348 struct hci_ev_role_change *ev = data; 4349 struct hci_conn *conn; 4350 4351 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4352 4353 hci_dev_lock(hdev); 4354 4355 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4356 if (conn) { 4357 if (!ev->status) 4358 conn->role = ev->role; 4359 4360 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 4361 4362 hci_role_switch_cfm(conn, ev->status, ev->role); 4363 } 4364 4365 hci_dev_unlock(hdev); 4366 } 4367 4368 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, 4369 struct sk_buff *skb) 4370 { 4371 struct hci_ev_num_comp_pkts *ev = data; 4372 int i; 4373 4374 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, 4375 flex_array_size(ev, handles, ev->num))) 4376 return; 4377 4378 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 4379 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 4380 return; 4381 } 4382 4383 bt_dev_dbg(hdev, "num %d", ev->num); 4384 4385 for (i = 0; i < ev->num; i++) { 4386 struct hci_comp_pkts_info *info = &ev->handles[i]; 4387 struct hci_conn *conn; 4388 __u16 handle, count; 4389 4390 handle = __le16_to_cpu(info->handle); 4391 count = __le16_to_cpu(info->count); 4392 4393 conn = hci_conn_hash_lookup_handle(hdev, handle); 4394 if (!conn) 4395 continue; 4396 4397 conn->sent -= count; 4398 4399 switch (conn->type) { 4400 case ACL_LINK: 4401 hdev->acl_cnt += count; 4402 if (hdev->acl_cnt > hdev->acl_pkts) 4403 hdev->acl_cnt = hdev->acl_pkts; 4404 break; 4405 4406 case LE_LINK: 4407 if (hdev->le_pkts) { 4408 hdev->le_cnt += count; 4409 if (hdev->le_cnt > hdev->le_pkts) 4410 hdev->le_cnt = hdev->le_pkts; 4411 } else { 4412 hdev->acl_cnt += count; 4413 if (hdev->acl_cnt > hdev->acl_pkts) 4414 hdev->acl_cnt = hdev->acl_pkts; 4415 } 4416 break; 4417 4418 case SCO_LINK: 4419 hdev->sco_cnt += count; 4420 if (hdev->sco_cnt > hdev->sco_pkts) 4421 hdev->sco_cnt = hdev->sco_pkts; 4422 break; 4423 4424 case ISO_LINK: 4425 if (hdev->iso_pkts) { 4426 hdev->iso_cnt += count; 4427 if (hdev->iso_cnt > hdev->iso_pkts) 4428 hdev->iso_cnt = hdev->iso_pkts; 4429 } else if (hdev->le_pkts) { 4430 hdev->le_cnt += count; 4431 if (hdev->le_cnt > hdev->le_pkts) 4432 hdev->le_cnt = hdev->le_pkts; 4433 } else { 4434 hdev->acl_cnt += count; 4435 if (hdev->acl_cnt > hdev->acl_pkts) 4436 hdev->acl_cnt = hdev->acl_pkts; 4437 } 4438 break; 4439 4440 default: 4441 bt_dev_err(hdev, "unknown type %d conn %p", 4442 conn->type, conn); 4443 break; 4444 } 4445 } 4446 4447 queue_work(hdev->workqueue, &hdev->tx_work); 4448 } 4449 4450 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 4451 __u16 handle) 4452 { 4453 struct hci_chan *chan; 4454 4455 switch (hdev->dev_type) { 4456 case HCI_PRIMARY: 4457 return hci_conn_hash_lookup_handle(hdev, handle); 4458 case HCI_AMP: 4459 chan = hci_chan_lookup_handle(hdev, handle); 4460 if (chan) 4461 return chan->conn; 4462 break; 4463 default: 4464 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4465 break; 4466 } 4467 4468 return NULL; 4469 } 4470 4471 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, 4472 struct sk_buff *skb) 4473 { 4474 struct hci_ev_num_comp_blocks *ev = data; 4475 int i; 4476 4477 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, 4478 flex_array_size(ev, handles, ev->num_hndl))) 4479 return; 4480 4481 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 4482 bt_dev_err(hdev, "wrong event for mode %d", 4483 hdev->flow_ctl_mode); 4484 return; 4485 } 4486 4487 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, 4488 ev->num_hndl); 4489 4490 for (i = 0; i < ev->num_hndl; i++) { 4491 struct hci_comp_blocks_info *info = &ev->handles[i]; 4492 struct hci_conn *conn = NULL; 4493 __u16 handle, block_count; 4494 4495 handle = __le16_to_cpu(info->handle); 4496 block_count = __le16_to_cpu(info->blocks); 4497 4498 conn = __hci_conn_lookup_handle(hdev, handle); 4499 if (!conn) 4500 continue; 4501 4502 conn->sent -= block_count; 4503 4504 switch (conn->type) { 4505 case ACL_LINK: 4506 case AMP_LINK: 4507 hdev->block_cnt += block_count; 4508 if (hdev->block_cnt > hdev->num_blocks) 4509 hdev->block_cnt = hdev->num_blocks; 4510 break; 4511 4512 default: 4513 bt_dev_err(hdev, "unknown type %d conn %p", 4514 conn->type, conn); 4515 break; 4516 } 4517 } 4518 4519 queue_work(hdev->workqueue, &hdev->tx_work); 4520 } 4521 4522 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4523 struct sk_buff *skb) 4524 { 4525 struct hci_ev_mode_change *ev = data; 4526 struct hci_conn *conn; 4527 4528 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4529 4530 hci_dev_lock(hdev); 4531 4532 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4533 if (conn) { 4534 conn->mode = ev->mode; 4535 4536 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4537 &conn->flags)) { 4538 if (conn->mode == HCI_CM_ACTIVE) 4539 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4540 else 4541 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4542 } 4543 4544 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4545 hci_sco_setup(conn, ev->status); 4546 } 4547 4548 hci_dev_unlock(hdev); 4549 } 4550 4551 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4552 struct sk_buff *skb) 4553 { 4554 struct hci_ev_pin_code_req *ev = data; 4555 struct hci_conn *conn; 4556 4557 bt_dev_dbg(hdev, ""); 4558 4559 hci_dev_lock(hdev); 4560 4561 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4562 if (!conn) 4563 goto unlock; 4564 4565 if (conn->state == BT_CONNECTED) { 4566 hci_conn_hold(conn); 4567 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4568 hci_conn_drop(conn); 4569 } 4570 4571 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4572 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4573 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4574 sizeof(ev->bdaddr), &ev->bdaddr); 4575 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4576 u8 secure; 4577 4578 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4579 secure = 1; 4580 else 4581 secure = 0; 4582 4583 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4584 } 4585 4586 unlock: 4587 hci_dev_unlock(hdev); 4588 } 4589 4590 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4591 { 4592 if (key_type == HCI_LK_CHANGED_COMBINATION) 4593 return; 4594 4595 conn->pin_length = pin_len; 4596 conn->key_type = key_type; 4597 4598 switch (key_type) { 4599 case HCI_LK_LOCAL_UNIT: 4600 case HCI_LK_REMOTE_UNIT: 4601 case HCI_LK_DEBUG_COMBINATION: 4602 return; 4603 case HCI_LK_COMBINATION: 4604 if (pin_len == 16) 4605 conn->pending_sec_level = BT_SECURITY_HIGH; 4606 else 4607 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4608 break; 4609 case HCI_LK_UNAUTH_COMBINATION_P192: 4610 case HCI_LK_UNAUTH_COMBINATION_P256: 4611 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4612 break; 4613 case HCI_LK_AUTH_COMBINATION_P192: 4614 conn->pending_sec_level = BT_SECURITY_HIGH; 4615 break; 4616 case HCI_LK_AUTH_COMBINATION_P256: 4617 conn->pending_sec_level = BT_SECURITY_FIPS; 4618 break; 4619 } 4620 } 4621 4622 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4623 struct sk_buff *skb) 4624 { 4625 struct hci_ev_link_key_req *ev = data; 4626 struct hci_cp_link_key_reply cp; 4627 struct hci_conn *conn; 4628 struct link_key *key; 4629 4630 bt_dev_dbg(hdev, ""); 4631 4632 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4633 return; 4634 4635 hci_dev_lock(hdev); 4636 4637 key = hci_find_link_key(hdev, &ev->bdaddr); 4638 if (!key) { 4639 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4640 goto not_found; 4641 } 4642 4643 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4644 4645 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4646 if (conn) { 4647 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4648 4649 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4650 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4651 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4652 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4653 goto not_found; 4654 } 4655 4656 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4657 (conn->pending_sec_level == BT_SECURITY_HIGH || 4658 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4659 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4660 goto not_found; 4661 } 4662 4663 conn_set_key(conn, key->type, key->pin_len); 4664 } 4665 4666 bacpy(&cp.bdaddr, &ev->bdaddr); 4667 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4668 4669 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4670 4671 hci_dev_unlock(hdev); 4672 4673 return; 4674 4675 not_found: 4676 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4677 hci_dev_unlock(hdev); 4678 } 4679 4680 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4681 struct sk_buff *skb) 4682 { 4683 struct hci_ev_link_key_notify *ev = data; 4684 struct hci_conn *conn; 4685 struct link_key *key; 4686 bool persistent; 4687 u8 pin_len = 0; 4688 4689 bt_dev_dbg(hdev, ""); 4690 4691 hci_dev_lock(hdev); 4692 4693 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4694 if (!conn) 4695 goto unlock; 4696 4697 hci_conn_hold(conn); 4698 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4699 hci_conn_drop(conn); 4700 4701 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4702 conn_set_key(conn, ev->key_type, conn->pin_length); 4703 4704 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4705 goto unlock; 4706 4707 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4708 ev->key_type, pin_len, &persistent); 4709 if (!key) 4710 goto unlock; 4711 4712 /* Update connection information since adding the key will have 4713 * fixed up the type in the case of changed combination keys. 4714 */ 4715 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4716 conn_set_key(conn, key->type, key->pin_len); 4717 4718 mgmt_new_link_key(hdev, key, persistent); 4719 4720 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4721 * is set. If it's not set simply remove the key from the kernel 4722 * list (we've still notified user space about it but with 4723 * store_hint being 0). 4724 */ 4725 if (key->type == HCI_LK_DEBUG_COMBINATION && 4726 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4727 list_del_rcu(&key->list); 4728 kfree_rcu(key, rcu); 4729 goto unlock; 4730 } 4731 4732 if (persistent) 4733 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4734 else 4735 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4736 4737 unlock: 4738 hci_dev_unlock(hdev); 4739 } 4740 4741 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4742 struct sk_buff *skb) 4743 { 4744 struct hci_ev_clock_offset *ev = data; 4745 struct hci_conn *conn; 4746 4747 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4748 4749 hci_dev_lock(hdev); 4750 4751 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4752 if (conn && !ev->status) { 4753 struct inquiry_entry *ie; 4754 4755 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4756 if (ie) { 4757 ie->data.clock_offset = ev->clock_offset; 4758 ie->timestamp = jiffies; 4759 } 4760 } 4761 4762 hci_dev_unlock(hdev); 4763 } 4764 4765 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4766 struct sk_buff *skb) 4767 { 4768 struct hci_ev_pkt_type_change *ev = data; 4769 struct hci_conn *conn; 4770 4771 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4772 4773 hci_dev_lock(hdev); 4774 4775 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4776 if (conn && !ev->status) 4777 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4778 4779 hci_dev_unlock(hdev); 4780 } 4781 4782 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4783 struct sk_buff *skb) 4784 { 4785 struct hci_ev_pscan_rep_mode *ev = data; 4786 struct inquiry_entry *ie; 4787 4788 bt_dev_dbg(hdev, ""); 4789 4790 hci_dev_lock(hdev); 4791 4792 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4793 if (ie) { 4794 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4795 ie->timestamp = jiffies; 4796 } 4797 4798 hci_dev_unlock(hdev); 4799 } 4800 4801 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4802 struct sk_buff *skb) 4803 { 4804 struct hci_ev_inquiry_result_rssi *ev = edata; 4805 struct inquiry_data data; 4806 int i; 4807 4808 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4809 4810 if (!ev->num) 4811 return; 4812 4813 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4814 return; 4815 4816 hci_dev_lock(hdev); 4817 4818 if (skb->len == array_size(ev->num, 4819 sizeof(struct inquiry_info_rssi_pscan))) { 4820 struct inquiry_info_rssi_pscan *info; 4821 4822 for (i = 0; i < ev->num; i++) { 4823 u32 flags; 4824 4825 info = hci_ev_skb_pull(hdev, skb, 4826 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4827 sizeof(*info)); 4828 if (!info) { 4829 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4830 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4831 goto unlock; 4832 } 4833 4834 bacpy(&data.bdaddr, &info->bdaddr); 4835 data.pscan_rep_mode = info->pscan_rep_mode; 4836 data.pscan_period_mode = info->pscan_period_mode; 4837 data.pscan_mode = info->pscan_mode; 4838 memcpy(data.dev_class, info->dev_class, 3); 4839 data.clock_offset = info->clock_offset; 4840 data.rssi = info->rssi; 4841 data.ssp_mode = 0x00; 4842 4843 flags = hci_inquiry_cache_update(hdev, &data, false); 4844 4845 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4846 info->dev_class, info->rssi, 4847 flags, NULL, 0, NULL, 0, 0); 4848 } 4849 } else if (skb->len == array_size(ev->num, 4850 sizeof(struct inquiry_info_rssi))) { 4851 struct inquiry_info_rssi *info; 4852 4853 for (i = 0; i < ev->num; i++) { 4854 u32 flags; 4855 4856 info = hci_ev_skb_pull(hdev, skb, 4857 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4858 sizeof(*info)); 4859 if (!info) { 4860 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4861 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4862 goto unlock; 4863 } 4864 4865 bacpy(&data.bdaddr, &info->bdaddr); 4866 data.pscan_rep_mode = info->pscan_rep_mode; 4867 data.pscan_period_mode = info->pscan_period_mode; 4868 data.pscan_mode = 0x00; 4869 memcpy(data.dev_class, info->dev_class, 3); 4870 data.clock_offset = info->clock_offset; 4871 data.rssi = info->rssi; 4872 data.ssp_mode = 0x00; 4873 4874 flags = hci_inquiry_cache_update(hdev, &data, false); 4875 4876 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4877 info->dev_class, info->rssi, 4878 flags, NULL, 0, NULL, 0, 0); 4879 } 4880 } else { 4881 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4882 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4883 } 4884 unlock: 4885 hci_dev_unlock(hdev); 4886 } 4887 4888 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4889 struct sk_buff *skb) 4890 { 4891 struct hci_ev_remote_ext_features *ev = data; 4892 struct hci_conn *conn; 4893 4894 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4895 4896 hci_dev_lock(hdev); 4897 4898 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4899 if (!conn) 4900 goto unlock; 4901 4902 if (ev->page < HCI_MAX_PAGES) 4903 memcpy(conn->features[ev->page], ev->features, 8); 4904 4905 if (!ev->status && ev->page == 0x01) { 4906 struct inquiry_entry *ie; 4907 4908 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4909 if (ie) 4910 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4911 4912 if (ev->features[0] & LMP_HOST_SSP) { 4913 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4914 } else { 4915 /* It is mandatory by the Bluetooth specification that 4916 * Extended Inquiry Results are only used when Secure 4917 * Simple Pairing is enabled, but some devices violate 4918 * this. 4919 * 4920 * To make these devices work, the internal SSP 4921 * enabled flag needs to be cleared if the remote host 4922 * features do not indicate SSP support */ 4923 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4924 } 4925 4926 if (ev->features[0] & LMP_HOST_SC) 4927 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4928 } 4929 4930 if (conn->state != BT_CONFIG) 4931 goto unlock; 4932 4933 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4934 struct hci_cp_remote_name_req cp; 4935 memset(&cp, 0, sizeof(cp)); 4936 bacpy(&cp.bdaddr, &conn->dst); 4937 cp.pscan_rep_mode = 0x02; 4938 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4939 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4940 mgmt_device_connected(hdev, conn, NULL, 0); 4941 4942 if (!hci_outgoing_auth_needed(hdev, conn)) { 4943 conn->state = BT_CONNECTED; 4944 hci_connect_cfm(conn, ev->status); 4945 hci_conn_drop(conn); 4946 } 4947 4948 unlock: 4949 hci_dev_unlock(hdev); 4950 } 4951 4952 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4953 struct sk_buff *skb) 4954 { 4955 struct hci_ev_sync_conn_complete *ev = data; 4956 struct hci_conn *conn; 4957 u8 status = ev->status; 4958 4959 switch (ev->link_type) { 4960 case SCO_LINK: 4961 case ESCO_LINK: 4962 break; 4963 default: 4964 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type 4965 * for HCI_Synchronous_Connection_Complete is limited to 4966 * either SCO or eSCO 4967 */ 4968 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); 4969 return; 4970 } 4971 4972 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4973 4974 hci_dev_lock(hdev); 4975 4976 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4977 if (!conn) { 4978 if (ev->link_type == ESCO_LINK) 4979 goto unlock; 4980 4981 /* When the link type in the event indicates SCO connection 4982 * and lookup of the connection object fails, then check 4983 * if an eSCO connection object exists. 4984 * 4985 * The core limits the synchronous connections to either 4986 * SCO or eSCO. The eSCO connection is preferred and tried 4987 * to be setup first and until successfully established, 4988 * the link type will be hinted as eSCO. 4989 */ 4990 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4991 if (!conn) 4992 goto unlock; 4993 } 4994 4995 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. 4996 * Processing it more than once per connection can corrupt kernel memory. 4997 * 4998 * As the connection handle is set here for the first time, it indicates 4999 * whether the connection is already set up. 5000 */ 5001 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 5002 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); 5003 goto unlock; 5004 } 5005 5006 switch (status) { 5007 case 0x00: 5008 conn->handle = __le16_to_cpu(ev->handle); 5009 if (conn->handle > HCI_CONN_HANDLE_MAX) { 5010 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 5011 conn->handle, HCI_CONN_HANDLE_MAX); 5012 status = HCI_ERROR_INVALID_PARAMETERS; 5013 conn->state = BT_CLOSED; 5014 break; 5015 } 5016 5017 conn->state = BT_CONNECTED; 5018 conn->type = ev->link_type; 5019 5020 hci_debugfs_create_conn(conn); 5021 hci_conn_add_sysfs(conn); 5022 break; 5023 5024 case 0x10: /* Connection Accept Timeout */ 5025 case 0x0d: /* Connection Rejected due to Limited Resources */ 5026 case 0x11: /* Unsupported Feature or Parameter Value */ 5027 case 0x1c: /* SCO interval rejected */ 5028 case 0x1a: /* Unsupported Remote Feature */ 5029 case 0x1e: /* Invalid LMP Parameters */ 5030 case 0x1f: /* Unspecified error */ 5031 case 0x20: /* Unsupported LMP Parameter value */ 5032 if (conn->out) { 5033 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 5034 (hdev->esco_type & EDR_ESCO_MASK); 5035 if (hci_setup_sync(conn, conn->link->handle)) 5036 goto unlock; 5037 } 5038 fallthrough; 5039 5040 default: 5041 conn->state = BT_CLOSED; 5042 break; 5043 } 5044 5045 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 5046 /* Notify only in case of SCO over HCI transport data path which 5047 * is zero and non-zero value shall be non-HCI transport data path 5048 */ 5049 if (conn->codec.data_path == 0 && hdev->notify) { 5050 switch (ev->air_mode) { 5051 case 0x02: 5052 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 5053 break; 5054 case 0x03: 5055 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 5056 break; 5057 } 5058 } 5059 5060 hci_connect_cfm(conn, status); 5061 if (status) 5062 hci_conn_del(conn); 5063 5064 unlock: 5065 hci_dev_unlock(hdev); 5066 } 5067 5068 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 5069 { 5070 size_t parsed = 0; 5071 5072 while (parsed < eir_len) { 5073 u8 field_len = eir[0]; 5074 5075 if (field_len == 0) 5076 return parsed; 5077 5078 parsed += field_len + 1; 5079 eir += field_len + 1; 5080 } 5081 5082 return eir_len; 5083 } 5084 5085 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, 5086 struct sk_buff *skb) 5087 { 5088 struct hci_ev_ext_inquiry_result *ev = edata; 5089 struct inquiry_data data; 5090 size_t eir_len; 5091 int i; 5092 5093 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, 5094 flex_array_size(ev, info, ev->num))) 5095 return; 5096 5097 bt_dev_dbg(hdev, "num %d", ev->num); 5098 5099 if (!ev->num) 5100 return; 5101 5102 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 5103 return; 5104 5105 hci_dev_lock(hdev); 5106 5107 for (i = 0; i < ev->num; i++) { 5108 struct extended_inquiry_info *info = &ev->info[i]; 5109 u32 flags; 5110 bool name_known; 5111 5112 bacpy(&data.bdaddr, &info->bdaddr); 5113 data.pscan_rep_mode = info->pscan_rep_mode; 5114 data.pscan_period_mode = info->pscan_period_mode; 5115 data.pscan_mode = 0x00; 5116 memcpy(data.dev_class, info->dev_class, 3); 5117 data.clock_offset = info->clock_offset; 5118 data.rssi = info->rssi; 5119 data.ssp_mode = 0x01; 5120 5121 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5122 name_known = eir_get_data(info->data, 5123 sizeof(info->data), 5124 EIR_NAME_COMPLETE, NULL); 5125 else 5126 name_known = true; 5127 5128 flags = hci_inquiry_cache_update(hdev, &data, name_known); 5129 5130 eir_len = eir_get_length(info->data, sizeof(info->data)); 5131 5132 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 5133 info->dev_class, info->rssi, 5134 flags, info->data, eir_len, NULL, 0, 0); 5135 } 5136 5137 hci_dev_unlock(hdev); 5138 } 5139 5140 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, 5141 struct sk_buff *skb) 5142 { 5143 struct hci_ev_key_refresh_complete *ev = data; 5144 struct hci_conn *conn; 5145 5146 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, 5147 __le16_to_cpu(ev->handle)); 5148 5149 hci_dev_lock(hdev); 5150 5151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5152 if (!conn) 5153 goto unlock; 5154 5155 /* For BR/EDR the necessary steps are taken through the 5156 * auth_complete event. 5157 */ 5158 if (conn->type != LE_LINK) 5159 goto unlock; 5160 5161 if (!ev->status) 5162 conn->sec_level = conn->pending_sec_level; 5163 5164 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 5165 5166 if (ev->status && conn->state == BT_CONNECTED) { 5167 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 5168 hci_conn_drop(conn); 5169 goto unlock; 5170 } 5171 5172 if (conn->state == BT_CONFIG) { 5173 if (!ev->status) 5174 conn->state = BT_CONNECTED; 5175 5176 hci_connect_cfm(conn, ev->status); 5177 hci_conn_drop(conn); 5178 } else { 5179 hci_auth_cfm(conn, ev->status); 5180 5181 hci_conn_hold(conn); 5182 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 5183 hci_conn_drop(conn); 5184 } 5185 5186 unlock: 5187 hci_dev_unlock(hdev); 5188 } 5189 5190 static u8 hci_get_auth_req(struct hci_conn *conn) 5191 { 5192 /* If remote requests no-bonding follow that lead */ 5193 if (conn->remote_auth == HCI_AT_NO_BONDING || 5194 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 5195 return conn->remote_auth | (conn->auth_type & 0x01); 5196 5197 /* If both remote and local have enough IO capabilities, require 5198 * MITM protection 5199 */ 5200 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 5201 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 5202 return conn->remote_auth | 0x01; 5203 5204 /* No MITM protection possible so ignore remote requirement */ 5205 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 5206 } 5207 5208 static u8 bredr_oob_data_present(struct hci_conn *conn) 5209 { 5210 struct hci_dev *hdev = conn->hdev; 5211 struct oob_data *data; 5212 5213 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 5214 if (!data) 5215 return 0x00; 5216 5217 if (bredr_sc_enabled(hdev)) { 5218 /* When Secure Connections is enabled, then just 5219 * return the present value stored with the OOB 5220 * data. The stored value contains the right present 5221 * information. However it can only be trusted when 5222 * not in Secure Connection Only mode. 5223 */ 5224 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 5225 return data->present; 5226 5227 /* When Secure Connections Only mode is enabled, then 5228 * the P-256 values are required. If they are not 5229 * available, then do not declare that OOB data is 5230 * present. 5231 */ 5232 if (!memcmp(data->rand256, ZERO_KEY, 16) || 5233 !memcmp(data->hash256, ZERO_KEY, 16)) 5234 return 0x00; 5235 5236 return 0x02; 5237 } 5238 5239 /* When Secure Connections is not enabled or actually 5240 * not supported by the hardware, then check that if 5241 * P-192 data values are present. 5242 */ 5243 if (!memcmp(data->rand192, ZERO_KEY, 16) || 5244 !memcmp(data->hash192, ZERO_KEY, 16)) 5245 return 0x00; 5246 5247 return 0x01; 5248 } 5249 5250 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, 5251 struct sk_buff *skb) 5252 { 5253 struct hci_ev_io_capa_request *ev = data; 5254 struct hci_conn *conn; 5255 5256 bt_dev_dbg(hdev, ""); 5257 5258 hci_dev_lock(hdev); 5259 5260 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5261 if (!conn) 5262 goto unlock; 5263 5264 hci_conn_hold(conn); 5265 5266 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5267 goto unlock; 5268 5269 /* Allow pairing if we're pairable, the initiators of the 5270 * pairing or if the remote is not requesting bonding. 5271 */ 5272 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 5273 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 5274 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 5275 struct hci_cp_io_capability_reply cp; 5276 5277 bacpy(&cp.bdaddr, &ev->bdaddr); 5278 /* Change the IO capability from KeyboardDisplay 5279 * to DisplayYesNo as it is not supported by BT spec. */ 5280 cp.capability = (conn->io_capability == 0x04) ? 5281 HCI_IO_DISPLAY_YESNO : conn->io_capability; 5282 5283 /* If we are initiators, there is no remote information yet */ 5284 if (conn->remote_auth == 0xff) { 5285 /* Request MITM protection if our IO caps allow it 5286 * except for the no-bonding case. 5287 */ 5288 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5289 conn->auth_type != HCI_AT_NO_BONDING) 5290 conn->auth_type |= 0x01; 5291 } else { 5292 conn->auth_type = hci_get_auth_req(conn); 5293 } 5294 5295 /* If we're not bondable, force one of the non-bondable 5296 * authentication requirement values. 5297 */ 5298 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 5299 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 5300 5301 cp.authentication = conn->auth_type; 5302 cp.oob_data = bredr_oob_data_present(conn); 5303 5304 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 5305 sizeof(cp), &cp); 5306 } else { 5307 struct hci_cp_io_capability_neg_reply cp; 5308 5309 bacpy(&cp.bdaddr, &ev->bdaddr); 5310 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 5311 5312 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 5313 sizeof(cp), &cp); 5314 } 5315 5316 unlock: 5317 hci_dev_unlock(hdev); 5318 } 5319 5320 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, 5321 struct sk_buff *skb) 5322 { 5323 struct hci_ev_io_capa_reply *ev = data; 5324 struct hci_conn *conn; 5325 5326 bt_dev_dbg(hdev, ""); 5327 5328 hci_dev_lock(hdev); 5329 5330 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5331 if (!conn) 5332 goto unlock; 5333 5334 conn->remote_cap = ev->capability; 5335 conn->remote_auth = ev->authentication; 5336 5337 unlock: 5338 hci_dev_unlock(hdev); 5339 } 5340 5341 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, 5342 struct sk_buff *skb) 5343 { 5344 struct hci_ev_user_confirm_req *ev = data; 5345 int loc_mitm, rem_mitm, confirm_hint = 0; 5346 struct hci_conn *conn; 5347 5348 bt_dev_dbg(hdev, ""); 5349 5350 hci_dev_lock(hdev); 5351 5352 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5353 goto unlock; 5354 5355 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5356 if (!conn) 5357 goto unlock; 5358 5359 loc_mitm = (conn->auth_type & 0x01); 5360 rem_mitm = (conn->remote_auth & 0x01); 5361 5362 /* If we require MITM but the remote device can't provide that 5363 * (it has NoInputNoOutput) then reject the confirmation 5364 * request. We check the security level here since it doesn't 5365 * necessarily match conn->auth_type. 5366 */ 5367 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 5368 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 5369 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); 5370 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 5371 sizeof(ev->bdaddr), &ev->bdaddr); 5372 goto unlock; 5373 } 5374 5375 /* If no side requires MITM protection; auto-accept */ 5376 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 5377 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 5378 5379 /* If we're not the initiators request authorization to 5380 * proceed from user space (mgmt_user_confirm with 5381 * confirm_hint set to 1). The exception is if neither 5382 * side had MITM or if the local IO capability is 5383 * NoInputNoOutput, in which case we do auto-accept 5384 */ 5385 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 5386 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5387 (loc_mitm || rem_mitm)) { 5388 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); 5389 confirm_hint = 1; 5390 goto confirm; 5391 } 5392 5393 /* If there already exists link key in local host, leave the 5394 * decision to user space since the remote device could be 5395 * legitimate or malicious. 5396 */ 5397 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5398 bt_dev_dbg(hdev, "Local host already has link key"); 5399 confirm_hint = 1; 5400 goto confirm; 5401 } 5402 5403 BT_DBG("Auto-accept of user confirmation with %ums delay", 5404 hdev->auto_accept_delay); 5405 5406 if (hdev->auto_accept_delay > 0) { 5407 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5408 queue_delayed_work(conn->hdev->workqueue, 5409 &conn->auto_accept_work, delay); 5410 goto unlock; 5411 } 5412 5413 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5414 sizeof(ev->bdaddr), &ev->bdaddr); 5415 goto unlock; 5416 } 5417 5418 confirm: 5419 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5420 le32_to_cpu(ev->passkey), confirm_hint); 5421 5422 unlock: 5423 hci_dev_unlock(hdev); 5424 } 5425 5426 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5427 struct sk_buff *skb) 5428 { 5429 struct hci_ev_user_passkey_req *ev = data; 5430 5431 bt_dev_dbg(hdev, ""); 5432 5433 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5434 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5435 } 5436 5437 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5438 struct sk_buff *skb) 5439 { 5440 struct hci_ev_user_passkey_notify *ev = data; 5441 struct hci_conn *conn; 5442 5443 bt_dev_dbg(hdev, ""); 5444 5445 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5446 if (!conn) 5447 return; 5448 5449 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5450 conn->passkey_entered = 0; 5451 5452 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5453 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5454 conn->dst_type, conn->passkey_notify, 5455 conn->passkey_entered); 5456 } 5457 5458 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5459 struct sk_buff *skb) 5460 { 5461 struct hci_ev_keypress_notify *ev = data; 5462 struct hci_conn *conn; 5463 5464 bt_dev_dbg(hdev, ""); 5465 5466 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5467 if (!conn) 5468 return; 5469 5470 switch (ev->type) { 5471 case HCI_KEYPRESS_STARTED: 5472 conn->passkey_entered = 0; 5473 return; 5474 5475 case HCI_KEYPRESS_ENTERED: 5476 conn->passkey_entered++; 5477 break; 5478 5479 case HCI_KEYPRESS_ERASED: 5480 conn->passkey_entered--; 5481 break; 5482 5483 case HCI_KEYPRESS_CLEARED: 5484 conn->passkey_entered = 0; 5485 break; 5486 5487 case HCI_KEYPRESS_COMPLETED: 5488 return; 5489 } 5490 5491 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5492 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5493 conn->dst_type, conn->passkey_notify, 5494 conn->passkey_entered); 5495 } 5496 5497 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5498 struct sk_buff *skb) 5499 { 5500 struct hci_ev_simple_pair_complete *ev = data; 5501 struct hci_conn *conn; 5502 5503 bt_dev_dbg(hdev, ""); 5504 5505 hci_dev_lock(hdev); 5506 5507 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5508 if (!conn) 5509 goto unlock; 5510 5511 /* Reset the authentication requirement to unknown */ 5512 conn->remote_auth = 0xff; 5513 5514 /* To avoid duplicate auth_failed events to user space we check 5515 * the HCI_CONN_AUTH_PEND flag which will be set if we 5516 * initiated the authentication. A traditional auth_complete 5517 * event gets always produced as initiator and is also mapped to 5518 * the mgmt_auth_failed event */ 5519 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5520 mgmt_auth_failed(conn, ev->status); 5521 5522 hci_conn_drop(conn); 5523 5524 unlock: 5525 hci_dev_unlock(hdev); 5526 } 5527 5528 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, 5529 struct sk_buff *skb) 5530 { 5531 struct hci_ev_remote_host_features *ev = data; 5532 struct inquiry_entry *ie; 5533 struct hci_conn *conn; 5534 5535 bt_dev_dbg(hdev, ""); 5536 5537 hci_dev_lock(hdev); 5538 5539 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5540 if (conn) 5541 memcpy(conn->features[1], ev->features, 8); 5542 5543 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5544 if (ie) 5545 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5546 5547 hci_dev_unlock(hdev); 5548 } 5549 5550 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, 5551 struct sk_buff *skb) 5552 { 5553 struct hci_ev_remote_oob_data_request *ev = edata; 5554 struct oob_data *data; 5555 5556 bt_dev_dbg(hdev, ""); 5557 5558 hci_dev_lock(hdev); 5559 5560 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5561 goto unlock; 5562 5563 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5564 if (!data) { 5565 struct hci_cp_remote_oob_data_neg_reply cp; 5566 5567 bacpy(&cp.bdaddr, &ev->bdaddr); 5568 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5569 sizeof(cp), &cp); 5570 goto unlock; 5571 } 5572 5573 if (bredr_sc_enabled(hdev)) { 5574 struct hci_cp_remote_oob_ext_data_reply cp; 5575 5576 bacpy(&cp.bdaddr, &ev->bdaddr); 5577 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5578 memset(cp.hash192, 0, sizeof(cp.hash192)); 5579 memset(cp.rand192, 0, sizeof(cp.rand192)); 5580 } else { 5581 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5582 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5583 } 5584 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5585 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5586 5587 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5588 sizeof(cp), &cp); 5589 } else { 5590 struct hci_cp_remote_oob_data_reply cp; 5591 5592 bacpy(&cp.bdaddr, &ev->bdaddr); 5593 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5594 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5595 5596 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5597 sizeof(cp), &cp); 5598 } 5599 5600 unlock: 5601 hci_dev_unlock(hdev); 5602 } 5603 5604 #if IS_ENABLED(CONFIG_BT_HS) 5605 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, 5606 struct sk_buff *skb) 5607 { 5608 struct hci_ev_channel_selected *ev = data; 5609 struct hci_conn *hcon; 5610 5611 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); 5612 5613 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5614 if (!hcon) 5615 return; 5616 5617 amp_read_loc_assoc_final_data(hdev, hcon); 5618 } 5619 5620 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, 5621 struct sk_buff *skb) 5622 { 5623 struct hci_ev_phy_link_complete *ev = data; 5624 struct hci_conn *hcon, *bredr_hcon; 5625 5626 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, 5627 ev->status); 5628 5629 hci_dev_lock(hdev); 5630 5631 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5632 if (!hcon) 5633 goto unlock; 5634 5635 if (!hcon->amp_mgr) 5636 goto unlock; 5637 5638 if (ev->status) { 5639 hci_conn_del(hcon); 5640 goto unlock; 5641 } 5642 5643 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5644 5645 hcon->state = BT_CONNECTED; 5646 bacpy(&hcon->dst, &bredr_hcon->dst); 5647 5648 hci_conn_hold(hcon); 5649 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5650 hci_conn_drop(hcon); 5651 5652 hci_debugfs_create_conn(hcon); 5653 hci_conn_add_sysfs(hcon); 5654 5655 amp_physical_cfm(bredr_hcon, hcon); 5656 5657 unlock: 5658 hci_dev_unlock(hdev); 5659 } 5660 5661 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, 5662 struct sk_buff *skb) 5663 { 5664 struct hci_ev_logical_link_complete *ev = data; 5665 struct hci_conn *hcon; 5666 struct hci_chan *hchan; 5667 struct amp_mgr *mgr; 5668 5669 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5670 le16_to_cpu(ev->handle), ev->phy_handle, ev->status); 5671 5672 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5673 if (!hcon) 5674 return; 5675 5676 /* Create AMP hchan */ 5677 hchan = hci_chan_create(hcon); 5678 if (!hchan) 5679 return; 5680 5681 hchan->handle = le16_to_cpu(ev->handle); 5682 hchan->amp = true; 5683 5684 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5685 5686 mgr = hcon->amp_mgr; 5687 if (mgr && mgr->bredr_chan) { 5688 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5689 5690 l2cap_chan_lock(bredr_chan); 5691 5692 bredr_chan->conn->mtu = hdev->block_mtu; 5693 l2cap_logical_cfm(bredr_chan, hchan, 0); 5694 hci_conn_hold(hcon); 5695 5696 l2cap_chan_unlock(bredr_chan); 5697 } 5698 } 5699 5700 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, 5701 struct sk_buff *skb) 5702 { 5703 struct hci_ev_disconn_logical_link_complete *ev = data; 5704 struct hci_chan *hchan; 5705 5706 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", 5707 le16_to_cpu(ev->handle), ev->status); 5708 5709 if (ev->status) 5710 return; 5711 5712 hci_dev_lock(hdev); 5713 5714 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5715 if (!hchan || !hchan->amp) 5716 goto unlock; 5717 5718 amp_destroy_logical_link(hchan, ev->reason); 5719 5720 unlock: 5721 hci_dev_unlock(hdev); 5722 } 5723 5724 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, 5725 struct sk_buff *skb) 5726 { 5727 struct hci_ev_disconn_phy_link_complete *ev = data; 5728 struct hci_conn *hcon; 5729 5730 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5731 5732 if (ev->status) 5733 return; 5734 5735 hci_dev_lock(hdev); 5736 5737 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5738 if (hcon && hcon->type == AMP_LINK) { 5739 hcon->state = BT_CLOSED; 5740 hci_disconn_cfm(hcon, ev->reason); 5741 hci_conn_del(hcon); 5742 } 5743 5744 hci_dev_unlock(hdev); 5745 } 5746 #endif 5747 5748 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5749 u8 bdaddr_type, bdaddr_t *local_rpa) 5750 { 5751 if (conn->out) { 5752 conn->dst_type = bdaddr_type; 5753 conn->resp_addr_type = bdaddr_type; 5754 bacpy(&conn->resp_addr, bdaddr); 5755 5756 /* Check if the controller has set a Local RPA then it must be 5757 * used instead or hdev->rpa. 5758 */ 5759 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5760 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5761 bacpy(&conn->init_addr, local_rpa); 5762 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5763 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5764 bacpy(&conn->init_addr, &conn->hdev->rpa); 5765 } else { 5766 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5767 &conn->init_addr_type); 5768 } 5769 } else { 5770 conn->resp_addr_type = conn->hdev->adv_addr_type; 5771 /* Check if the controller has set a Local RPA then it must be 5772 * used instead or hdev->rpa. 5773 */ 5774 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5775 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5776 bacpy(&conn->resp_addr, local_rpa); 5777 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5778 /* In case of ext adv, resp_addr will be updated in 5779 * Adv Terminated event. 5780 */ 5781 if (!ext_adv_capable(conn->hdev)) 5782 bacpy(&conn->resp_addr, 5783 &conn->hdev->random_addr); 5784 } else { 5785 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5786 } 5787 5788 conn->init_addr_type = bdaddr_type; 5789 bacpy(&conn->init_addr, bdaddr); 5790 5791 /* For incoming connections, set the default minimum 5792 * and maximum connection interval. They will be used 5793 * to check if the parameters are in range and if not 5794 * trigger the connection update procedure. 5795 */ 5796 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5797 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5798 } 5799 } 5800 5801 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5802 bdaddr_t *bdaddr, u8 bdaddr_type, 5803 bdaddr_t *local_rpa, u8 role, u16 handle, 5804 u16 interval, u16 latency, 5805 u16 supervision_timeout) 5806 { 5807 struct hci_conn_params *params; 5808 struct hci_conn *conn; 5809 struct smp_irk *irk; 5810 u8 addr_type; 5811 5812 hci_dev_lock(hdev); 5813 5814 /* All controllers implicitly stop advertising in the event of a 5815 * connection, so ensure that the state bit is cleared. 5816 */ 5817 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5818 5819 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 5820 if (!conn) { 5821 /* In case of error status and there is no connection pending 5822 * just unlock as there is nothing to cleanup. 5823 */ 5824 if (status) 5825 goto unlock; 5826 5827 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5828 if (!conn) { 5829 bt_dev_err(hdev, "no memory for new connection"); 5830 goto unlock; 5831 } 5832 5833 conn->dst_type = bdaddr_type; 5834 5835 /* If we didn't have a hci_conn object previously 5836 * but we're in central role this must be something 5837 * initiated using an accept list. Since accept list based 5838 * connections are not "first class citizens" we don't 5839 * have full tracking of them. Therefore, we go ahead 5840 * with a "best effort" approach of determining the 5841 * initiator address based on the HCI_PRIVACY flag. 5842 */ 5843 if (conn->out) { 5844 conn->resp_addr_type = bdaddr_type; 5845 bacpy(&conn->resp_addr, bdaddr); 5846 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5847 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5848 bacpy(&conn->init_addr, &hdev->rpa); 5849 } else { 5850 hci_copy_identity_address(hdev, 5851 &conn->init_addr, 5852 &conn->init_addr_type); 5853 } 5854 } 5855 } else { 5856 cancel_delayed_work(&conn->le_conn_timeout); 5857 } 5858 5859 /* The HCI_LE_Connection_Complete event is only sent once per connection. 5860 * Processing it more than once per connection can corrupt kernel memory. 5861 * 5862 * As the connection handle is set here for the first time, it indicates 5863 * whether the connection is already set up. 5864 */ 5865 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 5866 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 5867 goto unlock; 5868 } 5869 5870 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5871 5872 /* Lookup the identity address from the stored connection 5873 * address and address type. 5874 * 5875 * When establishing connections to an identity address, the 5876 * connection procedure will store the resolvable random 5877 * address first. Now if it can be converted back into the 5878 * identity address, start using the identity address from 5879 * now on. 5880 */ 5881 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5882 if (irk) { 5883 bacpy(&conn->dst, &irk->bdaddr); 5884 conn->dst_type = irk->addr_type; 5885 } 5886 5887 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5888 5889 if (handle > HCI_CONN_HANDLE_MAX) { 5890 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, 5891 HCI_CONN_HANDLE_MAX); 5892 status = HCI_ERROR_INVALID_PARAMETERS; 5893 } 5894 5895 /* All connection failure handling is taken care of by the 5896 * hci_conn_failed function which is triggered by the HCI 5897 * request completion callbacks used for connecting. 5898 */ 5899 if (status) 5900 goto unlock; 5901 5902 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5903 addr_type = BDADDR_LE_PUBLIC; 5904 else 5905 addr_type = BDADDR_LE_RANDOM; 5906 5907 /* Drop the connection if the device is blocked */ 5908 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5909 hci_conn_drop(conn); 5910 goto unlock; 5911 } 5912 5913 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5914 mgmt_device_connected(hdev, conn, NULL, 0); 5915 5916 conn->sec_level = BT_SECURITY_LOW; 5917 conn->handle = handle; 5918 conn->state = BT_CONFIG; 5919 5920 /* Store current advertising instance as connection advertising instance 5921 * when sotfware rotation is in use so it can be re-enabled when 5922 * disconnected. 5923 */ 5924 if (!ext_adv_capable(hdev)) 5925 conn->adv_instance = hdev->cur_adv_instance; 5926 5927 conn->le_conn_interval = interval; 5928 conn->le_conn_latency = latency; 5929 conn->le_supv_timeout = supervision_timeout; 5930 5931 hci_debugfs_create_conn(conn); 5932 hci_conn_add_sysfs(conn); 5933 5934 /* The remote features procedure is defined for central 5935 * role only. So only in case of an initiated connection 5936 * request the remote features. 5937 * 5938 * If the local controller supports peripheral-initiated features 5939 * exchange, then requesting the remote features in peripheral 5940 * role is possible. Otherwise just transition into the 5941 * connected state without requesting the remote features. 5942 */ 5943 if (conn->out || 5944 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5945 struct hci_cp_le_read_remote_features cp; 5946 5947 cp.handle = __cpu_to_le16(conn->handle); 5948 5949 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5950 sizeof(cp), &cp); 5951 5952 hci_conn_hold(conn); 5953 } else { 5954 conn->state = BT_CONNECTED; 5955 hci_connect_cfm(conn, status); 5956 } 5957 5958 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5959 conn->dst_type); 5960 if (params) { 5961 list_del_init(¶ms->action); 5962 if (params->conn) { 5963 hci_conn_drop(params->conn); 5964 hci_conn_put(params->conn); 5965 params->conn = NULL; 5966 } 5967 } 5968 5969 unlock: 5970 hci_update_passive_scan(hdev); 5971 hci_dev_unlock(hdev); 5972 } 5973 5974 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, 5975 struct sk_buff *skb) 5976 { 5977 struct hci_ev_le_conn_complete *ev = data; 5978 5979 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5980 5981 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5982 NULL, ev->role, le16_to_cpu(ev->handle), 5983 le16_to_cpu(ev->interval), 5984 le16_to_cpu(ev->latency), 5985 le16_to_cpu(ev->supervision_timeout)); 5986 } 5987 5988 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, 5989 struct sk_buff *skb) 5990 { 5991 struct hci_ev_le_enh_conn_complete *ev = data; 5992 5993 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5994 5995 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5996 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5997 le16_to_cpu(ev->interval), 5998 le16_to_cpu(ev->latency), 5999 le16_to_cpu(ev->supervision_timeout)); 6000 } 6001 6002 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, 6003 struct sk_buff *skb) 6004 { 6005 struct hci_evt_le_ext_adv_set_term *ev = data; 6006 struct hci_conn *conn; 6007 struct adv_info *adv, *n; 6008 6009 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6010 6011 /* The Bluetooth Core 5.3 specification clearly states that this event 6012 * shall not be sent when the Host disables the advertising set. So in 6013 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 6014 * 6015 * When the Host disables an advertising set, all cleanup is done via 6016 * its command callback and not needed to be duplicated here. 6017 */ 6018 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 6019 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 6020 return; 6021 } 6022 6023 hci_dev_lock(hdev); 6024 6025 adv = hci_find_adv_instance(hdev, ev->handle); 6026 6027 if (ev->status) { 6028 if (!adv) 6029 goto unlock; 6030 6031 /* Remove advertising as it has been terminated */ 6032 hci_remove_adv_instance(hdev, ev->handle); 6033 mgmt_advertising_removed(NULL, hdev, ev->handle); 6034 6035 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 6036 if (adv->enabled) 6037 goto unlock; 6038 } 6039 6040 /* We are no longer advertising, clear HCI_LE_ADV */ 6041 hci_dev_clear_flag(hdev, HCI_LE_ADV); 6042 goto unlock; 6043 } 6044 6045 if (adv) 6046 adv->enabled = false; 6047 6048 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 6049 if (conn) { 6050 /* Store handle in the connection so the correct advertising 6051 * instance can be re-enabled when disconnected. 6052 */ 6053 conn->adv_instance = ev->handle; 6054 6055 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 6056 bacmp(&conn->resp_addr, BDADDR_ANY)) 6057 goto unlock; 6058 6059 if (!ev->handle) { 6060 bacpy(&conn->resp_addr, &hdev->random_addr); 6061 goto unlock; 6062 } 6063 6064 if (adv) 6065 bacpy(&conn->resp_addr, &adv->random_addr); 6066 } 6067 6068 unlock: 6069 hci_dev_unlock(hdev); 6070 } 6071 6072 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, 6073 struct sk_buff *skb) 6074 { 6075 struct hci_ev_le_conn_update_complete *ev = data; 6076 struct hci_conn *conn; 6077 6078 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6079 6080 if (ev->status) 6081 return; 6082 6083 hci_dev_lock(hdev); 6084 6085 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6086 if (conn) { 6087 conn->le_conn_interval = le16_to_cpu(ev->interval); 6088 conn->le_conn_latency = le16_to_cpu(ev->latency); 6089 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 6090 } 6091 6092 hci_dev_unlock(hdev); 6093 } 6094 6095 /* This function requires the caller holds hdev->lock */ 6096 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 6097 bdaddr_t *addr, 6098 u8 addr_type, bool addr_resolved, 6099 u8 adv_type) 6100 { 6101 struct hci_conn *conn; 6102 struct hci_conn_params *params; 6103 6104 /* If the event is not connectable don't proceed further */ 6105 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 6106 return NULL; 6107 6108 /* Ignore if the device is blocked or hdev is suspended */ 6109 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 6110 hdev->suspended) 6111 return NULL; 6112 6113 /* Most controller will fail if we try to create new connections 6114 * while we have an existing one in peripheral role. 6115 */ 6116 if (hdev->conn_hash.le_num_peripheral > 0 && 6117 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 6118 !(hdev->le_states[3] & 0x10))) 6119 return NULL; 6120 6121 /* If we're not connectable only connect devices that we have in 6122 * our pend_le_conns list. 6123 */ 6124 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 6125 addr_type); 6126 if (!params) 6127 return NULL; 6128 6129 if (!params->explicit_connect) { 6130 switch (params->auto_connect) { 6131 case HCI_AUTO_CONN_DIRECT: 6132 /* Only devices advertising with ADV_DIRECT_IND are 6133 * triggering a connection attempt. This is allowing 6134 * incoming connections from peripheral devices. 6135 */ 6136 if (adv_type != LE_ADV_DIRECT_IND) 6137 return NULL; 6138 break; 6139 case HCI_AUTO_CONN_ALWAYS: 6140 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 6141 * are triggering a connection attempt. This means 6142 * that incoming connections from peripheral device are 6143 * accepted and also outgoing connections to peripheral 6144 * devices are established when found. 6145 */ 6146 break; 6147 default: 6148 return NULL; 6149 } 6150 } 6151 6152 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 6153 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 6154 HCI_ROLE_MASTER); 6155 if (!IS_ERR(conn)) { 6156 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 6157 * by higher layer that tried to connect, if no then 6158 * store the pointer since we don't really have any 6159 * other owner of the object besides the params that 6160 * triggered it. This way we can abort the connection if 6161 * the parameters get removed and keep the reference 6162 * count consistent once the connection is established. 6163 */ 6164 6165 if (!params->explicit_connect) 6166 params->conn = hci_conn_get(conn); 6167 6168 return conn; 6169 } 6170 6171 switch (PTR_ERR(conn)) { 6172 case -EBUSY: 6173 /* If hci_connect() returns -EBUSY it means there is already 6174 * an LE connection attempt going on. Since controllers don't 6175 * support more than one connection attempt at the time, we 6176 * don't consider this an error case. 6177 */ 6178 break; 6179 default: 6180 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 6181 return NULL; 6182 } 6183 6184 return NULL; 6185 } 6186 6187 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 6188 u8 bdaddr_type, bdaddr_t *direct_addr, 6189 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 6190 bool ext_adv, bool ctl_time, u64 instant) 6191 { 6192 struct discovery_state *d = &hdev->discovery; 6193 struct smp_irk *irk; 6194 struct hci_conn *conn; 6195 bool match, bdaddr_resolved; 6196 u32 flags; 6197 u8 *ptr; 6198 6199 switch (type) { 6200 case LE_ADV_IND: 6201 case LE_ADV_DIRECT_IND: 6202 case LE_ADV_SCAN_IND: 6203 case LE_ADV_NONCONN_IND: 6204 case LE_ADV_SCAN_RSP: 6205 break; 6206 default: 6207 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 6208 "type: 0x%02x", type); 6209 return; 6210 } 6211 6212 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 6213 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 6214 return; 6215 } 6216 6217 /* Find the end of the data in case the report contains padded zero 6218 * bytes at the end causing an invalid length value. 6219 * 6220 * When data is NULL, len is 0 so there is no need for extra ptr 6221 * check as 'ptr < data + 0' is already false in such case. 6222 */ 6223 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 6224 if (ptr + 1 + *ptr > data + len) 6225 break; 6226 } 6227 6228 /* Adjust for actual length. This handles the case when remote 6229 * device is advertising with incorrect data length. 6230 */ 6231 len = ptr - data; 6232 6233 /* If the direct address is present, then this report is from 6234 * a LE Direct Advertising Report event. In that case it is 6235 * important to see if the address is matching the local 6236 * controller address. 6237 */ 6238 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) { 6239 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 6240 &bdaddr_resolved); 6241 6242 /* Only resolvable random addresses are valid for these 6243 * kind of reports and others can be ignored. 6244 */ 6245 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 6246 return; 6247 6248 /* If the controller is not using resolvable random 6249 * addresses, then this report can be ignored. 6250 */ 6251 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 6252 return; 6253 6254 /* If the local IRK of the controller does not match 6255 * with the resolvable random address provided, then 6256 * this report can be ignored. 6257 */ 6258 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 6259 return; 6260 } 6261 6262 /* Check if we need to convert to identity address */ 6263 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 6264 if (irk) { 6265 bdaddr = &irk->bdaddr; 6266 bdaddr_type = irk->addr_type; 6267 } 6268 6269 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 6270 6271 /* Check if we have been requested to connect to this device. 6272 * 6273 * direct_addr is set only for directed advertising reports (it is NULL 6274 * for advertising reports) and is already verified to be RPA above. 6275 */ 6276 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 6277 type); 6278 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { 6279 /* Store report for later inclusion by 6280 * mgmt_device_connected 6281 */ 6282 memcpy(conn->le_adv_data, data, len); 6283 conn->le_adv_data_len = len; 6284 } 6285 6286 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 6287 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6288 else 6289 flags = 0; 6290 6291 /* All scan results should be sent up for Mesh systems */ 6292 if (hci_dev_test_flag(hdev, HCI_MESH)) { 6293 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6294 rssi, flags, data, len, NULL, 0, instant); 6295 return; 6296 } 6297 6298 /* Passive scanning shouldn't trigger any device found events, 6299 * except for devices marked as CONN_REPORT for which we do send 6300 * device found events, or advertisement monitoring requested. 6301 */ 6302 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 6303 if (type == LE_ADV_DIRECT_IND) 6304 return; 6305 6306 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 6307 bdaddr, bdaddr_type) && 6308 idr_is_empty(&hdev->adv_monitors_idr)) 6309 return; 6310 6311 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6312 rssi, flags, data, len, NULL, 0, 0); 6313 return; 6314 } 6315 6316 /* When receiving non-connectable or scannable undirected 6317 * advertising reports, this means that the remote device is 6318 * not connectable and then clearly indicate this in the 6319 * device found event. 6320 * 6321 * When receiving a scan response, then there is no way to 6322 * know if the remote device is connectable or not. However 6323 * since scan responses are merged with a previously seen 6324 * advertising report, the flags field from that report 6325 * will be used. 6326 * 6327 * In the really unlikely case that a controller get confused 6328 * and just sends a scan response event, then it is marked as 6329 * not connectable as well. 6330 */ 6331 if (type == LE_ADV_SCAN_RSP) 6332 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6333 6334 /* If there's nothing pending either store the data from this 6335 * event or send an immediate device found event if the data 6336 * should not be stored for later. 6337 */ 6338 if (!ext_adv && !has_pending_adv_report(hdev)) { 6339 /* If the report will trigger a SCAN_REQ store it for 6340 * later merging. 6341 */ 6342 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 6343 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6344 rssi, flags, data, len); 6345 return; 6346 } 6347 6348 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6349 rssi, flags, data, len, NULL, 0, 0); 6350 return; 6351 } 6352 6353 /* Check if the pending report is for the same device as the new one */ 6354 match = (!bacmp(bdaddr, &d->last_adv_addr) && 6355 bdaddr_type == d->last_adv_addr_type); 6356 6357 /* If the pending data doesn't match this report or this isn't a 6358 * scan response (e.g. we got a duplicate ADV_IND) then force 6359 * sending of the pending data. 6360 */ 6361 if (type != LE_ADV_SCAN_RSP || !match) { 6362 /* Send out whatever is in the cache, but skip duplicates */ 6363 if (!match) 6364 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6365 d->last_adv_addr_type, NULL, 6366 d->last_adv_rssi, d->last_adv_flags, 6367 d->last_adv_data, 6368 d->last_adv_data_len, NULL, 0, 0); 6369 6370 /* If the new report will trigger a SCAN_REQ store it for 6371 * later merging. 6372 */ 6373 if (!ext_adv && (type == LE_ADV_IND || 6374 type == LE_ADV_SCAN_IND)) { 6375 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6376 rssi, flags, data, len); 6377 return; 6378 } 6379 6380 /* The advertising reports cannot be merged, so clear 6381 * the pending report and send out a device found event. 6382 */ 6383 clear_pending_adv_report(hdev); 6384 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6385 rssi, flags, data, len, NULL, 0, 0); 6386 return; 6387 } 6388 6389 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 6390 * the new event is a SCAN_RSP. We can therefore proceed with 6391 * sending a merged device found event. 6392 */ 6393 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6394 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 6395 d->last_adv_data, d->last_adv_data_len, data, len, 0); 6396 clear_pending_adv_report(hdev); 6397 } 6398 6399 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, 6400 struct sk_buff *skb) 6401 { 6402 struct hci_ev_le_advertising_report *ev = data; 6403 u64 instant = jiffies; 6404 6405 if (!ev->num) 6406 return; 6407 6408 hci_dev_lock(hdev); 6409 6410 while (ev->num--) { 6411 struct hci_ev_le_advertising_info *info; 6412 s8 rssi; 6413 6414 info = hci_le_ev_skb_pull(hdev, skb, 6415 HCI_EV_LE_ADVERTISING_REPORT, 6416 sizeof(*info)); 6417 if (!info) 6418 break; 6419 6420 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, 6421 info->length + 1)) 6422 break; 6423 6424 if (info->length <= HCI_MAX_AD_LENGTH) { 6425 rssi = info->data[info->length]; 6426 process_adv_report(hdev, info->type, &info->bdaddr, 6427 info->bdaddr_type, NULL, 0, rssi, 6428 info->data, info->length, false, 6429 false, instant); 6430 } else { 6431 bt_dev_err(hdev, "Dropping invalid advertising data"); 6432 } 6433 } 6434 6435 hci_dev_unlock(hdev); 6436 } 6437 6438 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 6439 { 6440 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 6441 switch (evt_type) { 6442 case LE_LEGACY_ADV_IND: 6443 return LE_ADV_IND; 6444 case LE_LEGACY_ADV_DIRECT_IND: 6445 return LE_ADV_DIRECT_IND; 6446 case LE_LEGACY_ADV_SCAN_IND: 6447 return LE_ADV_SCAN_IND; 6448 case LE_LEGACY_NONCONN_IND: 6449 return LE_ADV_NONCONN_IND; 6450 case LE_LEGACY_SCAN_RSP_ADV: 6451 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 6452 return LE_ADV_SCAN_RSP; 6453 } 6454 6455 goto invalid; 6456 } 6457 6458 if (evt_type & LE_EXT_ADV_CONN_IND) { 6459 if (evt_type & LE_EXT_ADV_DIRECT_IND) 6460 return LE_ADV_DIRECT_IND; 6461 6462 return LE_ADV_IND; 6463 } 6464 6465 if (evt_type & LE_EXT_ADV_SCAN_RSP) 6466 return LE_ADV_SCAN_RSP; 6467 6468 if (evt_type & LE_EXT_ADV_SCAN_IND) 6469 return LE_ADV_SCAN_IND; 6470 6471 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 6472 evt_type & LE_EXT_ADV_DIRECT_IND) 6473 return LE_ADV_NONCONN_IND; 6474 6475 invalid: 6476 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 6477 evt_type); 6478 6479 return LE_ADV_INVALID; 6480 } 6481 6482 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, 6483 struct sk_buff *skb) 6484 { 6485 struct hci_ev_le_ext_adv_report *ev = data; 6486 u64 instant = jiffies; 6487 6488 if (!ev->num) 6489 return; 6490 6491 hci_dev_lock(hdev); 6492 6493 while (ev->num--) { 6494 struct hci_ev_le_ext_adv_info *info; 6495 u8 legacy_evt_type; 6496 u16 evt_type; 6497 6498 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6499 sizeof(*info)); 6500 if (!info) 6501 break; 6502 6503 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6504 info->length)) 6505 break; 6506 6507 evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK; 6508 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); 6509 if (legacy_evt_type != LE_ADV_INVALID) { 6510 process_adv_report(hdev, legacy_evt_type, &info->bdaddr, 6511 info->bdaddr_type, NULL, 0, 6512 info->rssi, info->data, info->length, 6513 !(evt_type & LE_EXT_ADV_LEGACY_PDU), 6514 false, instant); 6515 } 6516 } 6517 6518 hci_dev_unlock(hdev); 6519 } 6520 6521 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle) 6522 { 6523 struct hci_cp_le_pa_term_sync cp; 6524 6525 memset(&cp, 0, sizeof(cp)); 6526 cp.handle = handle; 6527 6528 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp); 6529 } 6530 6531 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, 6532 struct sk_buff *skb) 6533 { 6534 struct hci_ev_le_pa_sync_established *ev = data; 6535 int mask = hdev->link_mode; 6536 __u8 flags = 0; 6537 6538 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6539 6540 if (ev->status) 6541 return; 6542 6543 hci_dev_lock(hdev); 6544 6545 hci_dev_clear_flag(hdev, HCI_PA_SYNC); 6546 6547 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags); 6548 if (!(mask & HCI_LM_ACCEPT)) 6549 hci_le_pa_term_sync(hdev, ev->handle); 6550 6551 hci_dev_unlock(hdev); 6552 } 6553 6554 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, 6555 struct sk_buff *skb) 6556 { 6557 struct hci_ev_le_remote_feat_complete *ev = data; 6558 struct hci_conn *conn; 6559 6560 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6561 6562 hci_dev_lock(hdev); 6563 6564 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6565 if (conn) { 6566 if (!ev->status) 6567 memcpy(conn->features[0], ev->features, 8); 6568 6569 if (conn->state == BT_CONFIG) { 6570 __u8 status; 6571 6572 /* If the local controller supports peripheral-initiated 6573 * features exchange, but the remote controller does 6574 * not, then it is possible that the error code 0x1a 6575 * for unsupported remote feature gets returned. 6576 * 6577 * In this specific case, allow the connection to 6578 * transition into connected state and mark it as 6579 * successful. 6580 */ 6581 if (!conn->out && ev->status == 0x1a && 6582 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) 6583 status = 0x00; 6584 else 6585 status = ev->status; 6586 6587 conn->state = BT_CONNECTED; 6588 hci_connect_cfm(conn, status); 6589 hci_conn_drop(conn); 6590 } 6591 } 6592 6593 hci_dev_unlock(hdev); 6594 } 6595 6596 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data, 6597 struct sk_buff *skb) 6598 { 6599 struct hci_ev_le_ltk_req *ev = data; 6600 struct hci_cp_le_ltk_reply cp; 6601 struct hci_cp_le_ltk_neg_reply neg; 6602 struct hci_conn *conn; 6603 struct smp_ltk *ltk; 6604 6605 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); 6606 6607 hci_dev_lock(hdev); 6608 6609 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6610 if (conn == NULL) 6611 goto not_found; 6612 6613 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); 6614 if (!ltk) 6615 goto not_found; 6616 6617 if (smp_ltk_is_sc(ltk)) { 6618 /* With SC both EDiv and Rand are set to zero */ 6619 if (ev->ediv || ev->rand) 6620 goto not_found; 6621 } else { 6622 /* For non-SC keys check that EDiv and Rand match */ 6623 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) 6624 goto not_found; 6625 } 6626 6627 memcpy(cp.ltk, ltk->val, ltk->enc_size); 6628 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); 6629 cp.handle = cpu_to_le16(conn->handle); 6630 6631 conn->pending_sec_level = smp_ltk_sec_level(ltk); 6632 6633 conn->enc_key_size = ltk->enc_size; 6634 6635 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 6636 6637 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 6638 * temporary key used to encrypt a connection following 6639 * pairing. It is used during the Encrypted Session Setup to 6640 * distribute the keys. Later, security can be re-established 6641 * using a distributed LTK. 6642 */ 6643 if (ltk->type == SMP_STK) { 6644 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6645 list_del_rcu(<k->list); 6646 kfree_rcu(ltk, rcu); 6647 } else { 6648 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6649 } 6650 6651 hci_dev_unlock(hdev); 6652 6653 return; 6654 6655 not_found: 6656 neg.handle = ev->handle; 6657 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 6658 hci_dev_unlock(hdev); 6659 } 6660 6661 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, 6662 u8 reason) 6663 { 6664 struct hci_cp_le_conn_param_req_neg_reply cp; 6665 6666 cp.handle = cpu_to_le16(handle); 6667 cp.reason = reason; 6668 6669 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), 6670 &cp); 6671 } 6672 6673 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, 6674 struct sk_buff *skb) 6675 { 6676 struct hci_ev_le_remote_conn_param_req *ev = data; 6677 struct hci_cp_le_conn_param_req_reply cp; 6678 struct hci_conn *hcon; 6679 u16 handle, min, max, latency, timeout; 6680 6681 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); 6682 6683 handle = le16_to_cpu(ev->handle); 6684 min = le16_to_cpu(ev->interval_min); 6685 max = le16_to_cpu(ev->interval_max); 6686 latency = le16_to_cpu(ev->latency); 6687 timeout = le16_to_cpu(ev->timeout); 6688 6689 hcon = hci_conn_hash_lookup_handle(hdev, handle); 6690 if (!hcon || hcon->state != BT_CONNECTED) 6691 return send_conn_param_neg_reply(hdev, handle, 6692 HCI_ERROR_UNKNOWN_CONN_ID); 6693 6694 if (hci_check_conn_params(min, max, latency, timeout)) 6695 return send_conn_param_neg_reply(hdev, handle, 6696 HCI_ERROR_INVALID_LL_PARAMS); 6697 6698 if (hcon->role == HCI_ROLE_MASTER) { 6699 struct hci_conn_params *params; 6700 u8 store_hint; 6701 6702 hci_dev_lock(hdev); 6703 6704 params = hci_conn_params_lookup(hdev, &hcon->dst, 6705 hcon->dst_type); 6706 if (params) { 6707 params->conn_min_interval = min; 6708 params->conn_max_interval = max; 6709 params->conn_latency = latency; 6710 params->supervision_timeout = timeout; 6711 store_hint = 0x01; 6712 } else { 6713 store_hint = 0x00; 6714 } 6715 6716 hci_dev_unlock(hdev); 6717 6718 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 6719 store_hint, min, max, latency, timeout); 6720 } 6721 6722 cp.handle = ev->handle; 6723 cp.interval_min = ev->interval_min; 6724 cp.interval_max = ev->interval_max; 6725 cp.latency = ev->latency; 6726 cp.timeout = ev->timeout; 6727 cp.min_ce_len = 0; 6728 cp.max_ce_len = 0; 6729 6730 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); 6731 } 6732 6733 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, 6734 struct sk_buff *skb) 6735 { 6736 struct hci_ev_le_direct_adv_report *ev = data; 6737 u64 instant = jiffies; 6738 int i; 6739 6740 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, 6741 flex_array_size(ev, info, ev->num))) 6742 return; 6743 6744 if (!ev->num) 6745 return; 6746 6747 hci_dev_lock(hdev); 6748 6749 for (i = 0; i < ev->num; i++) { 6750 struct hci_ev_le_direct_adv_info *info = &ev->info[i]; 6751 6752 process_adv_report(hdev, info->type, &info->bdaddr, 6753 info->bdaddr_type, &info->direct_addr, 6754 info->direct_addr_type, info->rssi, NULL, 0, 6755 false, false, instant); 6756 } 6757 6758 hci_dev_unlock(hdev); 6759 } 6760 6761 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data, 6762 struct sk_buff *skb) 6763 { 6764 struct hci_ev_le_phy_update_complete *ev = data; 6765 struct hci_conn *conn; 6766 6767 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6768 6769 if (ev->status) 6770 return; 6771 6772 hci_dev_lock(hdev); 6773 6774 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6775 if (!conn) 6776 goto unlock; 6777 6778 conn->le_tx_phy = ev->tx_phy; 6779 conn->le_rx_phy = ev->rx_phy; 6780 6781 unlock: 6782 hci_dev_unlock(hdev); 6783 } 6784 6785 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, 6786 struct sk_buff *skb) 6787 { 6788 struct hci_evt_le_cis_established *ev = data; 6789 struct hci_conn *conn; 6790 u16 handle = __le16_to_cpu(ev->handle); 6791 6792 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6793 6794 hci_dev_lock(hdev); 6795 6796 conn = hci_conn_hash_lookup_handle(hdev, handle); 6797 if (!conn) { 6798 bt_dev_err(hdev, 6799 "Unable to find connection with handle 0x%4.4x", 6800 handle); 6801 goto unlock; 6802 } 6803 6804 if (conn->type != ISO_LINK) { 6805 bt_dev_err(hdev, 6806 "Invalid connection link type handle 0x%4.4x", 6807 handle); 6808 goto unlock; 6809 } 6810 6811 if (conn->role == HCI_ROLE_SLAVE) { 6812 __le32 interval; 6813 6814 memset(&interval, 0, sizeof(interval)); 6815 6816 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); 6817 conn->iso_qos.in.interval = le32_to_cpu(interval); 6818 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); 6819 conn->iso_qos.out.interval = le32_to_cpu(interval); 6820 conn->iso_qos.in.latency = le16_to_cpu(ev->interval); 6821 conn->iso_qos.out.latency = le16_to_cpu(ev->interval); 6822 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu); 6823 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu); 6824 conn->iso_qos.in.phy = ev->c_phy; 6825 conn->iso_qos.out.phy = ev->p_phy; 6826 } 6827 6828 if (!ev->status) { 6829 conn->state = BT_CONNECTED; 6830 hci_debugfs_create_conn(conn); 6831 hci_conn_add_sysfs(conn); 6832 hci_iso_setup_path(conn); 6833 goto unlock; 6834 } 6835 6836 hci_connect_cfm(conn, ev->status); 6837 hci_conn_del(conn); 6838 6839 unlock: 6840 hci_dev_unlock(hdev); 6841 } 6842 6843 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) 6844 { 6845 struct hci_cp_le_reject_cis cp; 6846 6847 memset(&cp, 0, sizeof(cp)); 6848 cp.handle = handle; 6849 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 6850 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); 6851 } 6852 6853 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) 6854 { 6855 struct hci_cp_le_accept_cis cp; 6856 6857 memset(&cp, 0, sizeof(cp)); 6858 cp.handle = handle; 6859 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); 6860 } 6861 6862 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, 6863 struct sk_buff *skb) 6864 { 6865 struct hci_evt_le_cis_req *ev = data; 6866 u16 acl_handle, cis_handle; 6867 struct hci_conn *acl, *cis; 6868 int mask; 6869 __u8 flags = 0; 6870 6871 acl_handle = __le16_to_cpu(ev->acl_handle); 6872 cis_handle = __le16_to_cpu(ev->cis_handle); 6873 6874 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", 6875 acl_handle, cis_handle, ev->cig_id, ev->cis_id); 6876 6877 hci_dev_lock(hdev); 6878 6879 acl = hci_conn_hash_lookup_handle(hdev, acl_handle); 6880 if (!acl) 6881 goto unlock; 6882 6883 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags); 6884 if (!(mask & HCI_LM_ACCEPT)) { 6885 hci_le_reject_cis(hdev, ev->cis_handle); 6886 goto unlock; 6887 } 6888 6889 cis = hci_conn_hash_lookup_handle(hdev, cis_handle); 6890 if (!cis) { 6891 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE); 6892 if (!cis) { 6893 hci_le_reject_cis(hdev, ev->cis_handle); 6894 goto unlock; 6895 } 6896 cis->handle = cis_handle; 6897 } 6898 6899 cis->iso_qos.cig = ev->cig_id; 6900 cis->iso_qos.cis = ev->cis_id; 6901 6902 if (!(flags & HCI_PROTO_DEFER)) { 6903 hci_le_accept_cis(hdev, ev->cis_handle); 6904 } else { 6905 cis->state = BT_CONNECT2; 6906 hci_connect_cfm(cis, 0); 6907 } 6908 6909 unlock: 6910 hci_dev_unlock(hdev); 6911 } 6912 6913 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data, 6914 struct sk_buff *skb) 6915 { 6916 struct hci_evt_le_create_big_complete *ev = data; 6917 struct hci_conn *conn; 6918 6919 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 6920 6921 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE, 6922 flex_array_size(ev, bis_handle, ev->num_bis))) 6923 return; 6924 6925 hci_dev_lock(hdev); 6926 6927 conn = hci_conn_hash_lookup_big(hdev, ev->handle); 6928 if (!conn) 6929 goto unlock; 6930 6931 if (conn->type != ISO_LINK) { 6932 bt_dev_err(hdev, 6933 "Invalid connection link type handle 0x%2.2x", 6934 ev->handle); 6935 goto unlock; 6936 } 6937 6938 if (ev->num_bis) 6939 conn->handle = __le16_to_cpu(ev->bis_handle[0]); 6940 6941 if (!ev->status) { 6942 conn->state = BT_CONNECTED; 6943 hci_debugfs_create_conn(conn); 6944 hci_conn_add_sysfs(conn); 6945 hci_iso_setup_path(conn); 6946 goto unlock; 6947 } 6948 6949 hci_connect_cfm(conn, ev->status); 6950 hci_conn_del(conn); 6951 6952 unlock: 6953 hci_dev_unlock(hdev); 6954 } 6955 6956 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, 6957 struct sk_buff *skb) 6958 { 6959 struct hci_evt_le_big_sync_estabilished *ev = data; 6960 struct hci_conn *bis; 6961 int i; 6962 6963 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6964 6965 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED, 6966 flex_array_size(ev, bis, ev->num_bis))) 6967 return; 6968 6969 if (ev->status) 6970 return; 6971 6972 hci_dev_lock(hdev); 6973 6974 for (i = 0; i < ev->num_bis; i++) { 6975 u16 handle = le16_to_cpu(ev->bis[i]); 6976 __le32 interval; 6977 6978 bis = hci_conn_hash_lookup_handle(hdev, handle); 6979 if (!bis) { 6980 bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, 6981 HCI_ROLE_SLAVE); 6982 if (!bis) 6983 continue; 6984 bis->handle = handle; 6985 } 6986 6987 bis->iso_qos.big = ev->handle; 6988 memset(&interval, 0, sizeof(interval)); 6989 memcpy(&interval, ev->latency, sizeof(ev->latency)); 6990 bis->iso_qos.in.interval = le32_to_cpu(interval); 6991 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ 6992 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100; 6993 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu); 6994 6995 hci_connect_cfm(bis, ev->status); 6996 } 6997 6998 hci_dev_unlock(hdev); 6999 } 7000 7001 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, 7002 struct sk_buff *skb) 7003 { 7004 struct hci_evt_le_big_info_adv_report *ev = data; 7005 int mask = hdev->link_mode; 7006 __u8 flags = 0; 7007 7008 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); 7009 7010 hci_dev_lock(hdev); 7011 7012 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); 7013 if (!(mask & HCI_LM_ACCEPT)) 7014 hci_le_pa_term_sync(hdev, ev->sync_handle); 7015 7016 hci_dev_unlock(hdev); 7017 } 7018 7019 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \ 7020 [_op] = { \ 7021 .func = _func, \ 7022 .min_len = _min_len, \ 7023 .max_len = _max_len, \ 7024 } 7025 7026 #define HCI_LE_EV(_op, _func, _len) \ 7027 HCI_LE_EV_VL(_op, _func, _len, _len) 7028 7029 #define HCI_LE_EV_STATUS(_op, _func) \ 7030 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status)) 7031 7032 /* Entries in this table shall have their position according to the subevent 7033 * opcode they handle so the use of the macros above is recommend since it does 7034 * attempt to initialize at its proper index using Designated Initializers that 7035 * way events without a callback function can be ommited. 7036 */ 7037 static const struct hci_le_ev { 7038 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 7039 u16 min_len; 7040 u16 max_len; 7041 } hci_le_ev_table[U8_MAX + 1] = { 7042 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */ 7043 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt, 7044 sizeof(struct hci_ev_le_conn_complete)), 7045 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */ 7046 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt, 7047 sizeof(struct hci_ev_le_advertising_report), 7048 HCI_MAX_EVENT_SIZE), 7049 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */ 7050 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE, 7051 hci_le_conn_update_complete_evt, 7052 sizeof(struct hci_ev_le_conn_update_complete)), 7053 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */ 7054 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE, 7055 hci_le_remote_feat_complete_evt, 7056 sizeof(struct hci_ev_le_remote_feat_complete)), 7057 /* [0x05 = HCI_EV_LE_LTK_REQ] */ 7058 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, 7059 sizeof(struct hci_ev_le_ltk_req)), 7060 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */ 7061 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ, 7062 hci_le_remote_conn_param_req_evt, 7063 sizeof(struct hci_ev_le_remote_conn_param_req)), 7064 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */ 7065 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE, 7066 hci_le_enh_conn_complete_evt, 7067 sizeof(struct hci_ev_le_enh_conn_complete)), 7068 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */ 7069 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt, 7070 sizeof(struct hci_ev_le_direct_adv_report), 7071 HCI_MAX_EVENT_SIZE), 7072 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */ 7073 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt, 7074 sizeof(struct hci_ev_le_phy_update_complete)), 7075 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */ 7076 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, 7077 sizeof(struct hci_ev_le_ext_adv_report), 7078 HCI_MAX_EVENT_SIZE), 7079 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */ 7080 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED, 7081 hci_le_pa_sync_estabilished_evt, 7082 sizeof(struct hci_ev_le_pa_sync_established)), 7083 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ 7084 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, 7085 sizeof(struct hci_evt_le_ext_adv_set_term)), 7086 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */ 7087 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt, 7088 sizeof(struct hci_evt_le_cis_established)), 7089 /* [0x1a = HCI_EVT_LE_CIS_REQ] */ 7090 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt, 7091 sizeof(struct hci_evt_le_cis_req)), 7092 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */ 7093 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE, 7094 hci_le_create_big_complete_evt, 7095 sizeof(struct hci_evt_le_create_big_complete), 7096 HCI_MAX_EVENT_SIZE), 7097 /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */ 7098 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED, 7099 hci_le_big_sync_established_evt, 7100 sizeof(struct hci_evt_le_big_sync_estabilished), 7101 HCI_MAX_EVENT_SIZE), 7102 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */ 7103 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT, 7104 hci_le_big_info_adv_report_evt, 7105 sizeof(struct hci_evt_le_big_info_adv_report), 7106 HCI_MAX_EVENT_SIZE), 7107 }; 7108 7109 static void hci_le_meta_evt(struct hci_dev *hdev, void *data, 7110 struct sk_buff *skb, u16 *opcode, u8 *status, 7111 hci_req_complete_t *req_complete, 7112 hci_req_complete_skb_t *req_complete_skb) 7113 { 7114 struct hci_ev_le_meta *ev = data; 7115 const struct hci_le_ev *subev; 7116 7117 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent); 7118 7119 /* Only match event if command OGF is for LE */ 7120 if (hdev->sent_cmd && 7121 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 && 7122 hci_skb_event(hdev->sent_cmd) == ev->subevent) { 7123 *opcode = hci_skb_opcode(hdev->sent_cmd); 7124 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, 7125 req_complete_skb); 7126 } 7127 7128 subev = &hci_le_ev_table[ev->subevent]; 7129 if (!subev->func) 7130 return; 7131 7132 if (skb->len < subev->min_len) { 7133 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u", 7134 ev->subevent, skb->len, subev->min_len); 7135 return; 7136 } 7137 7138 /* Just warn if the length is over max_len size it still be 7139 * possible to partially parse the event so leave to callback to 7140 * decide if that is acceptable. 7141 */ 7142 if (skb->len > subev->max_len) 7143 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u", 7144 ev->subevent, skb->len, subev->max_len); 7145 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len); 7146 if (!data) 7147 return; 7148 7149 subev->func(hdev, data, skb); 7150 } 7151 7152 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, 7153 u8 event, struct sk_buff *skb) 7154 { 7155 struct hci_ev_cmd_complete *ev; 7156 struct hci_event_hdr *hdr; 7157 7158 if (!skb) 7159 return false; 7160 7161 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr)); 7162 if (!hdr) 7163 return false; 7164 7165 if (event) { 7166 if (hdr->evt != event) 7167 return false; 7168 return true; 7169 } 7170 7171 /* Check if request ended in Command Status - no way to retrieve 7172 * any extra parameters in this case. 7173 */ 7174 if (hdr->evt == HCI_EV_CMD_STATUS) 7175 return false; 7176 7177 if (hdr->evt != HCI_EV_CMD_COMPLETE) { 7178 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", 7179 hdr->evt); 7180 return false; 7181 } 7182 7183 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev)); 7184 if (!ev) 7185 return false; 7186 7187 if (opcode != __le16_to_cpu(ev->opcode)) { 7188 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, 7189 __le16_to_cpu(ev->opcode)); 7190 return false; 7191 } 7192 7193 return true; 7194 } 7195 7196 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, 7197 struct sk_buff *skb) 7198 { 7199 struct hci_ev_le_advertising_info *adv; 7200 struct hci_ev_le_direct_adv_info *direct_adv; 7201 struct hci_ev_le_ext_adv_info *ext_adv; 7202 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data; 7203 const struct hci_ev_conn_request *conn_request = (void *)skb->data; 7204 7205 hci_dev_lock(hdev); 7206 7207 /* If we are currently suspended and this is the first BT event seen, 7208 * save the wake reason associated with the event. 7209 */ 7210 if (!hdev->suspended || hdev->wake_reason) 7211 goto unlock; 7212 7213 /* Default to remote wake. Values for wake_reason are documented in the 7214 * Bluez mgmt api docs. 7215 */ 7216 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE; 7217 7218 /* Once configured for remote wakeup, we should only wake up for 7219 * reconnections. It's useful to see which device is waking us up so 7220 * keep track of the bdaddr of the connection event that woke us up. 7221 */ 7222 if (event == HCI_EV_CONN_REQUEST) { 7223 bacpy(&hdev->wake_addr, &conn_complete->bdaddr); 7224 hdev->wake_addr_type = BDADDR_BREDR; 7225 } else if (event == HCI_EV_CONN_COMPLETE) { 7226 bacpy(&hdev->wake_addr, &conn_request->bdaddr); 7227 hdev->wake_addr_type = BDADDR_BREDR; 7228 } else if (event == HCI_EV_LE_META) { 7229 struct hci_ev_le_meta *le_ev = (void *)skb->data; 7230 u8 subevent = le_ev->subevent; 7231 u8 *ptr = &skb->data[sizeof(*le_ev)]; 7232 u8 num_reports = *ptr; 7233 7234 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT || 7235 subevent == HCI_EV_LE_DIRECT_ADV_REPORT || 7236 subevent == HCI_EV_LE_EXT_ADV_REPORT) && 7237 num_reports) { 7238 adv = (void *)(ptr + 1); 7239 direct_adv = (void *)(ptr + 1); 7240 ext_adv = (void *)(ptr + 1); 7241 7242 switch (subevent) { 7243 case HCI_EV_LE_ADVERTISING_REPORT: 7244 bacpy(&hdev->wake_addr, &adv->bdaddr); 7245 hdev->wake_addr_type = adv->bdaddr_type; 7246 break; 7247 case HCI_EV_LE_DIRECT_ADV_REPORT: 7248 bacpy(&hdev->wake_addr, &direct_adv->bdaddr); 7249 hdev->wake_addr_type = direct_adv->bdaddr_type; 7250 break; 7251 case HCI_EV_LE_EXT_ADV_REPORT: 7252 bacpy(&hdev->wake_addr, &ext_adv->bdaddr); 7253 hdev->wake_addr_type = ext_adv->bdaddr_type; 7254 break; 7255 } 7256 } 7257 } else { 7258 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED; 7259 } 7260 7261 unlock: 7262 hci_dev_unlock(hdev); 7263 } 7264 7265 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \ 7266 [_op] = { \ 7267 .req = false, \ 7268 .func = _func, \ 7269 .min_len = _min_len, \ 7270 .max_len = _max_len, \ 7271 } 7272 7273 #define HCI_EV(_op, _func, _len) \ 7274 HCI_EV_VL(_op, _func, _len, _len) 7275 7276 #define HCI_EV_STATUS(_op, _func) \ 7277 HCI_EV(_op, _func, sizeof(struct hci_ev_status)) 7278 7279 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \ 7280 [_op] = { \ 7281 .req = true, \ 7282 .func_req = _func, \ 7283 .min_len = _min_len, \ 7284 .max_len = _max_len, \ 7285 } 7286 7287 #define HCI_EV_REQ(_op, _func, _len) \ 7288 HCI_EV_REQ_VL(_op, _func, _len, _len) 7289 7290 /* Entries in this table shall have their position according to the event opcode 7291 * they handle so the use of the macros above is recommend since it does attempt 7292 * to initialize at its proper index using Designated Initializers that way 7293 * events without a callback function don't have entered. 7294 */ 7295 static const struct hci_ev { 7296 bool req; 7297 union { 7298 void (*func)(struct hci_dev *hdev, void *data, 7299 struct sk_buff *skb); 7300 void (*func_req)(struct hci_dev *hdev, void *data, 7301 struct sk_buff *skb, u16 *opcode, u8 *status, 7302 hci_req_complete_t *req_complete, 7303 hci_req_complete_skb_t *req_complete_skb); 7304 }; 7305 u16 min_len; 7306 u16 max_len; 7307 } hci_ev_table[U8_MAX + 1] = { 7308 /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */ 7309 HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt), 7310 /* [0x02 = HCI_EV_INQUIRY_RESULT] */ 7311 HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt, 7312 sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE), 7313 /* [0x03 = HCI_EV_CONN_COMPLETE] */ 7314 HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt, 7315 sizeof(struct hci_ev_conn_complete)), 7316 /* [0x04 = HCI_EV_CONN_REQUEST] */ 7317 HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt, 7318 sizeof(struct hci_ev_conn_request)), 7319 /* [0x05 = HCI_EV_DISCONN_COMPLETE] */ 7320 HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt, 7321 sizeof(struct hci_ev_disconn_complete)), 7322 /* [0x06 = HCI_EV_AUTH_COMPLETE] */ 7323 HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt, 7324 sizeof(struct hci_ev_auth_complete)), 7325 /* [0x07 = HCI_EV_REMOTE_NAME] */ 7326 HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt, 7327 sizeof(struct hci_ev_remote_name)), 7328 /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */ 7329 HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt, 7330 sizeof(struct hci_ev_encrypt_change)), 7331 /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */ 7332 HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE, 7333 hci_change_link_key_complete_evt, 7334 sizeof(struct hci_ev_change_link_key_complete)), 7335 /* [0x0b = HCI_EV_REMOTE_FEATURES] */ 7336 HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt, 7337 sizeof(struct hci_ev_remote_features)), 7338 /* [0x0e = HCI_EV_CMD_COMPLETE] */ 7339 HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt, 7340 sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE), 7341 /* [0x0f = HCI_EV_CMD_STATUS] */ 7342 HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt, 7343 sizeof(struct hci_ev_cmd_status)), 7344 /* [0x10 = HCI_EV_CMD_STATUS] */ 7345 HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt, 7346 sizeof(struct hci_ev_hardware_error)), 7347 /* [0x12 = HCI_EV_ROLE_CHANGE] */ 7348 HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt, 7349 sizeof(struct hci_ev_role_change)), 7350 /* [0x13 = HCI_EV_NUM_COMP_PKTS] */ 7351 HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt, 7352 sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE), 7353 /* [0x14 = HCI_EV_MODE_CHANGE] */ 7354 HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt, 7355 sizeof(struct hci_ev_mode_change)), 7356 /* [0x16 = HCI_EV_PIN_CODE_REQ] */ 7357 HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt, 7358 sizeof(struct hci_ev_pin_code_req)), 7359 /* [0x17 = HCI_EV_LINK_KEY_REQ] */ 7360 HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt, 7361 sizeof(struct hci_ev_link_key_req)), 7362 /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */ 7363 HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt, 7364 sizeof(struct hci_ev_link_key_notify)), 7365 /* [0x1c = HCI_EV_CLOCK_OFFSET] */ 7366 HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt, 7367 sizeof(struct hci_ev_clock_offset)), 7368 /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */ 7369 HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt, 7370 sizeof(struct hci_ev_pkt_type_change)), 7371 /* [0x20 = HCI_EV_PSCAN_REP_MODE] */ 7372 HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt, 7373 sizeof(struct hci_ev_pscan_rep_mode)), 7374 /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */ 7375 HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI, 7376 hci_inquiry_result_with_rssi_evt, 7377 sizeof(struct hci_ev_inquiry_result_rssi), 7378 HCI_MAX_EVENT_SIZE), 7379 /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */ 7380 HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt, 7381 sizeof(struct hci_ev_remote_ext_features)), 7382 /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */ 7383 HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt, 7384 sizeof(struct hci_ev_sync_conn_complete)), 7385 /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */ 7386 HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT, 7387 hci_extended_inquiry_result_evt, 7388 sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE), 7389 /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */ 7390 HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt, 7391 sizeof(struct hci_ev_key_refresh_complete)), 7392 /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */ 7393 HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt, 7394 sizeof(struct hci_ev_io_capa_request)), 7395 /* [0x32 = HCI_EV_IO_CAPA_REPLY] */ 7396 HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt, 7397 sizeof(struct hci_ev_io_capa_reply)), 7398 /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */ 7399 HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt, 7400 sizeof(struct hci_ev_user_confirm_req)), 7401 /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */ 7402 HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt, 7403 sizeof(struct hci_ev_user_passkey_req)), 7404 /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */ 7405 HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt, 7406 sizeof(struct hci_ev_remote_oob_data_request)), 7407 /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */ 7408 HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt, 7409 sizeof(struct hci_ev_simple_pair_complete)), 7410 /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */ 7411 HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt, 7412 sizeof(struct hci_ev_user_passkey_notify)), 7413 /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */ 7414 HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt, 7415 sizeof(struct hci_ev_keypress_notify)), 7416 /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */ 7417 HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt, 7418 sizeof(struct hci_ev_remote_host_features)), 7419 /* [0x3e = HCI_EV_LE_META] */ 7420 HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, 7421 sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), 7422 #if IS_ENABLED(CONFIG_BT_HS) 7423 /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */ 7424 HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt, 7425 sizeof(struct hci_ev_phy_link_complete)), 7426 /* [0x41 = HCI_EV_CHANNEL_SELECTED] */ 7427 HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt, 7428 sizeof(struct hci_ev_channel_selected)), 7429 /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */ 7430 HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE, 7431 hci_disconn_loglink_complete_evt, 7432 sizeof(struct hci_ev_disconn_logical_link_complete)), 7433 /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */ 7434 HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt, 7435 sizeof(struct hci_ev_logical_link_complete)), 7436 /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */ 7437 HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE, 7438 hci_disconn_phylink_complete_evt, 7439 sizeof(struct hci_ev_disconn_phy_link_complete)), 7440 #endif 7441 /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */ 7442 HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt, 7443 sizeof(struct hci_ev_num_comp_blocks)), 7444 /* [0xff = HCI_EV_VENDOR] */ 7445 HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE), 7446 }; 7447 7448 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb, 7449 u16 *opcode, u8 *status, 7450 hci_req_complete_t *req_complete, 7451 hci_req_complete_skb_t *req_complete_skb) 7452 { 7453 const struct hci_ev *ev = &hci_ev_table[event]; 7454 void *data; 7455 7456 if (!ev->func) 7457 return; 7458 7459 if (skb->len < ev->min_len) { 7460 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u", 7461 event, skb->len, ev->min_len); 7462 return; 7463 } 7464 7465 /* Just warn if the length is over max_len size it still be 7466 * possible to partially parse the event so leave to callback to 7467 * decide if that is acceptable. 7468 */ 7469 if (skb->len > ev->max_len) 7470 bt_dev_warn_ratelimited(hdev, 7471 "unexpected event 0x%2.2x length: %u > %u", 7472 event, skb->len, ev->max_len); 7473 7474 data = hci_ev_skb_pull(hdev, skb, event, ev->min_len); 7475 if (!data) 7476 return; 7477 7478 if (ev->req) 7479 ev->func_req(hdev, data, skb, opcode, status, req_complete, 7480 req_complete_skb); 7481 else 7482 ev->func(hdev, data, skb); 7483 } 7484 7485 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 7486 { 7487 struct hci_event_hdr *hdr = (void *) skb->data; 7488 hci_req_complete_t req_complete = NULL; 7489 hci_req_complete_skb_t req_complete_skb = NULL; 7490 struct sk_buff *orig_skb = NULL; 7491 u8 status = 0, event, req_evt = 0; 7492 u16 opcode = HCI_OP_NOP; 7493 7494 if (skb->len < sizeof(*hdr)) { 7495 bt_dev_err(hdev, "Malformed HCI Event"); 7496 goto done; 7497 } 7498 7499 kfree_skb(hdev->recv_event); 7500 hdev->recv_event = skb_clone(skb, GFP_KERNEL); 7501 7502 event = hdr->evt; 7503 if (!event) { 7504 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x", 7505 event); 7506 goto done; 7507 } 7508 7509 /* Only match event if command OGF is not for LE */ 7510 if (hdev->sent_cmd && 7511 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 && 7512 hci_skb_event(hdev->sent_cmd) == event) { 7513 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd), 7514 status, &req_complete, &req_complete_skb); 7515 req_evt = event; 7516 } 7517 7518 /* If it looks like we might end up having to call 7519 * req_complete_skb, store a pristine copy of the skb since the 7520 * various handlers may modify the original one through 7521 * skb_pull() calls, etc. 7522 */ 7523 if (req_complete_skb || event == HCI_EV_CMD_STATUS || 7524 event == HCI_EV_CMD_COMPLETE) 7525 orig_skb = skb_clone(skb, GFP_KERNEL); 7526 7527 skb_pull(skb, HCI_EVENT_HDR_SIZE); 7528 7529 /* Store wake reason if we're suspended */ 7530 hci_store_wake_reason(hdev, event, skb); 7531 7532 bt_dev_dbg(hdev, "event 0x%2.2x", event); 7533 7534 hci_event_func(hdev, event, skb, &opcode, &status, &req_complete, 7535 &req_complete_skb); 7536 7537 if (req_complete) { 7538 req_complete(hdev, status, opcode); 7539 } else if (req_complete_skb) { 7540 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) { 7541 kfree_skb(orig_skb); 7542 orig_skb = NULL; 7543 } 7544 req_complete_skb(hdev, status, opcode, orig_skb); 7545 } 7546 7547 done: 7548 kfree_skb(orig_skb); 7549 kfree_skb(skb); 7550 hdev->stat.evt_rx++; 7551 } 7552