1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 4 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License version 2 as 9 published by the Free Software Foundation; 10 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 22 SOFTWARE IS DISCLAIMED. 23 */ 24 25 /* Bluetooth HCI event handling. */ 26 27 #include <asm/unaligned.h> 28 29 #include <net/bluetooth/bluetooth.h> 30 #include <net/bluetooth/hci_core.h> 31 #include <net/bluetooth/mgmt.h> 32 33 #include "hci_request.h" 34 #include "hci_debugfs.h" 35 #include "a2mp.h" 36 #include "amp.h" 37 #include "smp.h" 38 #include "msft.h" 39 #include "eir.h" 40 41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ 42 "\x00\x00\x00\x00\x00\x00\x00\x00" 43 44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000) 45 46 /* Handle HCI Event packets */ 47 48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 49 u8 ev, size_t len) 50 { 51 void *data; 52 53 data = skb_pull_data(skb, len); 54 if (!data) 55 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev); 56 57 return data; 58 } 59 60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 61 u16 op, size_t len) 62 { 63 void *data; 64 65 data = skb_pull_data(skb, len); 66 if (!data) 67 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op); 68 69 return data; 70 } 71 72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 73 u8 ev, size_t len) 74 { 75 void *data; 76 77 data = skb_pull_data(skb, len); 78 if (!data) 79 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev); 80 81 return data; 82 } 83 84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, 85 struct sk_buff *skb) 86 { 87 struct hci_ev_status *rp = data; 88 89 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 90 91 /* It is possible that we receive Inquiry Complete event right 92 * before we receive Inquiry Cancel Command Complete event, in 93 * which case the latter event should have status of Command 94 * Disallowed (0x0c). This should not be treated as error, since 95 * we actually achieve what Inquiry Cancel wants to achieve, 96 * which is to end the last Inquiry session. 97 */ 98 if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { 99 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); 100 rp->status = 0x00; 101 } 102 103 if (rp->status) 104 return rp->status; 105 106 clear_bit(HCI_INQUIRY, &hdev->flags); 107 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 108 wake_up_bit(&hdev->flags, HCI_INQUIRY); 109 110 hci_dev_lock(hdev); 111 /* Set discovery state to stopped if we're not doing LE active 112 * scanning. 113 */ 114 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 115 hdev->le_scan_type != LE_SCAN_ACTIVE) 116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 117 hci_dev_unlock(hdev); 118 119 hci_conn_check_pending(hdev); 120 121 return rp->status; 122 } 123 124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data, 125 struct sk_buff *skb) 126 { 127 struct hci_ev_status *rp = data; 128 129 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 130 131 if (rp->status) 132 return rp->status; 133 134 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); 135 136 return rp->status; 137 } 138 139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, 140 struct sk_buff *skb) 141 { 142 struct hci_ev_status *rp = data; 143 144 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 145 146 if (rp->status) 147 return rp->status; 148 149 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); 150 151 hci_conn_check_pending(hdev); 152 153 return rp->status; 154 } 155 156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, 157 struct sk_buff *skb) 158 { 159 struct hci_ev_status *rp = data; 160 161 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 162 163 return rp->status; 164 } 165 166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data, 167 struct sk_buff *skb) 168 { 169 struct hci_rp_role_discovery *rp = data; 170 struct hci_conn *conn; 171 172 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 173 174 if (rp->status) 175 return rp->status; 176 177 hci_dev_lock(hdev); 178 179 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 180 if (conn) 181 conn->role = rp->role; 182 183 hci_dev_unlock(hdev); 184 185 return rp->status; 186 } 187 188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data, 189 struct sk_buff *skb) 190 { 191 struct hci_rp_read_link_policy *rp = data; 192 struct hci_conn *conn; 193 194 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 195 196 if (rp->status) 197 return rp->status; 198 199 hci_dev_lock(hdev); 200 201 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 202 if (conn) 203 conn->link_policy = __le16_to_cpu(rp->policy); 204 205 hci_dev_unlock(hdev); 206 207 return rp->status; 208 } 209 210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data, 211 struct sk_buff *skb) 212 { 213 struct hci_rp_write_link_policy *rp = data; 214 struct hci_conn *conn; 215 void *sent; 216 217 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 218 219 if (rp->status) 220 return rp->status; 221 222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); 223 if (!sent) 224 return rp->status; 225 226 hci_dev_lock(hdev); 227 228 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 229 if (conn) 230 conn->link_policy = get_unaligned_le16(sent + 2); 231 232 hci_dev_unlock(hdev); 233 234 return rp->status; 235 } 236 237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data, 238 struct sk_buff *skb) 239 { 240 struct hci_rp_read_def_link_policy *rp = data; 241 242 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 243 244 if (rp->status) 245 return rp->status; 246 247 hdev->link_policy = __le16_to_cpu(rp->policy); 248 249 return rp->status; 250 } 251 252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data, 253 struct sk_buff *skb) 254 { 255 struct hci_ev_status *rp = data; 256 void *sent; 257 258 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 259 260 if (rp->status) 261 return rp->status; 262 263 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 264 if (!sent) 265 return rp->status; 266 267 hdev->link_policy = get_unaligned_le16(sent); 268 269 return rp->status; 270 } 271 272 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb) 273 { 274 struct hci_ev_status *rp = data; 275 276 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 277 278 clear_bit(HCI_RESET, &hdev->flags); 279 280 if (rp->status) 281 return rp->status; 282 283 /* Reset all non-persistent flags */ 284 hci_dev_clear_volatile_flags(hdev); 285 286 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 287 288 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 289 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 290 291 memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); 292 hdev->adv_data_len = 0; 293 294 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); 295 hdev->scan_rsp_data_len = 0; 296 297 hdev->le_scan_type = LE_SCAN_PASSIVE; 298 299 hdev->ssp_debug_mode = 0; 300 301 hci_bdaddr_list_clear(&hdev->le_accept_list); 302 hci_bdaddr_list_clear(&hdev->le_resolv_list); 303 304 return rp->status; 305 } 306 307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data, 308 struct sk_buff *skb) 309 { 310 struct hci_rp_read_stored_link_key *rp = data; 311 struct hci_cp_read_stored_link_key *sent; 312 313 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 314 315 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY); 316 if (!sent) 317 return rp->status; 318 319 if (!rp->status && sent->read_all == 0x01) { 320 hdev->stored_max_keys = le16_to_cpu(rp->max_keys); 321 hdev->stored_num_keys = le16_to_cpu(rp->num_keys); 322 } 323 324 return rp->status; 325 } 326 327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data, 328 struct sk_buff *skb) 329 { 330 struct hci_rp_delete_stored_link_key *rp = data; 331 u16 num_keys; 332 333 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 334 335 if (rp->status) 336 return rp->status; 337 338 num_keys = le16_to_cpu(rp->num_keys); 339 340 if (num_keys <= hdev->stored_num_keys) 341 hdev->stored_num_keys -= num_keys; 342 else 343 hdev->stored_num_keys = 0; 344 345 return rp->status; 346 } 347 348 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data, 349 struct sk_buff *skb) 350 { 351 struct hci_ev_status *rp = data; 352 void *sent; 353 354 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 355 356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 357 if (!sent) 358 return rp->status; 359 360 hci_dev_lock(hdev); 361 362 if (hci_dev_test_flag(hdev, HCI_MGMT)) 363 mgmt_set_local_name_complete(hdev, sent, rp->status); 364 else if (!rp->status) 365 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 366 367 hci_dev_unlock(hdev); 368 369 return rp->status; 370 } 371 372 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data, 373 struct sk_buff *skb) 374 { 375 struct hci_rp_read_local_name *rp = data; 376 377 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 378 379 if (rp->status) 380 return rp->status; 381 382 if (hci_dev_test_flag(hdev, HCI_SETUP) || 383 hci_dev_test_flag(hdev, HCI_CONFIG)) 384 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 385 386 return rp->status; 387 } 388 389 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data, 390 struct sk_buff *skb) 391 { 392 struct hci_ev_status *rp = data; 393 void *sent; 394 395 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 396 397 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 398 if (!sent) 399 return rp->status; 400 401 hci_dev_lock(hdev); 402 403 if (!rp->status) { 404 __u8 param = *((__u8 *) sent); 405 406 if (param == AUTH_ENABLED) 407 set_bit(HCI_AUTH, &hdev->flags); 408 else 409 clear_bit(HCI_AUTH, &hdev->flags); 410 } 411 412 if (hci_dev_test_flag(hdev, HCI_MGMT)) 413 mgmt_auth_enable_complete(hdev, rp->status); 414 415 hci_dev_unlock(hdev); 416 417 return rp->status; 418 } 419 420 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data, 421 struct sk_buff *skb) 422 { 423 struct hci_ev_status *rp = data; 424 __u8 param; 425 void *sent; 426 427 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 428 429 if (rp->status) 430 return rp->status; 431 432 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 433 if (!sent) 434 return rp->status; 435 436 param = *((__u8 *) sent); 437 438 if (param) 439 set_bit(HCI_ENCRYPT, &hdev->flags); 440 else 441 clear_bit(HCI_ENCRYPT, &hdev->flags); 442 443 return rp->status; 444 } 445 446 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data, 447 struct sk_buff *skb) 448 { 449 struct hci_ev_status *rp = data; 450 __u8 param; 451 void *sent; 452 453 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 454 455 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 456 if (!sent) 457 return rp->status; 458 459 param = *((__u8 *) sent); 460 461 hci_dev_lock(hdev); 462 463 if (rp->status) { 464 hdev->discov_timeout = 0; 465 goto done; 466 } 467 468 if (param & SCAN_INQUIRY) 469 set_bit(HCI_ISCAN, &hdev->flags); 470 else 471 clear_bit(HCI_ISCAN, &hdev->flags); 472 473 if (param & SCAN_PAGE) 474 set_bit(HCI_PSCAN, &hdev->flags); 475 else 476 clear_bit(HCI_PSCAN, &hdev->flags); 477 478 done: 479 hci_dev_unlock(hdev); 480 481 return rp->status; 482 } 483 484 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data, 485 struct sk_buff *skb) 486 { 487 struct hci_ev_status *rp = data; 488 struct hci_cp_set_event_filter *cp; 489 void *sent; 490 491 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 492 493 if (rp->status) 494 return rp->status; 495 496 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT); 497 if (!sent) 498 return rp->status; 499 500 cp = (struct hci_cp_set_event_filter *)sent; 501 502 if (cp->flt_type == HCI_FLT_CLEAR_ALL) 503 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); 504 else 505 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); 506 507 return rp->status; 508 } 509 510 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data, 511 struct sk_buff *skb) 512 { 513 struct hci_rp_read_class_of_dev *rp = data; 514 515 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 516 517 if (rp->status) 518 return rp->status; 519 520 memcpy(hdev->dev_class, rp->dev_class, 3); 521 522 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2], 523 hdev->dev_class[1], hdev->dev_class[0]); 524 525 return rp->status; 526 } 527 528 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data, 529 struct sk_buff *skb) 530 { 531 struct hci_ev_status *rp = data; 532 void *sent; 533 534 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 535 536 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 537 if (!sent) 538 return rp->status; 539 540 hci_dev_lock(hdev); 541 542 if (!rp->status) 543 memcpy(hdev->dev_class, sent, 3); 544 545 if (hci_dev_test_flag(hdev, HCI_MGMT)) 546 mgmt_set_class_of_dev_complete(hdev, sent, rp->status); 547 548 hci_dev_unlock(hdev); 549 550 return rp->status; 551 } 552 553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data, 554 struct sk_buff *skb) 555 { 556 struct hci_rp_read_voice_setting *rp = data; 557 __u16 setting; 558 559 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 560 561 if (rp->status) 562 return rp->status; 563 564 setting = __le16_to_cpu(rp->voice_setting); 565 566 if (hdev->voice_setting == setting) 567 return rp->status; 568 569 hdev->voice_setting = setting; 570 571 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 572 573 if (hdev->notify) 574 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 575 576 return rp->status; 577 } 578 579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data, 580 struct sk_buff *skb) 581 { 582 struct hci_ev_status *rp = data; 583 __u16 setting; 584 void *sent; 585 586 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 587 588 if (rp->status) 589 return rp->status; 590 591 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 592 if (!sent) 593 return rp->status; 594 595 setting = get_unaligned_le16(sent); 596 597 if (hdev->voice_setting == setting) 598 return rp->status; 599 600 hdev->voice_setting = setting; 601 602 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 603 604 if (hdev->notify) 605 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 606 607 return rp->status; 608 } 609 610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data, 611 struct sk_buff *skb) 612 { 613 struct hci_rp_read_num_supported_iac *rp = data; 614 615 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 616 617 if (rp->status) 618 return rp->status; 619 620 hdev->num_iac = rp->num_iac; 621 622 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac); 623 624 return rp->status; 625 } 626 627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data, 628 struct sk_buff *skb) 629 { 630 struct hci_ev_status *rp = data; 631 struct hci_cp_write_ssp_mode *sent; 632 633 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 634 635 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 636 if (!sent) 637 return rp->status; 638 639 hci_dev_lock(hdev); 640 641 if (!rp->status) { 642 if (sent->mode) 643 hdev->features[1][0] |= LMP_HOST_SSP; 644 else 645 hdev->features[1][0] &= ~LMP_HOST_SSP; 646 } 647 648 if (!rp->status) { 649 if (sent->mode) 650 hci_dev_set_flag(hdev, HCI_SSP_ENABLED); 651 else 652 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); 653 } 654 655 hci_dev_unlock(hdev); 656 657 return rp->status; 658 } 659 660 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data, 661 struct sk_buff *skb) 662 { 663 struct hci_ev_status *rp = data; 664 struct hci_cp_write_sc_support *sent; 665 666 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 667 668 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); 669 if (!sent) 670 return rp->status; 671 672 hci_dev_lock(hdev); 673 674 if (!rp->status) { 675 if (sent->support) 676 hdev->features[1][0] |= LMP_HOST_SC; 677 else 678 hdev->features[1][0] &= ~LMP_HOST_SC; 679 } 680 681 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) { 682 if (sent->support) 683 hci_dev_set_flag(hdev, HCI_SC_ENABLED); 684 else 685 hci_dev_clear_flag(hdev, HCI_SC_ENABLED); 686 } 687 688 hci_dev_unlock(hdev); 689 690 return rp->status; 691 } 692 693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data, 694 struct sk_buff *skb) 695 { 696 struct hci_rp_read_local_version *rp = data; 697 698 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 699 700 if (rp->status) 701 return rp->status; 702 703 if (hci_dev_test_flag(hdev, HCI_SETUP) || 704 hci_dev_test_flag(hdev, HCI_CONFIG)) { 705 hdev->hci_ver = rp->hci_ver; 706 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 707 hdev->lmp_ver = rp->lmp_ver; 708 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 709 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 710 } 711 712 return rp->status; 713 } 714 715 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data, 716 struct sk_buff *skb) 717 { 718 struct hci_rp_read_local_commands *rp = data; 719 720 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 721 722 if (rp->status) 723 return rp->status; 724 725 if (hci_dev_test_flag(hdev, HCI_SETUP) || 726 hci_dev_test_flag(hdev, HCI_CONFIG)) 727 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 728 729 return rp->status; 730 } 731 732 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data, 733 struct sk_buff *skb) 734 { 735 struct hci_rp_read_auth_payload_to *rp = data; 736 struct hci_conn *conn; 737 738 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 739 740 if (rp->status) 741 return rp->status; 742 743 hci_dev_lock(hdev); 744 745 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 746 if (conn) 747 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout); 748 749 hci_dev_unlock(hdev); 750 751 return rp->status; 752 } 753 754 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data, 755 struct sk_buff *skb) 756 { 757 struct hci_rp_write_auth_payload_to *rp = data; 758 struct hci_conn *conn; 759 void *sent; 760 761 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 762 763 if (rp->status) 764 return rp->status; 765 766 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO); 767 if (!sent) 768 return rp->status; 769 770 hci_dev_lock(hdev); 771 772 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 773 if (conn) 774 conn->auth_payload_timeout = get_unaligned_le16(sent + 2); 775 776 hci_dev_unlock(hdev); 777 778 return rp->status; 779 } 780 781 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data, 782 struct sk_buff *skb) 783 { 784 struct hci_rp_read_local_features *rp = data; 785 786 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 787 788 if (rp->status) 789 return rp->status; 790 791 memcpy(hdev->features, rp->features, 8); 792 793 /* Adjust default settings according to features 794 * supported by device. */ 795 796 if (hdev->features[0][0] & LMP_3SLOT) 797 hdev->pkt_type |= (HCI_DM3 | HCI_DH3); 798 799 if (hdev->features[0][0] & LMP_5SLOT) 800 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 801 802 if (hdev->features[0][1] & LMP_HV2) { 803 hdev->pkt_type |= (HCI_HV2); 804 hdev->esco_type |= (ESCO_HV2); 805 } 806 807 if (hdev->features[0][1] & LMP_HV3) { 808 hdev->pkt_type |= (HCI_HV3); 809 hdev->esco_type |= (ESCO_HV3); 810 } 811 812 if (lmp_esco_capable(hdev)) 813 hdev->esco_type |= (ESCO_EV3); 814 815 if (hdev->features[0][4] & LMP_EV4) 816 hdev->esco_type |= (ESCO_EV4); 817 818 if (hdev->features[0][4] & LMP_EV5) 819 hdev->esco_type |= (ESCO_EV5); 820 821 if (hdev->features[0][5] & LMP_EDR_ESCO_2M) 822 hdev->esco_type |= (ESCO_2EV3); 823 824 if (hdev->features[0][5] & LMP_EDR_ESCO_3M) 825 hdev->esco_type |= (ESCO_3EV3); 826 827 if (hdev->features[0][5] & LMP_EDR_3S_ESCO) 828 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 829 830 return rp->status; 831 } 832 833 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, 834 struct sk_buff *skb) 835 { 836 struct hci_rp_read_local_ext_features *rp = data; 837 838 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 839 840 if (rp->status) 841 return rp->status; 842 843 if (hdev->max_page < rp->max_page) 844 hdev->max_page = rp->max_page; 845 846 if (rp->page < HCI_MAX_PAGES) 847 memcpy(hdev->features[rp->page], rp->features, 8); 848 849 return rp->status; 850 } 851 852 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data, 853 struct sk_buff *skb) 854 { 855 struct hci_rp_read_flow_control_mode *rp = data; 856 857 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 858 859 if (rp->status) 860 return rp->status; 861 862 hdev->flow_ctl_mode = rp->mode; 863 864 return rp->status; 865 } 866 867 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, 868 struct sk_buff *skb) 869 { 870 struct hci_rp_read_buffer_size *rp = data; 871 872 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 873 874 if (rp->status) 875 return rp->status; 876 877 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); 878 hdev->sco_mtu = rp->sco_mtu; 879 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); 880 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); 881 882 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 883 hdev->sco_mtu = 64; 884 hdev->sco_pkts = 8; 885 } 886 887 hdev->acl_cnt = hdev->acl_pkts; 888 hdev->sco_cnt = hdev->sco_pkts; 889 890 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, 891 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); 892 893 return rp->status; 894 } 895 896 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data, 897 struct sk_buff *skb) 898 { 899 struct hci_rp_read_bd_addr *rp = data; 900 901 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 902 903 if (rp->status) 904 return rp->status; 905 906 if (test_bit(HCI_INIT, &hdev->flags)) 907 bacpy(&hdev->bdaddr, &rp->bdaddr); 908 909 if (hci_dev_test_flag(hdev, HCI_SETUP)) 910 bacpy(&hdev->setup_addr, &rp->bdaddr); 911 912 return rp->status; 913 } 914 915 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data, 916 struct sk_buff *skb) 917 { 918 struct hci_rp_read_local_pairing_opts *rp = data; 919 920 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 921 922 if (rp->status) 923 return rp->status; 924 925 if (hci_dev_test_flag(hdev, HCI_SETUP) || 926 hci_dev_test_flag(hdev, HCI_CONFIG)) { 927 hdev->pairing_opts = rp->pairing_opts; 928 hdev->max_enc_key_size = rp->max_key_size; 929 } 930 931 return rp->status; 932 } 933 934 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data, 935 struct sk_buff *skb) 936 { 937 struct hci_rp_read_page_scan_activity *rp = data; 938 939 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 940 941 if (rp->status) 942 return rp->status; 943 944 if (test_bit(HCI_INIT, &hdev->flags)) { 945 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 946 hdev->page_scan_window = __le16_to_cpu(rp->window); 947 } 948 949 return rp->status; 950 } 951 952 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data, 953 struct sk_buff *skb) 954 { 955 struct hci_ev_status *rp = data; 956 struct hci_cp_write_page_scan_activity *sent; 957 958 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 959 960 if (rp->status) 961 return rp->status; 962 963 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); 964 if (!sent) 965 return rp->status; 966 967 hdev->page_scan_interval = __le16_to_cpu(sent->interval); 968 hdev->page_scan_window = __le16_to_cpu(sent->window); 969 970 return rp->status; 971 } 972 973 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data, 974 struct sk_buff *skb) 975 { 976 struct hci_rp_read_page_scan_type *rp = data; 977 978 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 979 980 if (rp->status) 981 return rp->status; 982 983 if (test_bit(HCI_INIT, &hdev->flags)) 984 hdev->page_scan_type = rp->type; 985 986 return rp->status; 987 } 988 989 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data, 990 struct sk_buff *skb) 991 { 992 struct hci_ev_status *rp = data; 993 u8 *type; 994 995 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 996 997 if (rp->status) 998 return rp->status; 999 1000 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); 1001 if (type) 1002 hdev->page_scan_type = *type; 1003 1004 return rp->status; 1005 } 1006 1007 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data, 1008 struct sk_buff *skb) 1009 { 1010 struct hci_rp_read_data_block_size *rp = data; 1011 1012 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1013 1014 if (rp->status) 1015 return rp->status; 1016 1017 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); 1018 hdev->block_len = __le16_to_cpu(rp->block_len); 1019 hdev->num_blocks = __le16_to_cpu(rp->num_blocks); 1020 1021 hdev->block_cnt = hdev->num_blocks; 1022 1023 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 1024 hdev->block_cnt, hdev->block_len); 1025 1026 return rp->status; 1027 } 1028 1029 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data, 1030 struct sk_buff *skb) 1031 { 1032 struct hci_rp_read_clock *rp = data; 1033 struct hci_cp_read_clock *cp; 1034 struct hci_conn *conn; 1035 1036 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1037 1038 if (rp->status) 1039 return rp->status; 1040 1041 hci_dev_lock(hdev); 1042 1043 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); 1044 if (!cp) 1045 goto unlock; 1046 1047 if (cp->which == 0x00) { 1048 hdev->clock = le32_to_cpu(rp->clock); 1049 goto unlock; 1050 } 1051 1052 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 1053 if (conn) { 1054 conn->clock = le32_to_cpu(rp->clock); 1055 conn->clock_accuracy = le16_to_cpu(rp->accuracy); 1056 } 1057 1058 unlock: 1059 hci_dev_unlock(hdev); 1060 return rp->status; 1061 } 1062 1063 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data, 1064 struct sk_buff *skb) 1065 { 1066 struct hci_rp_read_local_amp_info *rp = data; 1067 1068 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1069 1070 if (rp->status) 1071 return rp->status; 1072 1073 hdev->amp_status = rp->amp_status; 1074 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); 1075 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); 1076 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); 1077 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); 1078 hdev->amp_type = rp->amp_type; 1079 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); 1080 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); 1081 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 1082 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 1083 1084 return rp->status; 1085 } 1086 1087 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data, 1088 struct sk_buff *skb) 1089 { 1090 struct hci_rp_read_inq_rsp_tx_power *rp = data; 1091 1092 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1093 1094 if (rp->status) 1095 return rp->status; 1096 1097 hdev->inq_tx_power = rp->tx_power; 1098 1099 return rp->status; 1100 } 1101 1102 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data, 1103 struct sk_buff *skb) 1104 { 1105 struct hci_rp_read_def_err_data_reporting *rp = data; 1106 1107 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1108 1109 if (rp->status) 1110 return rp->status; 1111 1112 hdev->err_data_reporting = rp->err_data_reporting; 1113 1114 return rp->status; 1115 } 1116 1117 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data, 1118 struct sk_buff *skb) 1119 { 1120 struct hci_ev_status *rp = data; 1121 struct hci_cp_write_def_err_data_reporting *cp; 1122 1123 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1124 1125 if (rp->status) 1126 return rp->status; 1127 1128 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING); 1129 if (!cp) 1130 return rp->status; 1131 1132 hdev->err_data_reporting = cp->err_data_reporting; 1133 1134 return rp->status; 1135 } 1136 1137 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data, 1138 struct sk_buff *skb) 1139 { 1140 struct hci_rp_pin_code_reply *rp = data; 1141 struct hci_cp_pin_code_reply *cp; 1142 struct hci_conn *conn; 1143 1144 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1145 1146 hci_dev_lock(hdev); 1147 1148 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1149 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 1150 1151 if (rp->status) 1152 goto unlock; 1153 1154 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 1155 if (!cp) 1156 goto unlock; 1157 1158 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1159 if (conn) 1160 conn->pin_length = cp->pin_len; 1161 1162 unlock: 1163 hci_dev_unlock(hdev); 1164 return rp->status; 1165 } 1166 1167 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data, 1168 struct sk_buff *skb) 1169 { 1170 struct hci_rp_pin_code_neg_reply *rp = data; 1171 1172 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1173 1174 hci_dev_lock(hdev); 1175 1176 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1177 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 1178 rp->status); 1179 1180 hci_dev_unlock(hdev); 1181 1182 return rp->status; 1183 } 1184 1185 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data, 1186 struct sk_buff *skb) 1187 { 1188 struct hci_rp_le_read_buffer_size *rp = data; 1189 1190 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1191 1192 if (rp->status) 1193 return rp->status; 1194 1195 hdev->le_mtu = __le16_to_cpu(rp->le_mtu); 1196 hdev->le_pkts = rp->le_max_pkt; 1197 1198 hdev->le_cnt = hdev->le_pkts; 1199 1200 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 1201 1202 return rp->status; 1203 } 1204 1205 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data, 1206 struct sk_buff *skb) 1207 { 1208 struct hci_rp_le_read_local_features *rp = data; 1209 1210 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1211 1212 if (rp->status) 1213 return rp->status; 1214 1215 memcpy(hdev->le_features, rp->features, 8); 1216 1217 return rp->status; 1218 } 1219 1220 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data, 1221 struct sk_buff *skb) 1222 { 1223 struct hci_rp_le_read_adv_tx_power *rp = data; 1224 1225 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1226 1227 if (rp->status) 1228 return rp->status; 1229 1230 hdev->adv_tx_power = rp->tx_power; 1231 1232 return rp->status; 1233 } 1234 1235 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data, 1236 struct sk_buff *skb) 1237 { 1238 struct hci_rp_user_confirm_reply *rp = data; 1239 1240 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1241 1242 hci_dev_lock(hdev); 1243 1244 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1245 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, 1246 rp->status); 1247 1248 hci_dev_unlock(hdev); 1249 1250 return rp->status; 1251 } 1252 1253 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data, 1254 struct sk_buff *skb) 1255 { 1256 struct hci_rp_user_confirm_reply *rp = data; 1257 1258 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1259 1260 hci_dev_lock(hdev); 1261 1262 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1263 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 1264 ACL_LINK, 0, rp->status); 1265 1266 hci_dev_unlock(hdev); 1267 1268 return rp->status; 1269 } 1270 1271 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data, 1272 struct sk_buff *skb) 1273 { 1274 struct hci_rp_user_confirm_reply *rp = data; 1275 1276 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1277 1278 hci_dev_lock(hdev); 1279 1280 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1281 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 1282 0, rp->status); 1283 1284 hci_dev_unlock(hdev); 1285 1286 return rp->status; 1287 } 1288 1289 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data, 1290 struct sk_buff *skb) 1291 { 1292 struct hci_rp_user_confirm_reply *rp = data; 1293 1294 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1295 1296 hci_dev_lock(hdev); 1297 1298 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1299 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 1300 ACL_LINK, 0, rp->status); 1301 1302 hci_dev_unlock(hdev); 1303 1304 return rp->status; 1305 } 1306 1307 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data, 1308 struct sk_buff *skb) 1309 { 1310 struct hci_rp_read_local_oob_data *rp = data; 1311 1312 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1313 1314 return rp->status; 1315 } 1316 1317 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data, 1318 struct sk_buff *skb) 1319 { 1320 struct hci_rp_read_local_oob_ext_data *rp = data; 1321 1322 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1323 1324 return rp->status; 1325 } 1326 1327 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data, 1328 struct sk_buff *skb) 1329 { 1330 struct hci_ev_status *rp = data; 1331 bdaddr_t *sent; 1332 1333 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1334 1335 if (rp->status) 1336 return rp->status; 1337 1338 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 1339 if (!sent) 1340 return rp->status; 1341 1342 hci_dev_lock(hdev); 1343 1344 bacpy(&hdev->random_addr, sent); 1345 1346 if (!bacmp(&hdev->rpa, sent)) { 1347 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); 1348 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, 1349 secs_to_jiffies(hdev->rpa_timeout)); 1350 } 1351 1352 hci_dev_unlock(hdev); 1353 1354 return rp->status; 1355 } 1356 1357 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data, 1358 struct sk_buff *skb) 1359 { 1360 struct hci_ev_status *rp = data; 1361 struct hci_cp_le_set_default_phy *cp; 1362 1363 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1364 1365 if (rp->status) 1366 return rp->status; 1367 1368 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); 1369 if (!cp) 1370 return rp->status; 1371 1372 hci_dev_lock(hdev); 1373 1374 hdev->le_tx_def_phys = cp->tx_phys; 1375 hdev->le_rx_def_phys = cp->rx_phys; 1376 1377 hci_dev_unlock(hdev); 1378 1379 return rp->status; 1380 } 1381 1382 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data, 1383 struct sk_buff *skb) 1384 { 1385 struct hci_ev_status *rp = data; 1386 struct hci_cp_le_set_adv_set_rand_addr *cp; 1387 struct adv_info *adv; 1388 1389 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1390 1391 if (rp->status) 1392 return rp->status; 1393 1394 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); 1395 /* Update only in case the adv instance since handle 0x00 shall be using 1396 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and 1397 * non-extended adverting. 1398 */ 1399 if (!cp || !cp->handle) 1400 return rp->status; 1401 1402 hci_dev_lock(hdev); 1403 1404 adv = hci_find_adv_instance(hdev, cp->handle); 1405 if (adv) { 1406 bacpy(&adv->random_addr, &cp->bdaddr); 1407 if (!bacmp(&hdev->rpa, &cp->bdaddr)) { 1408 adv->rpa_expired = false; 1409 queue_delayed_work(hdev->workqueue, 1410 &adv->rpa_expired_cb, 1411 secs_to_jiffies(hdev->rpa_timeout)); 1412 } 1413 } 1414 1415 hci_dev_unlock(hdev); 1416 1417 return rp->status; 1418 } 1419 1420 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data, 1421 struct sk_buff *skb) 1422 { 1423 struct hci_ev_status *rp = data; 1424 u8 *instance; 1425 int err; 1426 1427 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1428 1429 if (rp->status) 1430 return rp->status; 1431 1432 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET); 1433 if (!instance) 1434 return rp->status; 1435 1436 hci_dev_lock(hdev); 1437 1438 err = hci_remove_adv_instance(hdev, *instance); 1439 if (!err) 1440 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, 1441 *instance); 1442 1443 hci_dev_unlock(hdev); 1444 1445 return rp->status; 1446 } 1447 1448 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data, 1449 struct sk_buff *skb) 1450 { 1451 struct hci_ev_status *rp = data; 1452 struct adv_info *adv, *n; 1453 int err; 1454 1455 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1456 1457 if (rp->status) 1458 return rp->status; 1459 1460 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS)) 1461 return rp->status; 1462 1463 hci_dev_lock(hdev); 1464 1465 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 1466 u8 instance = adv->instance; 1467 1468 err = hci_remove_adv_instance(hdev, instance); 1469 if (!err) 1470 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), 1471 hdev, instance); 1472 } 1473 1474 hci_dev_unlock(hdev); 1475 1476 return rp->status; 1477 } 1478 1479 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data, 1480 struct sk_buff *skb) 1481 { 1482 struct hci_rp_le_read_transmit_power *rp = data; 1483 1484 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1485 1486 if (rp->status) 1487 return rp->status; 1488 1489 hdev->min_le_tx_power = rp->min_le_tx_power; 1490 hdev->max_le_tx_power = rp->max_le_tx_power; 1491 1492 return rp->status; 1493 } 1494 1495 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data, 1496 struct sk_buff *skb) 1497 { 1498 struct hci_ev_status *rp = data; 1499 struct hci_cp_le_set_privacy_mode *cp; 1500 struct hci_conn_params *params; 1501 1502 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1503 1504 if (rp->status) 1505 return rp->status; 1506 1507 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE); 1508 if (!cp) 1509 return rp->status; 1510 1511 hci_dev_lock(hdev); 1512 1513 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type); 1514 if (params) 1515 params->privacy_mode = cp->mode; 1516 1517 hci_dev_unlock(hdev); 1518 1519 return rp->status; 1520 } 1521 1522 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data, 1523 struct sk_buff *skb) 1524 { 1525 struct hci_ev_status *rp = data; 1526 __u8 *sent; 1527 1528 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1529 1530 if (rp->status) 1531 return rp->status; 1532 1533 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); 1534 if (!sent) 1535 return rp->status; 1536 1537 hci_dev_lock(hdev); 1538 1539 /* If we're doing connection initiation as peripheral. Set a 1540 * timeout in case something goes wrong. 1541 */ 1542 if (*sent) { 1543 struct hci_conn *conn; 1544 1545 hci_dev_set_flag(hdev, HCI_LE_ADV); 1546 1547 conn = hci_lookup_le_connect(hdev); 1548 if (conn) 1549 queue_delayed_work(hdev->workqueue, 1550 &conn->le_conn_timeout, 1551 conn->conn_timeout); 1552 } else { 1553 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1554 } 1555 1556 hci_dev_unlock(hdev); 1557 1558 return rp->status; 1559 } 1560 1561 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, 1562 struct sk_buff *skb) 1563 { 1564 struct hci_cp_le_set_ext_adv_enable *cp; 1565 struct hci_cp_ext_adv_set *set; 1566 struct adv_info *adv = NULL, *n; 1567 struct hci_ev_status *rp = data; 1568 1569 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1570 1571 if (rp->status) 1572 return rp->status; 1573 1574 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); 1575 if (!cp) 1576 return rp->status; 1577 1578 set = (void *)cp->data; 1579 1580 hci_dev_lock(hdev); 1581 1582 if (cp->num_of_sets) 1583 adv = hci_find_adv_instance(hdev, set->handle); 1584 1585 if (cp->enable) { 1586 struct hci_conn *conn; 1587 1588 hci_dev_set_flag(hdev, HCI_LE_ADV); 1589 1590 if (adv) 1591 adv->enabled = true; 1592 1593 conn = hci_lookup_le_connect(hdev); 1594 if (conn) 1595 queue_delayed_work(hdev->workqueue, 1596 &conn->le_conn_timeout, 1597 conn->conn_timeout); 1598 } else { 1599 if (cp->num_of_sets) { 1600 if (adv) 1601 adv->enabled = false; 1602 1603 /* If just one instance was disabled check if there are 1604 * any other instance enabled before clearing HCI_LE_ADV 1605 */ 1606 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 1607 list) { 1608 if (adv->enabled) 1609 goto unlock; 1610 } 1611 } else { 1612 /* All instances shall be considered disabled */ 1613 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 1614 list) 1615 adv->enabled = false; 1616 } 1617 1618 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1619 } 1620 1621 unlock: 1622 hci_dev_unlock(hdev); 1623 return rp->status; 1624 } 1625 1626 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data, 1627 struct sk_buff *skb) 1628 { 1629 struct hci_cp_le_set_scan_param *cp; 1630 struct hci_ev_status *rp = data; 1631 1632 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1633 1634 if (rp->status) 1635 return rp->status; 1636 1637 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); 1638 if (!cp) 1639 return rp->status; 1640 1641 hci_dev_lock(hdev); 1642 1643 hdev->le_scan_type = cp->type; 1644 1645 hci_dev_unlock(hdev); 1646 1647 return rp->status; 1648 } 1649 1650 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data, 1651 struct sk_buff *skb) 1652 { 1653 struct hci_cp_le_set_ext_scan_params *cp; 1654 struct hci_ev_status *rp = data; 1655 struct hci_cp_le_scan_phy_params *phy_param; 1656 1657 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1658 1659 if (rp->status) 1660 return rp->status; 1661 1662 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); 1663 if (!cp) 1664 return rp->status; 1665 1666 phy_param = (void *)cp->data; 1667 1668 hci_dev_lock(hdev); 1669 1670 hdev->le_scan_type = phy_param->type; 1671 1672 hci_dev_unlock(hdev); 1673 1674 return rp->status; 1675 } 1676 1677 static bool has_pending_adv_report(struct hci_dev *hdev) 1678 { 1679 struct discovery_state *d = &hdev->discovery; 1680 1681 return bacmp(&d->last_adv_addr, BDADDR_ANY); 1682 } 1683 1684 static void clear_pending_adv_report(struct hci_dev *hdev) 1685 { 1686 struct discovery_state *d = &hdev->discovery; 1687 1688 bacpy(&d->last_adv_addr, BDADDR_ANY); 1689 d->last_adv_data_len = 0; 1690 } 1691 1692 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, 1693 u8 bdaddr_type, s8 rssi, u32 flags, 1694 u8 *data, u8 len) 1695 { 1696 struct discovery_state *d = &hdev->discovery; 1697 1698 if (len > HCI_MAX_AD_LENGTH) 1699 return; 1700 1701 bacpy(&d->last_adv_addr, bdaddr); 1702 d->last_adv_addr_type = bdaddr_type; 1703 d->last_adv_rssi = rssi; 1704 d->last_adv_flags = flags; 1705 memcpy(d->last_adv_data, data, len); 1706 d->last_adv_data_len = len; 1707 } 1708 1709 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) 1710 { 1711 hci_dev_lock(hdev); 1712 1713 switch (enable) { 1714 case LE_SCAN_ENABLE: 1715 hci_dev_set_flag(hdev, HCI_LE_SCAN); 1716 if (hdev->le_scan_type == LE_SCAN_ACTIVE) 1717 clear_pending_adv_report(hdev); 1718 break; 1719 1720 case LE_SCAN_DISABLE: 1721 /* We do this here instead of when setting DISCOVERY_STOPPED 1722 * since the latter would potentially require waiting for 1723 * inquiry to stop too. 1724 */ 1725 if (has_pending_adv_report(hdev)) { 1726 struct discovery_state *d = &hdev->discovery; 1727 1728 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 1729 d->last_adv_addr_type, NULL, 1730 d->last_adv_rssi, d->last_adv_flags, 1731 d->last_adv_data, 1732 d->last_adv_data_len, NULL, 0); 1733 } 1734 1735 /* Cancel this timer so that we don't try to disable scanning 1736 * when it's already disabled. 1737 */ 1738 cancel_delayed_work(&hdev->le_scan_disable); 1739 1740 hci_dev_clear_flag(hdev, HCI_LE_SCAN); 1741 1742 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we 1743 * interrupted scanning due to a connect request. Mark 1744 * therefore discovery as stopped. 1745 */ 1746 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) 1747 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1748 1749 break; 1750 1751 default: 1752 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", 1753 enable); 1754 break; 1755 } 1756 1757 hci_dev_unlock(hdev); 1758 } 1759 1760 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data, 1761 struct sk_buff *skb) 1762 { 1763 struct hci_cp_le_set_scan_enable *cp; 1764 struct hci_ev_status *rp = data; 1765 1766 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1767 1768 if (rp->status) 1769 return rp->status; 1770 1771 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1772 if (!cp) 1773 return rp->status; 1774 1775 le_set_scan_enable_complete(hdev, cp->enable); 1776 1777 return rp->status; 1778 } 1779 1780 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data, 1781 struct sk_buff *skb) 1782 { 1783 struct hci_cp_le_set_ext_scan_enable *cp; 1784 struct hci_ev_status *rp = data; 1785 1786 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1787 1788 if (rp->status) 1789 return rp->status; 1790 1791 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); 1792 if (!cp) 1793 return rp->status; 1794 1795 le_set_scan_enable_complete(hdev, cp->enable); 1796 1797 return rp->status; 1798 } 1799 1800 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data, 1801 struct sk_buff *skb) 1802 { 1803 struct hci_rp_le_read_num_supported_adv_sets *rp = data; 1804 1805 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status, 1806 rp->num_of_sets); 1807 1808 if (rp->status) 1809 return rp->status; 1810 1811 hdev->le_num_of_adv_sets = rp->num_of_sets; 1812 1813 return rp->status; 1814 } 1815 1816 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data, 1817 struct sk_buff *skb) 1818 { 1819 struct hci_rp_le_read_accept_list_size *rp = data; 1820 1821 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); 1822 1823 if (rp->status) 1824 return rp->status; 1825 1826 hdev->le_accept_list_size = rp->size; 1827 1828 return rp->status; 1829 } 1830 1831 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data, 1832 struct sk_buff *skb) 1833 { 1834 struct hci_ev_status *rp = data; 1835 1836 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1837 1838 if (rp->status) 1839 return rp->status; 1840 1841 hci_dev_lock(hdev); 1842 hci_bdaddr_list_clear(&hdev->le_accept_list); 1843 hci_dev_unlock(hdev); 1844 1845 return rp->status; 1846 } 1847 1848 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data, 1849 struct sk_buff *skb) 1850 { 1851 struct hci_cp_le_add_to_accept_list *sent; 1852 struct hci_ev_status *rp = data; 1853 1854 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1855 1856 if (rp->status) 1857 return rp->status; 1858 1859 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); 1860 if (!sent) 1861 return rp->status; 1862 1863 hci_dev_lock(hdev); 1864 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr, 1865 sent->bdaddr_type); 1866 hci_dev_unlock(hdev); 1867 1868 return rp->status; 1869 } 1870 1871 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data, 1872 struct sk_buff *skb) 1873 { 1874 struct hci_cp_le_del_from_accept_list *sent; 1875 struct hci_ev_status *rp = data; 1876 1877 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1878 1879 if (rp->status) 1880 return rp->status; 1881 1882 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST); 1883 if (!sent) 1884 return rp->status; 1885 1886 hci_dev_lock(hdev); 1887 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr, 1888 sent->bdaddr_type); 1889 hci_dev_unlock(hdev); 1890 1891 return rp->status; 1892 } 1893 1894 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data, 1895 struct sk_buff *skb) 1896 { 1897 struct hci_rp_le_read_supported_states *rp = data; 1898 1899 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1900 1901 if (rp->status) 1902 return rp->status; 1903 1904 memcpy(hdev->le_states, rp->le_states, 8); 1905 1906 return rp->status; 1907 } 1908 1909 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data, 1910 struct sk_buff *skb) 1911 { 1912 struct hci_rp_le_read_def_data_len *rp = data; 1913 1914 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1915 1916 if (rp->status) 1917 return rp->status; 1918 1919 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); 1920 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); 1921 1922 return rp->status; 1923 } 1924 1925 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data, 1926 struct sk_buff *skb) 1927 { 1928 struct hci_cp_le_write_def_data_len *sent; 1929 struct hci_ev_status *rp = data; 1930 1931 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1932 1933 if (rp->status) 1934 return rp->status; 1935 1936 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); 1937 if (!sent) 1938 return rp->status; 1939 1940 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); 1941 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); 1942 1943 return rp->status; 1944 } 1945 1946 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data, 1947 struct sk_buff *skb) 1948 { 1949 struct hci_cp_le_add_to_resolv_list *sent; 1950 struct hci_ev_status *rp = data; 1951 1952 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1953 1954 if (rp->status) 1955 return rp->status; 1956 1957 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); 1958 if (!sent) 1959 return rp->status; 1960 1961 hci_dev_lock(hdev); 1962 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 1963 sent->bdaddr_type, sent->peer_irk, 1964 sent->local_irk); 1965 hci_dev_unlock(hdev); 1966 1967 return rp->status; 1968 } 1969 1970 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data, 1971 struct sk_buff *skb) 1972 { 1973 struct hci_cp_le_del_from_resolv_list *sent; 1974 struct hci_ev_status *rp = data; 1975 1976 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1977 1978 if (rp->status) 1979 return rp->status; 1980 1981 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); 1982 if (!sent) 1983 return rp->status; 1984 1985 hci_dev_lock(hdev); 1986 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 1987 sent->bdaddr_type); 1988 hci_dev_unlock(hdev); 1989 1990 return rp->status; 1991 } 1992 1993 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data, 1994 struct sk_buff *skb) 1995 { 1996 struct hci_ev_status *rp = data; 1997 1998 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1999 2000 if (rp->status) 2001 return rp->status; 2002 2003 hci_dev_lock(hdev); 2004 hci_bdaddr_list_clear(&hdev->le_resolv_list); 2005 hci_dev_unlock(hdev); 2006 2007 return rp->status; 2008 } 2009 2010 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data, 2011 struct sk_buff *skb) 2012 { 2013 struct hci_rp_le_read_resolv_list_size *rp = data; 2014 2015 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); 2016 2017 if (rp->status) 2018 return rp->status; 2019 2020 hdev->le_resolv_list_size = rp->size; 2021 2022 return rp->status; 2023 } 2024 2025 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data, 2026 struct sk_buff *skb) 2027 { 2028 struct hci_ev_status *rp = data; 2029 __u8 *sent; 2030 2031 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2032 2033 if (rp->status) 2034 return rp->status; 2035 2036 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); 2037 if (!sent) 2038 return rp->status; 2039 2040 hci_dev_lock(hdev); 2041 2042 if (*sent) 2043 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION); 2044 else 2045 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); 2046 2047 hci_dev_unlock(hdev); 2048 2049 return rp->status; 2050 } 2051 2052 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data, 2053 struct sk_buff *skb) 2054 { 2055 struct hci_rp_le_read_max_data_len *rp = data; 2056 2057 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2058 2059 if (rp->status) 2060 return rp->status; 2061 2062 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); 2063 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); 2064 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); 2065 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); 2066 2067 return rp->status; 2068 } 2069 2070 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data, 2071 struct sk_buff *skb) 2072 { 2073 struct hci_cp_write_le_host_supported *sent; 2074 struct hci_ev_status *rp = data; 2075 2076 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2077 2078 if (rp->status) 2079 return rp->status; 2080 2081 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 2082 if (!sent) 2083 return rp->status; 2084 2085 hci_dev_lock(hdev); 2086 2087 if (sent->le) { 2088 hdev->features[1][0] |= LMP_HOST_LE; 2089 hci_dev_set_flag(hdev, HCI_LE_ENABLED); 2090 } else { 2091 hdev->features[1][0] &= ~LMP_HOST_LE; 2092 hci_dev_clear_flag(hdev, HCI_LE_ENABLED); 2093 hci_dev_clear_flag(hdev, HCI_ADVERTISING); 2094 } 2095 2096 if (sent->simul) 2097 hdev->features[1][0] |= LMP_HOST_LE_BREDR; 2098 else 2099 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; 2100 2101 hci_dev_unlock(hdev); 2102 2103 return rp->status; 2104 } 2105 2106 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, 2107 struct sk_buff *skb) 2108 { 2109 struct hci_cp_le_set_adv_param *cp; 2110 struct hci_ev_status *rp = data; 2111 2112 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2113 2114 if (rp->status) 2115 return rp->status; 2116 2117 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); 2118 if (!cp) 2119 return rp->status; 2120 2121 hci_dev_lock(hdev); 2122 hdev->adv_addr_type = cp->own_address_type; 2123 hci_dev_unlock(hdev); 2124 2125 return rp->status; 2126 } 2127 2128 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, 2129 struct sk_buff *skb) 2130 { 2131 struct hci_rp_le_set_ext_adv_params *rp = data; 2132 struct hci_cp_le_set_ext_adv_params *cp; 2133 struct adv_info *adv_instance; 2134 2135 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2136 2137 if (rp->status) 2138 return rp->status; 2139 2140 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); 2141 if (!cp) 2142 return rp->status; 2143 2144 hci_dev_lock(hdev); 2145 hdev->adv_addr_type = cp->own_addr_type; 2146 if (!cp->handle) { 2147 /* Store in hdev for instance 0 */ 2148 hdev->adv_tx_power = rp->tx_power; 2149 } else { 2150 adv_instance = hci_find_adv_instance(hdev, cp->handle); 2151 if (adv_instance) 2152 adv_instance->tx_power = rp->tx_power; 2153 } 2154 /* Update adv data as tx power is known now */ 2155 hci_req_update_adv_data(hdev, cp->handle); 2156 2157 hci_dev_unlock(hdev); 2158 2159 return rp->status; 2160 } 2161 2162 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, 2163 struct sk_buff *skb) 2164 { 2165 struct hci_rp_read_rssi *rp = data; 2166 struct hci_conn *conn; 2167 2168 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2169 2170 if (rp->status) 2171 return rp->status; 2172 2173 hci_dev_lock(hdev); 2174 2175 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2176 if (conn) 2177 conn->rssi = rp->rssi; 2178 2179 hci_dev_unlock(hdev); 2180 2181 return rp->status; 2182 } 2183 2184 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data, 2185 struct sk_buff *skb) 2186 { 2187 struct hci_cp_read_tx_power *sent; 2188 struct hci_rp_read_tx_power *rp = data; 2189 struct hci_conn *conn; 2190 2191 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2192 2193 if (rp->status) 2194 return rp->status; 2195 2196 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); 2197 if (!sent) 2198 return rp->status; 2199 2200 hci_dev_lock(hdev); 2201 2202 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2203 if (!conn) 2204 goto unlock; 2205 2206 switch (sent->type) { 2207 case 0x00: 2208 conn->tx_power = rp->tx_power; 2209 break; 2210 case 0x01: 2211 conn->max_tx_power = rp->tx_power; 2212 break; 2213 } 2214 2215 unlock: 2216 hci_dev_unlock(hdev); 2217 return rp->status; 2218 } 2219 2220 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data, 2221 struct sk_buff *skb) 2222 { 2223 struct hci_ev_status *rp = data; 2224 u8 *mode; 2225 2226 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2227 2228 if (rp->status) 2229 return rp->status; 2230 2231 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); 2232 if (mode) 2233 hdev->ssp_debug_mode = *mode; 2234 2235 return rp->status; 2236 } 2237 2238 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 2239 { 2240 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2241 2242 if (status) { 2243 hci_conn_check_pending(hdev); 2244 return; 2245 } 2246 2247 set_bit(HCI_INQUIRY, &hdev->flags); 2248 } 2249 2250 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 2251 { 2252 struct hci_cp_create_conn *cp; 2253 struct hci_conn *conn; 2254 2255 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2256 2257 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 2258 if (!cp) 2259 return; 2260 2261 hci_dev_lock(hdev); 2262 2263 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2264 2265 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); 2266 2267 if (status) { 2268 if (conn && conn->state == BT_CONNECT) { 2269 if (status != 0x0c || conn->attempt > 2) { 2270 conn->state = BT_CLOSED; 2271 hci_connect_cfm(conn, status); 2272 hci_conn_del(conn); 2273 } else 2274 conn->state = BT_CONNECT2; 2275 } 2276 } else { 2277 if (!conn) { 2278 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, 2279 HCI_ROLE_MASTER); 2280 if (!conn) 2281 bt_dev_err(hdev, "no memory for new connection"); 2282 } 2283 } 2284 2285 hci_dev_unlock(hdev); 2286 } 2287 2288 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 2289 { 2290 struct hci_cp_add_sco *cp; 2291 struct hci_conn *acl, *sco; 2292 __u16 handle; 2293 2294 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2295 2296 if (!status) 2297 return; 2298 2299 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 2300 if (!cp) 2301 return; 2302 2303 handle = __le16_to_cpu(cp->handle); 2304 2305 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2306 2307 hci_dev_lock(hdev); 2308 2309 acl = hci_conn_hash_lookup_handle(hdev, handle); 2310 if (acl) { 2311 sco = acl->link; 2312 if (sco) { 2313 sco->state = BT_CLOSED; 2314 2315 hci_connect_cfm(sco, status); 2316 hci_conn_del(sco); 2317 } 2318 } 2319 2320 hci_dev_unlock(hdev); 2321 } 2322 2323 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 2324 { 2325 struct hci_cp_auth_requested *cp; 2326 struct hci_conn *conn; 2327 2328 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2329 2330 if (!status) 2331 return; 2332 2333 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 2334 if (!cp) 2335 return; 2336 2337 hci_dev_lock(hdev); 2338 2339 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2340 if (conn) { 2341 if (conn->state == BT_CONFIG) { 2342 hci_connect_cfm(conn, status); 2343 hci_conn_drop(conn); 2344 } 2345 } 2346 2347 hci_dev_unlock(hdev); 2348 } 2349 2350 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 2351 { 2352 struct hci_cp_set_conn_encrypt *cp; 2353 struct hci_conn *conn; 2354 2355 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2356 2357 if (!status) 2358 return; 2359 2360 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 2361 if (!cp) 2362 return; 2363 2364 hci_dev_lock(hdev); 2365 2366 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2367 if (conn) { 2368 if (conn->state == BT_CONFIG) { 2369 hci_connect_cfm(conn, status); 2370 hci_conn_drop(conn); 2371 } 2372 } 2373 2374 hci_dev_unlock(hdev); 2375 } 2376 2377 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 2378 struct hci_conn *conn) 2379 { 2380 if (conn->state != BT_CONFIG || !conn->out) 2381 return 0; 2382 2383 if (conn->pending_sec_level == BT_SECURITY_SDP) 2384 return 0; 2385 2386 /* Only request authentication for SSP connections or non-SSP 2387 * devices with sec_level MEDIUM or HIGH or if MITM protection 2388 * is requested. 2389 */ 2390 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 2391 conn->pending_sec_level != BT_SECURITY_FIPS && 2392 conn->pending_sec_level != BT_SECURITY_HIGH && 2393 conn->pending_sec_level != BT_SECURITY_MEDIUM) 2394 return 0; 2395 2396 return 1; 2397 } 2398 2399 static int hci_resolve_name(struct hci_dev *hdev, 2400 struct inquiry_entry *e) 2401 { 2402 struct hci_cp_remote_name_req cp; 2403 2404 memset(&cp, 0, sizeof(cp)); 2405 2406 bacpy(&cp.bdaddr, &e->data.bdaddr); 2407 cp.pscan_rep_mode = e->data.pscan_rep_mode; 2408 cp.pscan_mode = e->data.pscan_mode; 2409 cp.clock_offset = e->data.clock_offset; 2410 2411 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2412 } 2413 2414 static bool hci_resolve_next_name(struct hci_dev *hdev) 2415 { 2416 struct discovery_state *discov = &hdev->discovery; 2417 struct inquiry_entry *e; 2418 2419 if (list_empty(&discov->resolve)) 2420 return false; 2421 2422 /* We should stop if we already spent too much time resolving names. */ 2423 if (time_after(jiffies, discov->name_resolve_timeout)) { 2424 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); 2425 return false; 2426 } 2427 2428 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2429 if (!e) 2430 return false; 2431 2432 if (hci_resolve_name(hdev, e) == 0) { 2433 e->name_state = NAME_PENDING; 2434 return true; 2435 } 2436 2437 return false; 2438 } 2439 2440 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2441 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2442 { 2443 struct discovery_state *discov = &hdev->discovery; 2444 struct inquiry_entry *e; 2445 2446 /* Update the mgmt connected state if necessary. Be careful with 2447 * conn objects that exist but are not (yet) connected however. 2448 * Only those in BT_CONFIG or BT_CONNECTED states can be 2449 * considered connected. 2450 */ 2451 if (conn && 2452 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && 2453 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2454 mgmt_device_connected(hdev, conn, name, name_len); 2455 2456 if (discov->state == DISCOVERY_STOPPED) 2457 return; 2458 2459 if (discov->state == DISCOVERY_STOPPING) 2460 goto discov_complete; 2461 2462 if (discov->state != DISCOVERY_RESOLVING) 2463 return; 2464 2465 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2466 /* If the device was not found in a list of found devices names of which 2467 * are pending. there is no need to continue resolving a next name as it 2468 * will be done upon receiving another Remote Name Request Complete 2469 * Event */ 2470 if (!e) 2471 return; 2472 2473 list_del(&e->list); 2474 2475 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; 2476 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, 2477 name, name_len); 2478 2479 if (hci_resolve_next_name(hdev)) 2480 return; 2481 2482 discov_complete: 2483 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2484 } 2485 2486 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2487 { 2488 struct hci_cp_remote_name_req *cp; 2489 struct hci_conn *conn; 2490 2491 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2492 2493 /* If successful wait for the name req complete event before 2494 * checking for the need to do authentication */ 2495 if (!status) 2496 return; 2497 2498 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2499 if (!cp) 2500 return; 2501 2502 hci_dev_lock(hdev); 2503 2504 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2505 2506 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2507 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2508 2509 if (!conn) 2510 goto unlock; 2511 2512 if (!hci_outgoing_auth_needed(hdev, conn)) 2513 goto unlock; 2514 2515 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2516 struct hci_cp_auth_requested auth_cp; 2517 2518 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2519 2520 auth_cp.handle = __cpu_to_le16(conn->handle); 2521 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2522 sizeof(auth_cp), &auth_cp); 2523 } 2524 2525 unlock: 2526 hci_dev_unlock(hdev); 2527 } 2528 2529 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2530 { 2531 struct hci_cp_read_remote_features *cp; 2532 struct hci_conn *conn; 2533 2534 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2535 2536 if (!status) 2537 return; 2538 2539 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2540 if (!cp) 2541 return; 2542 2543 hci_dev_lock(hdev); 2544 2545 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2546 if (conn) { 2547 if (conn->state == BT_CONFIG) { 2548 hci_connect_cfm(conn, status); 2549 hci_conn_drop(conn); 2550 } 2551 } 2552 2553 hci_dev_unlock(hdev); 2554 } 2555 2556 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2557 { 2558 struct hci_cp_read_remote_ext_features *cp; 2559 struct hci_conn *conn; 2560 2561 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2562 2563 if (!status) 2564 return; 2565 2566 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2567 if (!cp) 2568 return; 2569 2570 hci_dev_lock(hdev); 2571 2572 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2573 if (conn) { 2574 if (conn->state == BT_CONFIG) { 2575 hci_connect_cfm(conn, status); 2576 hci_conn_drop(conn); 2577 } 2578 } 2579 2580 hci_dev_unlock(hdev); 2581 } 2582 2583 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2584 { 2585 struct hci_cp_setup_sync_conn *cp; 2586 struct hci_conn *acl, *sco; 2587 __u16 handle; 2588 2589 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2590 2591 if (!status) 2592 return; 2593 2594 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2595 if (!cp) 2596 return; 2597 2598 handle = __le16_to_cpu(cp->handle); 2599 2600 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2601 2602 hci_dev_lock(hdev); 2603 2604 acl = hci_conn_hash_lookup_handle(hdev, handle); 2605 if (acl) { 2606 sco = acl->link; 2607 if (sco) { 2608 sco->state = BT_CLOSED; 2609 2610 hci_connect_cfm(sco, status); 2611 hci_conn_del(sco); 2612 } 2613 } 2614 2615 hci_dev_unlock(hdev); 2616 } 2617 2618 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2619 { 2620 struct hci_cp_enhanced_setup_sync_conn *cp; 2621 struct hci_conn *acl, *sco; 2622 __u16 handle; 2623 2624 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2625 2626 if (!status) 2627 return; 2628 2629 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2630 if (!cp) 2631 return; 2632 2633 handle = __le16_to_cpu(cp->handle); 2634 2635 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2636 2637 hci_dev_lock(hdev); 2638 2639 acl = hci_conn_hash_lookup_handle(hdev, handle); 2640 if (acl) { 2641 sco = acl->link; 2642 if (sco) { 2643 sco->state = BT_CLOSED; 2644 2645 hci_connect_cfm(sco, status); 2646 hci_conn_del(sco); 2647 } 2648 } 2649 2650 hci_dev_unlock(hdev); 2651 } 2652 2653 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2654 { 2655 struct hci_cp_sniff_mode *cp; 2656 struct hci_conn *conn; 2657 2658 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2659 2660 if (!status) 2661 return; 2662 2663 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2664 if (!cp) 2665 return; 2666 2667 hci_dev_lock(hdev); 2668 2669 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2670 if (conn) { 2671 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2672 2673 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2674 hci_sco_setup(conn, status); 2675 } 2676 2677 hci_dev_unlock(hdev); 2678 } 2679 2680 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2681 { 2682 struct hci_cp_exit_sniff_mode *cp; 2683 struct hci_conn *conn; 2684 2685 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2686 2687 if (!status) 2688 return; 2689 2690 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2691 if (!cp) 2692 return; 2693 2694 hci_dev_lock(hdev); 2695 2696 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2697 if (conn) { 2698 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2699 2700 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2701 hci_sco_setup(conn, status); 2702 } 2703 2704 hci_dev_unlock(hdev); 2705 } 2706 2707 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2708 { 2709 struct hci_cp_disconnect *cp; 2710 struct hci_conn_params *params; 2711 struct hci_conn *conn; 2712 bool mgmt_conn; 2713 2714 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2715 2716 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended 2717 * otherwise cleanup the connection immediately. 2718 */ 2719 if (!status && !hdev->suspended) 2720 return; 2721 2722 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2723 if (!cp) 2724 return; 2725 2726 hci_dev_lock(hdev); 2727 2728 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2729 if (!conn) 2730 goto unlock; 2731 2732 if (status) { 2733 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2734 conn->dst_type, status); 2735 2736 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2737 hdev->cur_adv_instance = conn->adv_instance; 2738 hci_enable_advertising(hdev); 2739 } 2740 2741 goto done; 2742 } 2743 2744 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2745 2746 if (conn->type == ACL_LINK) { 2747 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2748 hci_remove_link_key(hdev, &conn->dst); 2749 } 2750 2751 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2752 if (params) { 2753 switch (params->auto_connect) { 2754 case HCI_AUTO_CONN_LINK_LOSS: 2755 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2756 break; 2757 fallthrough; 2758 2759 case HCI_AUTO_CONN_DIRECT: 2760 case HCI_AUTO_CONN_ALWAYS: 2761 list_del_init(¶ms->action); 2762 list_add(¶ms->action, &hdev->pend_le_conns); 2763 break; 2764 2765 default: 2766 break; 2767 } 2768 } 2769 2770 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2771 cp->reason, mgmt_conn); 2772 2773 hci_disconn_cfm(conn, cp->reason); 2774 2775 done: 2776 /* If the disconnection failed for any reason, the upper layer 2777 * does not retry to disconnect in current implementation. 2778 * Hence, we need to do some basic cleanup here and re-enable 2779 * advertising if necessary. 2780 */ 2781 hci_conn_del(conn); 2782 unlock: 2783 hci_dev_unlock(hdev); 2784 } 2785 2786 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2787 { 2788 /* When using controller based address resolution, then the new 2789 * address types 0x02 and 0x03 are used. These types need to be 2790 * converted back into either public address or random address type 2791 */ 2792 switch (type) { 2793 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2794 if (resolved) 2795 *resolved = true; 2796 return ADDR_LE_DEV_PUBLIC; 2797 case ADDR_LE_DEV_RANDOM_RESOLVED: 2798 if (resolved) 2799 *resolved = true; 2800 return ADDR_LE_DEV_RANDOM; 2801 } 2802 2803 if (resolved) 2804 *resolved = false; 2805 return type; 2806 } 2807 2808 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2809 u8 peer_addr_type, u8 own_address_type, 2810 u8 filter_policy) 2811 { 2812 struct hci_conn *conn; 2813 2814 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2815 peer_addr_type); 2816 if (!conn) 2817 return; 2818 2819 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2820 2821 /* Store the initiator and responder address information which 2822 * is needed for SMP. These values will not change during the 2823 * lifetime of the connection. 2824 */ 2825 conn->init_addr_type = own_address_type; 2826 if (own_address_type == ADDR_LE_DEV_RANDOM) 2827 bacpy(&conn->init_addr, &hdev->random_addr); 2828 else 2829 bacpy(&conn->init_addr, &hdev->bdaddr); 2830 2831 conn->resp_addr_type = peer_addr_type; 2832 bacpy(&conn->resp_addr, peer_addr); 2833 2834 /* We don't want the connection attempt to stick around 2835 * indefinitely since LE doesn't have a page timeout concept 2836 * like BR/EDR. Set a timer for any connection that doesn't use 2837 * the accept list for connecting. 2838 */ 2839 if (filter_policy == HCI_LE_USE_PEER_ADDR) 2840 queue_delayed_work(conn->hdev->workqueue, 2841 &conn->le_conn_timeout, 2842 conn->conn_timeout); 2843 } 2844 2845 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2846 { 2847 struct hci_cp_le_create_conn *cp; 2848 2849 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2850 2851 /* All connection failure handling is taken care of by the 2852 * hci_conn_failed function which is triggered by the HCI 2853 * request completion callbacks used for connecting. 2854 */ 2855 if (status) 2856 return; 2857 2858 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2859 if (!cp) 2860 return; 2861 2862 hci_dev_lock(hdev); 2863 2864 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2865 cp->own_address_type, cp->filter_policy); 2866 2867 hci_dev_unlock(hdev); 2868 } 2869 2870 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2871 { 2872 struct hci_cp_le_ext_create_conn *cp; 2873 2874 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2875 2876 /* All connection failure handling is taken care of by the 2877 * hci_conn_failed function which is triggered by the HCI 2878 * request completion callbacks used for connecting. 2879 */ 2880 if (status) 2881 return; 2882 2883 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2884 if (!cp) 2885 return; 2886 2887 hci_dev_lock(hdev); 2888 2889 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2890 cp->own_addr_type, cp->filter_policy); 2891 2892 hci_dev_unlock(hdev); 2893 } 2894 2895 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2896 { 2897 struct hci_cp_le_read_remote_features *cp; 2898 struct hci_conn *conn; 2899 2900 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2901 2902 if (!status) 2903 return; 2904 2905 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2906 if (!cp) 2907 return; 2908 2909 hci_dev_lock(hdev); 2910 2911 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2912 if (conn) { 2913 if (conn->state == BT_CONFIG) { 2914 hci_connect_cfm(conn, status); 2915 hci_conn_drop(conn); 2916 } 2917 } 2918 2919 hci_dev_unlock(hdev); 2920 } 2921 2922 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2923 { 2924 struct hci_cp_le_start_enc *cp; 2925 struct hci_conn *conn; 2926 2927 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2928 2929 if (!status) 2930 return; 2931 2932 hci_dev_lock(hdev); 2933 2934 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2935 if (!cp) 2936 goto unlock; 2937 2938 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2939 if (!conn) 2940 goto unlock; 2941 2942 if (conn->state != BT_CONNECTED) 2943 goto unlock; 2944 2945 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2946 hci_conn_drop(conn); 2947 2948 unlock: 2949 hci_dev_unlock(hdev); 2950 } 2951 2952 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2953 { 2954 struct hci_cp_switch_role *cp; 2955 struct hci_conn *conn; 2956 2957 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2958 2959 if (!status) 2960 return; 2961 2962 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2963 if (!cp) 2964 return; 2965 2966 hci_dev_lock(hdev); 2967 2968 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2969 if (conn) 2970 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2971 2972 hci_dev_unlock(hdev); 2973 } 2974 2975 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, 2976 struct sk_buff *skb) 2977 { 2978 struct hci_ev_status *ev = data; 2979 struct discovery_state *discov = &hdev->discovery; 2980 struct inquiry_entry *e; 2981 2982 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 2983 2984 hci_conn_check_pending(hdev); 2985 2986 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2987 return; 2988 2989 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2990 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2991 2992 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2993 return; 2994 2995 hci_dev_lock(hdev); 2996 2997 if (discov->state != DISCOVERY_FINDING) 2998 goto unlock; 2999 3000 if (list_empty(&discov->resolve)) { 3001 /* When BR/EDR inquiry is active and no LE scanning is in 3002 * progress, then change discovery state to indicate completion. 3003 * 3004 * When running LE scanning and BR/EDR inquiry simultaneously 3005 * and the LE scan already finished, then change the discovery 3006 * state to indicate completion. 3007 */ 3008 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3009 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3010 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3011 goto unlock; 3012 } 3013 3014 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 3015 if (e && hci_resolve_name(hdev, e) == 0) { 3016 e->name_state = NAME_PENDING; 3017 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 3018 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; 3019 } else { 3020 /* When BR/EDR inquiry is active and no LE scanning is in 3021 * progress, then change discovery state to indicate completion. 3022 * 3023 * When running LE scanning and BR/EDR inquiry simultaneously 3024 * and the LE scan already finished, then change the discovery 3025 * state to indicate completion. 3026 */ 3027 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3028 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3029 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3030 } 3031 3032 unlock: 3033 hci_dev_unlock(hdev); 3034 } 3035 3036 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, 3037 struct sk_buff *skb) 3038 { 3039 struct hci_ev_inquiry_result *ev = edata; 3040 struct inquiry_data data; 3041 int i; 3042 3043 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, 3044 flex_array_size(ev, info, ev->num))) 3045 return; 3046 3047 bt_dev_dbg(hdev, "num %d", ev->num); 3048 3049 if (!ev->num) 3050 return; 3051 3052 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3053 return; 3054 3055 hci_dev_lock(hdev); 3056 3057 for (i = 0; i < ev->num; i++) { 3058 struct inquiry_info *info = &ev->info[i]; 3059 u32 flags; 3060 3061 bacpy(&data.bdaddr, &info->bdaddr); 3062 data.pscan_rep_mode = info->pscan_rep_mode; 3063 data.pscan_period_mode = info->pscan_period_mode; 3064 data.pscan_mode = info->pscan_mode; 3065 memcpy(data.dev_class, info->dev_class, 3); 3066 data.clock_offset = info->clock_offset; 3067 data.rssi = HCI_RSSI_INVALID; 3068 data.ssp_mode = 0x00; 3069 3070 flags = hci_inquiry_cache_update(hdev, &data, false); 3071 3072 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3073 info->dev_class, HCI_RSSI_INVALID, 3074 flags, NULL, 0, NULL, 0); 3075 } 3076 3077 hci_dev_unlock(hdev); 3078 } 3079 3080 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, 3081 struct sk_buff *skb) 3082 { 3083 struct hci_ev_conn_complete *ev = data; 3084 struct hci_conn *conn; 3085 u8 status = ev->status; 3086 3087 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3088 3089 hci_dev_lock(hdev); 3090 3091 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 3092 if (!conn) { 3093 /* In case of error status and there is no connection pending 3094 * just unlock as there is nothing to cleanup. 3095 */ 3096 if (ev->status) 3097 goto unlock; 3098 3099 /* Connection may not exist if auto-connected. Check the bredr 3100 * allowlist to see if this device is allowed to auto connect. 3101 * If link is an ACL type, create a connection class 3102 * automatically. 3103 * 3104 * Auto-connect will only occur if the event filter is 3105 * programmed with a given address. Right now, event filter is 3106 * only used during suspend. 3107 */ 3108 if (ev->link_type == ACL_LINK && 3109 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 3110 &ev->bdaddr, 3111 BDADDR_BREDR)) { 3112 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3113 HCI_ROLE_SLAVE); 3114 if (!conn) { 3115 bt_dev_err(hdev, "no memory for new conn"); 3116 goto unlock; 3117 } 3118 } else { 3119 if (ev->link_type != SCO_LINK) 3120 goto unlock; 3121 3122 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 3123 &ev->bdaddr); 3124 if (!conn) 3125 goto unlock; 3126 3127 conn->type = SCO_LINK; 3128 } 3129 } 3130 3131 /* The HCI_Connection_Complete event is only sent once per connection. 3132 * Processing it more than once per connection can corrupt kernel memory. 3133 * 3134 * As the connection handle is set here for the first time, it indicates 3135 * whether the connection is already set up. 3136 */ 3137 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 3138 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 3139 goto unlock; 3140 } 3141 3142 if (!status) { 3143 conn->handle = __le16_to_cpu(ev->handle); 3144 if (conn->handle > HCI_CONN_HANDLE_MAX) { 3145 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 3146 conn->handle, HCI_CONN_HANDLE_MAX); 3147 status = HCI_ERROR_INVALID_PARAMETERS; 3148 goto done; 3149 } 3150 3151 if (conn->type == ACL_LINK) { 3152 conn->state = BT_CONFIG; 3153 hci_conn_hold(conn); 3154 3155 if (!conn->out && !hci_conn_ssp_enabled(conn) && 3156 !hci_find_link_key(hdev, &ev->bdaddr)) 3157 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3158 else 3159 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3160 } else 3161 conn->state = BT_CONNECTED; 3162 3163 hci_debugfs_create_conn(conn); 3164 hci_conn_add_sysfs(conn); 3165 3166 if (test_bit(HCI_AUTH, &hdev->flags)) 3167 set_bit(HCI_CONN_AUTH, &conn->flags); 3168 3169 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 3170 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3171 3172 /* Get remote features */ 3173 if (conn->type == ACL_LINK) { 3174 struct hci_cp_read_remote_features cp; 3175 cp.handle = ev->handle; 3176 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 3177 sizeof(cp), &cp); 3178 3179 hci_update_scan(hdev); 3180 } 3181 3182 /* Set packet type for incoming connection */ 3183 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 3184 struct hci_cp_change_conn_ptype cp; 3185 cp.handle = ev->handle; 3186 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3187 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 3188 &cp); 3189 } 3190 } 3191 3192 if (conn->type == ACL_LINK) 3193 hci_sco_setup(conn, ev->status); 3194 3195 done: 3196 if (status) { 3197 hci_conn_failed(conn, status); 3198 } else if (ev->link_type == SCO_LINK) { 3199 switch (conn->setting & SCO_AIRMODE_MASK) { 3200 case SCO_AIRMODE_CVSD: 3201 if (hdev->notify) 3202 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 3203 break; 3204 } 3205 3206 hci_connect_cfm(conn, status); 3207 } 3208 3209 unlock: 3210 hci_dev_unlock(hdev); 3211 3212 hci_conn_check_pending(hdev); 3213 } 3214 3215 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 3216 { 3217 struct hci_cp_reject_conn_req cp; 3218 3219 bacpy(&cp.bdaddr, bdaddr); 3220 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 3221 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 3222 } 3223 3224 static void hci_conn_request_evt(struct hci_dev *hdev, void *data, 3225 struct sk_buff *skb) 3226 { 3227 struct hci_ev_conn_request *ev = data; 3228 int mask = hdev->link_mode; 3229 struct inquiry_entry *ie; 3230 struct hci_conn *conn; 3231 __u8 flags = 0; 3232 3233 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); 3234 3235 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 3236 &flags); 3237 3238 if (!(mask & HCI_LM_ACCEPT)) { 3239 hci_reject_conn(hdev, &ev->bdaddr); 3240 return; 3241 } 3242 3243 hci_dev_lock(hdev); 3244 3245 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, 3246 BDADDR_BREDR)) { 3247 hci_reject_conn(hdev, &ev->bdaddr); 3248 goto unlock; 3249 } 3250 3251 /* Require HCI_CONNECTABLE or an accept list entry to accept the 3252 * connection. These features are only touched through mgmt so 3253 * only do the checks if HCI_MGMT is set. 3254 */ 3255 if (hci_dev_test_flag(hdev, HCI_MGMT) && 3256 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 3257 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 3258 BDADDR_BREDR)) { 3259 hci_reject_conn(hdev, &ev->bdaddr); 3260 goto unlock; 3261 } 3262 3263 /* Connection accepted */ 3264 3265 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3266 if (ie) 3267 memcpy(ie->data.dev_class, ev->dev_class, 3); 3268 3269 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 3270 &ev->bdaddr); 3271 if (!conn) { 3272 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3273 HCI_ROLE_SLAVE); 3274 if (!conn) { 3275 bt_dev_err(hdev, "no memory for new connection"); 3276 goto unlock; 3277 } 3278 } 3279 3280 memcpy(conn->dev_class, ev->dev_class, 3); 3281 3282 hci_dev_unlock(hdev); 3283 3284 if (ev->link_type == ACL_LINK || 3285 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 3286 struct hci_cp_accept_conn_req cp; 3287 conn->state = BT_CONNECT; 3288 3289 bacpy(&cp.bdaddr, &ev->bdaddr); 3290 3291 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 3292 cp.role = 0x00; /* Become central */ 3293 else 3294 cp.role = 0x01; /* Remain peripheral */ 3295 3296 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 3297 } else if (!(flags & HCI_PROTO_DEFER)) { 3298 struct hci_cp_accept_sync_conn_req cp; 3299 conn->state = BT_CONNECT; 3300 3301 bacpy(&cp.bdaddr, &ev->bdaddr); 3302 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3303 3304 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 3305 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 3306 cp.max_latency = cpu_to_le16(0xffff); 3307 cp.content_format = cpu_to_le16(hdev->voice_setting); 3308 cp.retrans_effort = 0xff; 3309 3310 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 3311 &cp); 3312 } else { 3313 conn->state = BT_CONNECT2; 3314 hci_connect_cfm(conn, 0); 3315 } 3316 3317 return; 3318 unlock: 3319 hci_dev_unlock(hdev); 3320 } 3321 3322 static u8 hci_to_mgmt_reason(u8 err) 3323 { 3324 switch (err) { 3325 case HCI_ERROR_CONNECTION_TIMEOUT: 3326 return MGMT_DEV_DISCONN_TIMEOUT; 3327 case HCI_ERROR_REMOTE_USER_TERM: 3328 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3329 case HCI_ERROR_REMOTE_POWER_OFF: 3330 return MGMT_DEV_DISCONN_REMOTE; 3331 case HCI_ERROR_LOCAL_HOST_TERM: 3332 return MGMT_DEV_DISCONN_LOCAL_HOST; 3333 default: 3334 return MGMT_DEV_DISCONN_UNKNOWN; 3335 } 3336 } 3337 3338 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, 3339 struct sk_buff *skb) 3340 { 3341 struct hci_ev_disconn_complete *ev = data; 3342 u8 reason; 3343 struct hci_conn_params *params; 3344 struct hci_conn *conn; 3345 bool mgmt_connected; 3346 3347 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3348 3349 hci_dev_lock(hdev); 3350 3351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3352 if (!conn) 3353 goto unlock; 3354 3355 if (ev->status) { 3356 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3357 conn->dst_type, ev->status); 3358 goto unlock; 3359 } 3360 3361 conn->state = BT_CLOSED; 3362 3363 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3364 3365 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3366 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3367 else 3368 reason = hci_to_mgmt_reason(ev->reason); 3369 3370 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3371 reason, mgmt_connected); 3372 3373 if (conn->type == ACL_LINK) { 3374 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3375 hci_remove_link_key(hdev, &conn->dst); 3376 3377 hci_update_scan(hdev); 3378 } 3379 3380 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 3381 if (params) { 3382 switch (params->auto_connect) { 3383 case HCI_AUTO_CONN_LINK_LOSS: 3384 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3385 break; 3386 fallthrough; 3387 3388 case HCI_AUTO_CONN_DIRECT: 3389 case HCI_AUTO_CONN_ALWAYS: 3390 list_del_init(¶ms->action); 3391 list_add(¶ms->action, &hdev->pend_le_conns); 3392 hci_update_passive_scan(hdev); 3393 break; 3394 3395 default: 3396 break; 3397 } 3398 } 3399 3400 hci_disconn_cfm(conn, ev->reason); 3401 3402 /* Re-enable advertising if necessary, since it might 3403 * have been disabled by the connection. From the 3404 * HCI_LE_Set_Advertise_Enable command description in 3405 * the core specification (v4.0): 3406 * "The Controller shall continue advertising until the Host 3407 * issues an LE_Set_Advertise_Enable command with 3408 * Advertising_Enable set to 0x00 (Advertising is disabled) 3409 * or until a connection is created or until the Advertising 3410 * is timed out due to Directed Advertising." 3411 */ 3412 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3413 hdev->cur_adv_instance = conn->adv_instance; 3414 hci_enable_advertising(hdev); 3415 } 3416 3417 hci_conn_del(conn); 3418 3419 unlock: 3420 hci_dev_unlock(hdev); 3421 } 3422 3423 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, 3424 struct sk_buff *skb) 3425 { 3426 struct hci_ev_auth_complete *ev = data; 3427 struct hci_conn *conn; 3428 3429 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3430 3431 hci_dev_lock(hdev); 3432 3433 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3434 if (!conn) 3435 goto unlock; 3436 3437 if (!ev->status) { 3438 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3439 3440 if (!hci_conn_ssp_enabled(conn) && 3441 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 3442 bt_dev_info(hdev, "re-auth of legacy device is not possible."); 3443 } else { 3444 set_bit(HCI_CONN_AUTH, &conn->flags); 3445 conn->sec_level = conn->pending_sec_level; 3446 } 3447 } else { 3448 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3449 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3450 3451 mgmt_auth_failed(conn, ev->status); 3452 } 3453 3454 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3455 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 3456 3457 if (conn->state == BT_CONFIG) { 3458 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3459 struct hci_cp_set_conn_encrypt cp; 3460 cp.handle = ev->handle; 3461 cp.encrypt = 0x01; 3462 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3463 &cp); 3464 } else { 3465 conn->state = BT_CONNECTED; 3466 hci_connect_cfm(conn, ev->status); 3467 hci_conn_drop(conn); 3468 } 3469 } else { 3470 hci_auth_cfm(conn, ev->status); 3471 3472 hci_conn_hold(conn); 3473 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3474 hci_conn_drop(conn); 3475 } 3476 3477 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3478 if (!ev->status) { 3479 struct hci_cp_set_conn_encrypt cp; 3480 cp.handle = ev->handle; 3481 cp.encrypt = 0x01; 3482 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3483 &cp); 3484 } else { 3485 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3486 hci_encrypt_cfm(conn, ev->status); 3487 } 3488 } 3489 3490 unlock: 3491 hci_dev_unlock(hdev); 3492 } 3493 3494 static void hci_remote_name_evt(struct hci_dev *hdev, void *data, 3495 struct sk_buff *skb) 3496 { 3497 struct hci_ev_remote_name *ev = data; 3498 struct hci_conn *conn; 3499 3500 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3501 3502 hci_conn_check_pending(hdev); 3503 3504 hci_dev_lock(hdev); 3505 3506 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3507 3508 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3509 goto check_auth; 3510 3511 if (ev->status == 0) 3512 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3513 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3514 else 3515 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3516 3517 check_auth: 3518 if (!conn) 3519 goto unlock; 3520 3521 if (!hci_outgoing_auth_needed(hdev, conn)) 3522 goto unlock; 3523 3524 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3525 struct hci_cp_auth_requested cp; 3526 3527 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3528 3529 cp.handle = __cpu_to_le16(conn->handle); 3530 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3531 } 3532 3533 unlock: 3534 hci_dev_unlock(hdev); 3535 } 3536 3537 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, 3538 u16 opcode, struct sk_buff *skb) 3539 { 3540 const struct hci_rp_read_enc_key_size *rp; 3541 struct hci_conn *conn; 3542 u16 handle; 3543 3544 BT_DBG("%s status 0x%02x", hdev->name, status); 3545 3546 if (!skb || skb->len < sizeof(*rp)) { 3547 bt_dev_err(hdev, "invalid read key size response"); 3548 return; 3549 } 3550 3551 rp = (void *)skb->data; 3552 handle = le16_to_cpu(rp->handle); 3553 3554 hci_dev_lock(hdev); 3555 3556 conn = hci_conn_hash_lookup_handle(hdev, handle); 3557 if (!conn) 3558 goto unlock; 3559 3560 /* While unexpected, the read_enc_key_size command may fail. The most 3561 * secure approach is to then assume the key size is 0 to force a 3562 * disconnection. 3563 */ 3564 if (rp->status) { 3565 bt_dev_err(hdev, "failed to read key size for handle %u", 3566 handle); 3567 conn->enc_key_size = 0; 3568 } else { 3569 conn->enc_key_size = rp->key_size; 3570 } 3571 3572 hci_encrypt_cfm(conn, 0); 3573 3574 unlock: 3575 hci_dev_unlock(hdev); 3576 } 3577 3578 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, 3579 struct sk_buff *skb) 3580 { 3581 struct hci_ev_encrypt_change *ev = data; 3582 struct hci_conn *conn; 3583 3584 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3585 3586 hci_dev_lock(hdev); 3587 3588 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3589 if (!conn) 3590 goto unlock; 3591 3592 if (!ev->status) { 3593 if (ev->encrypt) { 3594 /* Encryption implies authentication */ 3595 set_bit(HCI_CONN_AUTH, &conn->flags); 3596 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3597 conn->sec_level = conn->pending_sec_level; 3598 3599 /* P-256 authentication key implies FIPS */ 3600 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3601 set_bit(HCI_CONN_FIPS, &conn->flags); 3602 3603 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3604 conn->type == LE_LINK) 3605 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3606 } else { 3607 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3608 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3609 } 3610 } 3611 3612 /* We should disregard the current RPA and generate a new one 3613 * whenever the encryption procedure fails. 3614 */ 3615 if (ev->status && conn->type == LE_LINK) { 3616 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3617 hci_adv_instances_set_rpa_expired(hdev, true); 3618 } 3619 3620 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3621 3622 /* Check link security requirements are met */ 3623 if (!hci_conn_check_link_mode(conn)) 3624 ev->status = HCI_ERROR_AUTH_FAILURE; 3625 3626 if (ev->status && conn->state == BT_CONNECTED) { 3627 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3628 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3629 3630 /* Notify upper layers so they can cleanup before 3631 * disconnecting. 3632 */ 3633 hci_encrypt_cfm(conn, ev->status); 3634 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3635 hci_conn_drop(conn); 3636 goto unlock; 3637 } 3638 3639 /* Try reading the encryption key size for encrypted ACL links */ 3640 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3641 struct hci_cp_read_enc_key_size cp; 3642 struct hci_request req; 3643 3644 /* Only send HCI_Read_Encryption_Key_Size if the 3645 * controller really supports it. If it doesn't, assume 3646 * the default size (16). 3647 */ 3648 if (!(hdev->commands[20] & 0x10)) { 3649 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3650 goto notify; 3651 } 3652 3653 hci_req_init(&req, hdev); 3654 3655 cp.handle = cpu_to_le16(conn->handle); 3656 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); 3657 3658 if (hci_req_run_skb(&req, read_enc_key_size_complete)) { 3659 bt_dev_err(hdev, "sending read key size failed"); 3660 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3661 goto notify; 3662 } 3663 3664 goto unlock; 3665 } 3666 3667 /* Set the default Authenticated Payload Timeout after 3668 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B 3669 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be 3670 * sent when the link is active and Encryption is enabled, the conn 3671 * type can be either LE or ACL and controller must support LMP Ping. 3672 * Ensure for AES-CCM encryption as well. 3673 */ 3674 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3675 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3676 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3677 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3678 struct hci_cp_write_auth_payload_to cp; 3679 3680 cp.handle = cpu_to_le16(conn->handle); 3681 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3682 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3683 sizeof(cp), &cp); 3684 } 3685 3686 notify: 3687 hci_encrypt_cfm(conn, ev->status); 3688 3689 unlock: 3690 hci_dev_unlock(hdev); 3691 } 3692 3693 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, 3694 struct sk_buff *skb) 3695 { 3696 struct hci_ev_change_link_key_complete *ev = data; 3697 struct hci_conn *conn; 3698 3699 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3700 3701 hci_dev_lock(hdev); 3702 3703 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3704 if (conn) { 3705 if (!ev->status) 3706 set_bit(HCI_CONN_SECURE, &conn->flags); 3707 3708 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3709 3710 hci_key_change_cfm(conn, ev->status); 3711 } 3712 3713 hci_dev_unlock(hdev); 3714 } 3715 3716 static void hci_remote_features_evt(struct hci_dev *hdev, void *data, 3717 struct sk_buff *skb) 3718 { 3719 struct hci_ev_remote_features *ev = data; 3720 struct hci_conn *conn; 3721 3722 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3723 3724 hci_dev_lock(hdev); 3725 3726 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3727 if (!conn) 3728 goto unlock; 3729 3730 if (!ev->status) 3731 memcpy(conn->features[0], ev->features, 8); 3732 3733 if (conn->state != BT_CONFIG) 3734 goto unlock; 3735 3736 if (!ev->status && lmp_ext_feat_capable(hdev) && 3737 lmp_ext_feat_capable(conn)) { 3738 struct hci_cp_read_remote_ext_features cp; 3739 cp.handle = ev->handle; 3740 cp.page = 0x01; 3741 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3742 sizeof(cp), &cp); 3743 goto unlock; 3744 } 3745 3746 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 3747 struct hci_cp_remote_name_req cp; 3748 memset(&cp, 0, sizeof(cp)); 3749 bacpy(&cp.bdaddr, &conn->dst); 3750 cp.pscan_rep_mode = 0x02; 3751 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3752 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3753 mgmt_device_connected(hdev, conn, NULL, 0); 3754 3755 if (!hci_outgoing_auth_needed(hdev, conn)) { 3756 conn->state = BT_CONNECTED; 3757 hci_connect_cfm(conn, ev->status); 3758 hci_conn_drop(conn); 3759 } 3760 3761 unlock: 3762 hci_dev_unlock(hdev); 3763 } 3764 3765 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3766 { 3767 cancel_delayed_work(&hdev->cmd_timer); 3768 3769 if (!test_bit(HCI_RESET, &hdev->flags)) { 3770 if (ncmd) { 3771 cancel_delayed_work(&hdev->ncmd_timer); 3772 atomic_set(&hdev->cmd_cnt, 1); 3773 } else { 3774 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 3775 schedule_delayed_work(&hdev->ncmd_timer, 3776 HCI_NCMD_TIMEOUT); 3777 } 3778 } 3779 } 3780 3781 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, 3782 struct sk_buff *skb) 3783 { 3784 struct hci_rp_le_read_buffer_size_v2 *rp = data; 3785 3786 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3787 3788 if (rp->status) 3789 return rp->status; 3790 3791 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); 3792 hdev->le_pkts = rp->acl_max_pkt; 3793 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); 3794 hdev->iso_pkts = rp->iso_max_pkt; 3795 3796 hdev->le_cnt = hdev->le_pkts; 3797 hdev->iso_cnt = hdev->iso_pkts; 3798 3799 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, 3800 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); 3801 3802 return rp->status; 3803 } 3804 3805 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, 3806 struct sk_buff *skb) 3807 { 3808 struct hci_rp_le_set_cig_params *rp = data; 3809 struct hci_conn *conn; 3810 int i = 0; 3811 3812 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3813 3814 hci_dev_lock(hdev); 3815 3816 if (rp->status) { 3817 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) { 3818 conn->state = BT_CLOSED; 3819 hci_connect_cfm(conn, rp->status); 3820 hci_conn_del(conn); 3821 } 3822 goto unlock; 3823 } 3824 3825 rcu_read_lock(); 3826 3827 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 3828 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id || 3829 conn->state == BT_CONNECTED) 3830 continue; 3831 3832 conn->handle = __le16_to_cpu(rp->handle[i++]); 3833 3834 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn, 3835 conn->handle, conn->link); 3836 3837 /* Create CIS if LE is already connected */ 3838 if (conn->link && conn->link->state == BT_CONNECTED) 3839 hci_le_create_cis(conn->link); 3840 3841 if (i == rp->num_handles) 3842 break; 3843 } 3844 3845 rcu_read_unlock(); 3846 3847 unlock: 3848 hci_dev_unlock(hdev); 3849 3850 return rp->status; 3851 } 3852 3853 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, 3854 struct sk_buff *skb) 3855 { 3856 struct hci_rp_le_setup_iso_path *rp = data; 3857 struct hci_cp_le_setup_iso_path *cp; 3858 struct hci_conn *conn; 3859 3860 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3861 3862 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); 3863 if (!cp) 3864 return rp->status; 3865 3866 hci_dev_lock(hdev); 3867 3868 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 3869 if (!conn) 3870 goto unlock; 3871 3872 if (rp->status) { 3873 hci_connect_cfm(conn, rp->status); 3874 hci_conn_del(conn); 3875 goto unlock; 3876 } 3877 3878 switch (cp->direction) { 3879 /* Input (Host to Controller) */ 3880 case 0x00: 3881 /* Only confirm connection if output only */ 3882 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu) 3883 hci_connect_cfm(conn, rp->status); 3884 break; 3885 /* Output (Controller to Host) */ 3886 case 0x01: 3887 /* Confirm connection since conn->iso_qos is always configured 3888 * last. 3889 */ 3890 hci_connect_cfm(conn, rp->status); 3891 break; 3892 } 3893 3894 unlock: 3895 hci_dev_unlock(hdev); 3896 return rp->status; 3897 } 3898 3899 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) 3900 { 3901 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3902 } 3903 3904 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, 3905 struct sk_buff *skb) 3906 { 3907 struct hci_ev_status *rp = data; 3908 struct hci_cp_le_set_per_adv_params *cp; 3909 3910 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3911 3912 if (rp->status) 3913 return rp->status; 3914 3915 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); 3916 if (!cp) 3917 return rp->status; 3918 3919 /* TODO: set the conn state */ 3920 return rp->status; 3921 } 3922 3923 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, 3924 struct sk_buff *skb) 3925 { 3926 struct hci_ev_status *rp = data; 3927 __u8 *sent; 3928 3929 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3930 3931 if (rp->status) 3932 return rp->status; 3933 3934 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); 3935 if (!sent) 3936 return rp->status; 3937 3938 hci_dev_lock(hdev); 3939 3940 if (*sent) 3941 hci_dev_set_flag(hdev, HCI_LE_PER_ADV); 3942 else 3943 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); 3944 3945 hci_dev_unlock(hdev); 3946 3947 return rp->status; 3948 } 3949 3950 #define HCI_CC_VL(_op, _func, _min, _max) \ 3951 { \ 3952 .op = _op, \ 3953 .func = _func, \ 3954 .min_len = _min, \ 3955 .max_len = _max, \ 3956 } 3957 3958 #define HCI_CC(_op, _func, _len) \ 3959 HCI_CC_VL(_op, _func, _len, _len) 3960 3961 #define HCI_CC_STATUS(_op, _func) \ 3962 HCI_CC(_op, _func, sizeof(struct hci_ev_status)) 3963 3964 static const struct hci_cc { 3965 u16 op; 3966 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 3967 u16 min_len; 3968 u16 max_len; 3969 } hci_cc_table[] = { 3970 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), 3971 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), 3972 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), 3973 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, 3974 hci_cc_remote_name_req_cancel), 3975 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, 3976 sizeof(struct hci_rp_role_discovery)), 3977 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, 3978 sizeof(struct hci_rp_read_link_policy)), 3979 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, 3980 sizeof(struct hci_rp_write_link_policy)), 3981 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, 3982 sizeof(struct hci_rp_read_def_link_policy)), 3983 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, 3984 hci_cc_write_def_link_policy), 3985 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), 3986 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, 3987 sizeof(struct hci_rp_read_stored_link_key)), 3988 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, 3989 sizeof(struct hci_rp_delete_stored_link_key)), 3990 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), 3991 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, 3992 sizeof(struct hci_rp_read_local_name)), 3993 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), 3994 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), 3995 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), 3996 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), 3997 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 3998 sizeof(struct hci_rp_read_class_of_dev)), 3999 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), 4000 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, 4001 sizeof(struct hci_rp_read_voice_setting)), 4002 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), 4003 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, 4004 sizeof(struct hci_rp_read_num_supported_iac)), 4005 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), 4006 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), 4007 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, 4008 sizeof(struct hci_rp_read_auth_payload_to)), 4009 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, 4010 sizeof(struct hci_rp_write_auth_payload_to)), 4011 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, 4012 sizeof(struct hci_rp_read_local_version)), 4013 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, 4014 sizeof(struct hci_rp_read_local_commands)), 4015 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, 4016 sizeof(struct hci_rp_read_local_features)), 4017 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, 4018 sizeof(struct hci_rp_read_local_ext_features)), 4019 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, 4020 sizeof(struct hci_rp_read_buffer_size)), 4021 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, 4022 sizeof(struct hci_rp_read_bd_addr)), 4023 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, 4024 sizeof(struct hci_rp_read_local_pairing_opts)), 4025 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, 4026 sizeof(struct hci_rp_read_page_scan_activity)), 4027 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 4028 hci_cc_write_page_scan_activity), 4029 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, 4030 sizeof(struct hci_rp_read_page_scan_type)), 4031 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), 4032 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, 4033 sizeof(struct hci_rp_read_data_block_size)), 4034 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, 4035 sizeof(struct hci_rp_read_flow_control_mode)), 4036 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, 4037 sizeof(struct hci_rp_read_local_amp_info)), 4038 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, 4039 sizeof(struct hci_rp_read_clock)), 4040 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, 4041 sizeof(struct hci_rp_read_inq_rsp_tx_power)), 4042 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, 4043 hci_cc_read_def_err_data_reporting, 4044 sizeof(struct hci_rp_read_def_err_data_reporting)), 4045 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 4046 hci_cc_write_def_err_data_reporting), 4047 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, 4048 sizeof(struct hci_rp_pin_code_reply)), 4049 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, 4050 sizeof(struct hci_rp_pin_code_neg_reply)), 4051 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, 4052 sizeof(struct hci_rp_read_local_oob_data)), 4053 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, 4054 sizeof(struct hci_rp_read_local_oob_ext_data)), 4055 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, 4056 sizeof(struct hci_rp_le_read_buffer_size)), 4057 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, 4058 sizeof(struct hci_rp_le_read_local_features)), 4059 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, 4060 sizeof(struct hci_rp_le_read_adv_tx_power)), 4061 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 4062 sizeof(struct hci_rp_user_confirm_reply)), 4063 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, 4064 sizeof(struct hci_rp_user_confirm_reply)), 4065 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, 4066 sizeof(struct hci_rp_user_confirm_reply)), 4067 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, 4068 sizeof(struct hci_rp_user_confirm_reply)), 4069 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), 4070 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), 4071 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), 4072 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), 4073 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 4074 hci_cc_le_read_accept_list_size, 4075 sizeof(struct hci_rp_le_read_accept_list_size)), 4076 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), 4077 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, 4078 hci_cc_le_add_to_accept_list), 4079 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, 4080 hci_cc_le_del_from_accept_list), 4081 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, 4082 sizeof(struct hci_rp_le_read_supported_states)), 4083 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, 4084 sizeof(struct hci_rp_le_read_def_data_len)), 4085 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, 4086 hci_cc_le_write_def_data_len), 4087 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, 4088 hci_cc_le_add_to_resolv_list), 4089 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, 4090 hci_cc_le_del_from_resolv_list), 4091 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, 4092 hci_cc_le_clear_resolv_list), 4093 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, 4094 sizeof(struct hci_rp_le_read_resolv_list_size)), 4095 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 4096 hci_cc_le_set_addr_resolution_enable), 4097 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, 4098 sizeof(struct hci_rp_le_read_max_data_len)), 4099 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, 4100 hci_cc_write_le_host_supported), 4101 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), 4102 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, 4103 sizeof(struct hci_rp_read_rssi)), 4104 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, 4105 sizeof(struct hci_rp_read_tx_power)), 4106 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), 4107 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, 4108 hci_cc_le_set_ext_scan_param), 4109 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, 4110 hci_cc_le_set_ext_scan_enable), 4111 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), 4112 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 4113 hci_cc_le_read_num_adv_sets, 4114 sizeof(struct hci_rp_le_read_num_supported_adv_sets)), 4115 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, 4116 sizeof(struct hci_rp_le_set_ext_adv_params)), 4117 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, 4118 hci_cc_le_set_ext_adv_enable), 4119 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 4120 hci_cc_le_set_adv_set_random_addr), 4121 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), 4122 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), 4123 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), 4124 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, 4125 hci_cc_le_set_per_adv_enable), 4126 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, 4127 sizeof(struct hci_rp_le_read_transmit_power)), 4128 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), 4129 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, 4130 sizeof(struct hci_rp_le_read_buffer_size_v2)), 4131 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, 4132 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), 4133 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path, 4134 sizeof(struct hci_rp_le_setup_iso_path)), 4135 }; 4136 4137 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, 4138 struct sk_buff *skb) 4139 { 4140 void *data; 4141 4142 if (skb->len < cc->min_len) { 4143 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", 4144 cc->op, skb->len, cc->min_len); 4145 return HCI_ERROR_UNSPECIFIED; 4146 } 4147 4148 /* Just warn if the length is over max_len size it still be possible to 4149 * partially parse the cc so leave to callback to decide if that is 4150 * acceptable. 4151 */ 4152 if (skb->len > cc->max_len) 4153 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", 4154 cc->op, skb->len, cc->max_len); 4155 4156 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); 4157 if (!data) 4158 return HCI_ERROR_UNSPECIFIED; 4159 4160 return cc->func(hdev, data, skb); 4161 } 4162 4163 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, 4164 struct sk_buff *skb, u16 *opcode, u8 *status, 4165 hci_req_complete_t *req_complete, 4166 hci_req_complete_skb_t *req_complete_skb) 4167 { 4168 struct hci_ev_cmd_complete *ev = data; 4169 int i; 4170 4171 *opcode = __le16_to_cpu(ev->opcode); 4172 4173 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4174 4175 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { 4176 if (hci_cc_table[i].op == *opcode) { 4177 *status = hci_cc_func(hdev, &hci_cc_table[i], skb); 4178 break; 4179 } 4180 } 4181 4182 if (i == ARRAY_SIZE(hci_cc_table)) { 4183 /* Unknown opcode, assume byte 0 contains the status, so 4184 * that e.g. __hci_cmd_sync() properly returns errors 4185 * for vendor specific commands send by HCI drivers. 4186 * If a vendor doesn't actually follow this convention we may 4187 * need to introduce a vendor CC table in order to properly set 4188 * the status. 4189 */ 4190 *status = skb->data[0]; 4191 } 4192 4193 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4194 4195 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 4196 req_complete_skb); 4197 4198 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4199 bt_dev_err(hdev, 4200 "unexpected event for opcode 0x%4.4x", *opcode); 4201 return; 4202 } 4203 4204 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4205 queue_work(hdev->workqueue, &hdev->cmd_work); 4206 } 4207 4208 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) 4209 { 4210 struct hci_cp_le_create_cis *cp; 4211 int i; 4212 4213 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4214 4215 if (!status) 4216 return; 4217 4218 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); 4219 if (!cp) 4220 return; 4221 4222 hci_dev_lock(hdev); 4223 4224 /* Remove connection if command failed */ 4225 for (i = 0; cp->num_cis; cp->num_cis--, i++) { 4226 struct hci_conn *conn; 4227 u16 handle; 4228 4229 handle = __le16_to_cpu(cp->cis[i].cis_handle); 4230 4231 conn = hci_conn_hash_lookup_handle(hdev, handle); 4232 if (conn) { 4233 conn->state = BT_CLOSED; 4234 hci_connect_cfm(conn, status); 4235 hci_conn_del(conn); 4236 } 4237 } 4238 4239 hci_dev_unlock(hdev); 4240 } 4241 4242 #define HCI_CS(_op, _func) \ 4243 { \ 4244 .op = _op, \ 4245 .func = _func, \ 4246 } 4247 4248 static const struct hci_cs { 4249 u16 op; 4250 void (*func)(struct hci_dev *hdev, __u8 status); 4251 } hci_cs_table[] = { 4252 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), 4253 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 4254 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 4255 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 4256 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 4257 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 4258 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 4259 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 4260 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4261 hci_cs_read_remote_ext_features), 4262 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4263 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, 4264 hci_cs_enhanced_setup_sync_conn), 4265 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), 4266 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), 4267 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), 4268 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), 4269 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 4270 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), 4271 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), 4272 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), 4273 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), 4274 }; 4275 4276 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, 4277 struct sk_buff *skb, u16 *opcode, u8 *status, 4278 hci_req_complete_t *req_complete, 4279 hci_req_complete_skb_t *req_complete_skb) 4280 { 4281 struct hci_ev_cmd_status *ev = data; 4282 int i; 4283 4284 *opcode = __le16_to_cpu(ev->opcode); 4285 *status = ev->status; 4286 4287 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4288 4289 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { 4290 if (hci_cs_table[i].op == *opcode) { 4291 hci_cs_table[i].func(hdev, ev->status); 4292 break; 4293 } 4294 } 4295 4296 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4297 4298 /* Indicate request completion if the command failed. Also, if 4299 * we're not waiting for a special event and we get a success 4300 * command status we should try to flag the request as completed 4301 * (since for this kind of commands there will not be a command 4302 * complete event). 4303 */ 4304 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { 4305 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 4306 req_complete_skb); 4307 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4308 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", 4309 *opcode); 4310 return; 4311 } 4312 } 4313 4314 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4315 queue_work(hdev->workqueue, &hdev->cmd_work); 4316 } 4317 4318 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, 4319 struct sk_buff *skb) 4320 { 4321 struct hci_ev_hardware_error *ev = data; 4322 4323 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); 4324 4325 hdev->hw_error_code = ev->code; 4326 4327 queue_work(hdev->req_workqueue, &hdev->error_reset); 4328 } 4329 4330 static void hci_role_change_evt(struct hci_dev *hdev, void *data, 4331 struct sk_buff *skb) 4332 { 4333 struct hci_ev_role_change *ev = data; 4334 struct hci_conn *conn; 4335 4336 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4337 4338 hci_dev_lock(hdev); 4339 4340 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4341 if (conn) { 4342 if (!ev->status) 4343 conn->role = ev->role; 4344 4345 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 4346 4347 hci_role_switch_cfm(conn, ev->status, ev->role); 4348 } 4349 4350 hci_dev_unlock(hdev); 4351 } 4352 4353 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, 4354 struct sk_buff *skb) 4355 { 4356 struct hci_ev_num_comp_pkts *ev = data; 4357 int i; 4358 4359 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, 4360 flex_array_size(ev, handles, ev->num))) 4361 return; 4362 4363 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 4364 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 4365 return; 4366 } 4367 4368 bt_dev_dbg(hdev, "num %d", ev->num); 4369 4370 for (i = 0; i < ev->num; i++) { 4371 struct hci_comp_pkts_info *info = &ev->handles[i]; 4372 struct hci_conn *conn; 4373 __u16 handle, count; 4374 4375 handle = __le16_to_cpu(info->handle); 4376 count = __le16_to_cpu(info->count); 4377 4378 conn = hci_conn_hash_lookup_handle(hdev, handle); 4379 if (!conn) 4380 continue; 4381 4382 conn->sent -= count; 4383 4384 switch (conn->type) { 4385 case ACL_LINK: 4386 hdev->acl_cnt += count; 4387 if (hdev->acl_cnt > hdev->acl_pkts) 4388 hdev->acl_cnt = hdev->acl_pkts; 4389 break; 4390 4391 case LE_LINK: 4392 if (hdev->le_pkts) { 4393 hdev->le_cnt += count; 4394 if (hdev->le_cnt > hdev->le_pkts) 4395 hdev->le_cnt = hdev->le_pkts; 4396 } else { 4397 hdev->acl_cnt += count; 4398 if (hdev->acl_cnt > hdev->acl_pkts) 4399 hdev->acl_cnt = hdev->acl_pkts; 4400 } 4401 break; 4402 4403 case SCO_LINK: 4404 hdev->sco_cnt += count; 4405 if (hdev->sco_cnt > hdev->sco_pkts) 4406 hdev->sco_cnt = hdev->sco_pkts; 4407 break; 4408 4409 case ISO_LINK: 4410 if (hdev->iso_pkts) { 4411 hdev->iso_cnt += count; 4412 if (hdev->iso_cnt > hdev->iso_pkts) 4413 hdev->iso_cnt = hdev->iso_pkts; 4414 } else if (hdev->le_pkts) { 4415 hdev->le_cnt += count; 4416 if (hdev->le_cnt > hdev->le_pkts) 4417 hdev->le_cnt = hdev->le_pkts; 4418 } else { 4419 hdev->acl_cnt += count; 4420 if (hdev->acl_cnt > hdev->acl_pkts) 4421 hdev->acl_cnt = hdev->acl_pkts; 4422 } 4423 break; 4424 4425 default: 4426 bt_dev_err(hdev, "unknown type %d conn %p", 4427 conn->type, conn); 4428 break; 4429 } 4430 } 4431 4432 queue_work(hdev->workqueue, &hdev->tx_work); 4433 } 4434 4435 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 4436 __u16 handle) 4437 { 4438 struct hci_chan *chan; 4439 4440 switch (hdev->dev_type) { 4441 case HCI_PRIMARY: 4442 return hci_conn_hash_lookup_handle(hdev, handle); 4443 case HCI_AMP: 4444 chan = hci_chan_lookup_handle(hdev, handle); 4445 if (chan) 4446 return chan->conn; 4447 break; 4448 default: 4449 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4450 break; 4451 } 4452 4453 return NULL; 4454 } 4455 4456 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, 4457 struct sk_buff *skb) 4458 { 4459 struct hci_ev_num_comp_blocks *ev = data; 4460 int i; 4461 4462 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, 4463 flex_array_size(ev, handles, ev->num_hndl))) 4464 return; 4465 4466 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 4467 bt_dev_err(hdev, "wrong event for mode %d", 4468 hdev->flow_ctl_mode); 4469 return; 4470 } 4471 4472 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, 4473 ev->num_hndl); 4474 4475 for (i = 0; i < ev->num_hndl; i++) { 4476 struct hci_comp_blocks_info *info = &ev->handles[i]; 4477 struct hci_conn *conn = NULL; 4478 __u16 handle, block_count; 4479 4480 handle = __le16_to_cpu(info->handle); 4481 block_count = __le16_to_cpu(info->blocks); 4482 4483 conn = __hci_conn_lookup_handle(hdev, handle); 4484 if (!conn) 4485 continue; 4486 4487 conn->sent -= block_count; 4488 4489 switch (conn->type) { 4490 case ACL_LINK: 4491 case AMP_LINK: 4492 hdev->block_cnt += block_count; 4493 if (hdev->block_cnt > hdev->num_blocks) 4494 hdev->block_cnt = hdev->num_blocks; 4495 break; 4496 4497 default: 4498 bt_dev_err(hdev, "unknown type %d conn %p", 4499 conn->type, conn); 4500 break; 4501 } 4502 } 4503 4504 queue_work(hdev->workqueue, &hdev->tx_work); 4505 } 4506 4507 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4508 struct sk_buff *skb) 4509 { 4510 struct hci_ev_mode_change *ev = data; 4511 struct hci_conn *conn; 4512 4513 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4514 4515 hci_dev_lock(hdev); 4516 4517 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4518 if (conn) { 4519 conn->mode = ev->mode; 4520 4521 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4522 &conn->flags)) { 4523 if (conn->mode == HCI_CM_ACTIVE) 4524 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4525 else 4526 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4527 } 4528 4529 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4530 hci_sco_setup(conn, ev->status); 4531 } 4532 4533 hci_dev_unlock(hdev); 4534 } 4535 4536 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4537 struct sk_buff *skb) 4538 { 4539 struct hci_ev_pin_code_req *ev = data; 4540 struct hci_conn *conn; 4541 4542 bt_dev_dbg(hdev, ""); 4543 4544 hci_dev_lock(hdev); 4545 4546 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4547 if (!conn) 4548 goto unlock; 4549 4550 if (conn->state == BT_CONNECTED) { 4551 hci_conn_hold(conn); 4552 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4553 hci_conn_drop(conn); 4554 } 4555 4556 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4557 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4558 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4559 sizeof(ev->bdaddr), &ev->bdaddr); 4560 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4561 u8 secure; 4562 4563 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4564 secure = 1; 4565 else 4566 secure = 0; 4567 4568 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4569 } 4570 4571 unlock: 4572 hci_dev_unlock(hdev); 4573 } 4574 4575 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4576 { 4577 if (key_type == HCI_LK_CHANGED_COMBINATION) 4578 return; 4579 4580 conn->pin_length = pin_len; 4581 conn->key_type = key_type; 4582 4583 switch (key_type) { 4584 case HCI_LK_LOCAL_UNIT: 4585 case HCI_LK_REMOTE_UNIT: 4586 case HCI_LK_DEBUG_COMBINATION: 4587 return; 4588 case HCI_LK_COMBINATION: 4589 if (pin_len == 16) 4590 conn->pending_sec_level = BT_SECURITY_HIGH; 4591 else 4592 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4593 break; 4594 case HCI_LK_UNAUTH_COMBINATION_P192: 4595 case HCI_LK_UNAUTH_COMBINATION_P256: 4596 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4597 break; 4598 case HCI_LK_AUTH_COMBINATION_P192: 4599 conn->pending_sec_level = BT_SECURITY_HIGH; 4600 break; 4601 case HCI_LK_AUTH_COMBINATION_P256: 4602 conn->pending_sec_level = BT_SECURITY_FIPS; 4603 break; 4604 } 4605 } 4606 4607 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4608 struct sk_buff *skb) 4609 { 4610 struct hci_ev_link_key_req *ev = data; 4611 struct hci_cp_link_key_reply cp; 4612 struct hci_conn *conn; 4613 struct link_key *key; 4614 4615 bt_dev_dbg(hdev, ""); 4616 4617 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4618 return; 4619 4620 hci_dev_lock(hdev); 4621 4622 key = hci_find_link_key(hdev, &ev->bdaddr); 4623 if (!key) { 4624 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4625 goto not_found; 4626 } 4627 4628 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4629 4630 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4631 if (conn) { 4632 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4633 4634 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4635 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4636 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4637 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4638 goto not_found; 4639 } 4640 4641 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4642 (conn->pending_sec_level == BT_SECURITY_HIGH || 4643 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4644 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4645 goto not_found; 4646 } 4647 4648 conn_set_key(conn, key->type, key->pin_len); 4649 } 4650 4651 bacpy(&cp.bdaddr, &ev->bdaddr); 4652 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4653 4654 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4655 4656 hci_dev_unlock(hdev); 4657 4658 return; 4659 4660 not_found: 4661 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4662 hci_dev_unlock(hdev); 4663 } 4664 4665 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4666 struct sk_buff *skb) 4667 { 4668 struct hci_ev_link_key_notify *ev = data; 4669 struct hci_conn *conn; 4670 struct link_key *key; 4671 bool persistent; 4672 u8 pin_len = 0; 4673 4674 bt_dev_dbg(hdev, ""); 4675 4676 hci_dev_lock(hdev); 4677 4678 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4679 if (!conn) 4680 goto unlock; 4681 4682 hci_conn_hold(conn); 4683 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4684 hci_conn_drop(conn); 4685 4686 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4687 conn_set_key(conn, ev->key_type, conn->pin_length); 4688 4689 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4690 goto unlock; 4691 4692 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4693 ev->key_type, pin_len, &persistent); 4694 if (!key) 4695 goto unlock; 4696 4697 /* Update connection information since adding the key will have 4698 * fixed up the type in the case of changed combination keys. 4699 */ 4700 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4701 conn_set_key(conn, key->type, key->pin_len); 4702 4703 mgmt_new_link_key(hdev, key, persistent); 4704 4705 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4706 * is set. If it's not set simply remove the key from the kernel 4707 * list (we've still notified user space about it but with 4708 * store_hint being 0). 4709 */ 4710 if (key->type == HCI_LK_DEBUG_COMBINATION && 4711 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4712 list_del_rcu(&key->list); 4713 kfree_rcu(key, rcu); 4714 goto unlock; 4715 } 4716 4717 if (persistent) 4718 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4719 else 4720 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4721 4722 unlock: 4723 hci_dev_unlock(hdev); 4724 } 4725 4726 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4727 struct sk_buff *skb) 4728 { 4729 struct hci_ev_clock_offset *ev = data; 4730 struct hci_conn *conn; 4731 4732 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4733 4734 hci_dev_lock(hdev); 4735 4736 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4737 if (conn && !ev->status) { 4738 struct inquiry_entry *ie; 4739 4740 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4741 if (ie) { 4742 ie->data.clock_offset = ev->clock_offset; 4743 ie->timestamp = jiffies; 4744 } 4745 } 4746 4747 hci_dev_unlock(hdev); 4748 } 4749 4750 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4751 struct sk_buff *skb) 4752 { 4753 struct hci_ev_pkt_type_change *ev = data; 4754 struct hci_conn *conn; 4755 4756 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4757 4758 hci_dev_lock(hdev); 4759 4760 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4761 if (conn && !ev->status) 4762 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4763 4764 hci_dev_unlock(hdev); 4765 } 4766 4767 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4768 struct sk_buff *skb) 4769 { 4770 struct hci_ev_pscan_rep_mode *ev = data; 4771 struct inquiry_entry *ie; 4772 4773 bt_dev_dbg(hdev, ""); 4774 4775 hci_dev_lock(hdev); 4776 4777 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4778 if (ie) { 4779 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4780 ie->timestamp = jiffies; 4781 } 4782 4783 hci_dev_unlock(hdev); 4784 } 4785 4786 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4787 struct sk_buff *skb) 4788 { 4789 struct hci_ev_inquiry_result_rssi *ev = edata; 4790 struct inquiry_data data; 4791 int i; 4792 4793 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4794 4795 if (!ev->num) 4796 return; 4797 4798 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4799 return; 4800 4801 hci_dev_lock(hdev); 4802 4803 if (skb->len == array_size(ev->num, 4804 sizeof(struct inquiry_info_rssi_pscan))) { 4805 struct inquiry_info_rssi_pscan *info; 4806 4807 for (i = 0; i < ev->num; i++) { 4808 u32 flags; 4809 4810 info = hci_ev_skb_pull(hdev, skb, 4811 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4812 sizeof(*info)); 4813 if (!info) { 4814 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4815 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4816 goto unlock; 4817 } 4818 4819 bacpy(&data.bdaddr, &info->bdaddr); 4820 data.pscan_rep_mode = info->pscan_rep_mode; 4821 data.pscan_period_mode = info->pscan_period_mode; 4822 data.pscan_mode = info->pscan_mode; 4823 memcpy(data.dev_class, info->dev_class, 3); 4824 data.clock_offset = info->clock_offset; 4825 data.rssi = info->rssi; 4826 data.ssp_mode = 0x00; 4827 4828 flags = hci_inquiry_cache_update(hdev, &data, false); 4829 4830 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4831 info->dev_class, info->rssi, 4832 flags, NULL, 0, NULL, 0); 4833 } 4834 } else if (skb->len == array_size(ev->num, 4835 sizeof(struct inquiry_info_rssi))) { 4836 struct inquiry_info_rssi *info; 4837 4838 for (i = 0; i < ev->num; i++) { 4839 u32 flags; 4840 4841 info = hci_ev_skb_pull(hdev, skb, 4842 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4843 sizeof(*info)); 4844 if (!info) { 4845 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4846 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4847 goto unlock; 4848 } 4849 4850 bacpy(&data.bdaddr, &info->bdaddr); 4851 data.pscan_rep_mode = info->pscan_rep_mode; 4852 data.pscan_period_mode = info->pscan_period_mode; 4853 data.pscan_mode = 0x00; 4854 memcpy(data.dev_class, info->dev_class, 3); 4855 data.clock_offset = info->clock_offset; 4856 data.rssi = info->rssi; 4857 data.ssp_mode = 0x00; 4858 4859 flags = hci_inquiry_cache_update(hdev, &data, false); 4860 4861 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4862 info->dev_class, info->rssi, 4863 flags, NULL, 0, NULL, 0); 4864 } 4865 } else { 4866 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4867 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4868 } 4869 unlock: 4870 hci_dev_unlock(hdev); 4871 } 4872 4873 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4874 struct sk_buff *skb) 4875 { 4876 struct hci_ev_remote_ext_features *ev = data; 4877 struct hci_conn *conn; 4878 4879 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4880 4881 hci_dev_lock(hdev); 4882 4883 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4884 if (!conn) 4885 goto unlock; 4886 4887 if (ev->page < HCI_MAX_PAGES) 4888 memcpy(conn->features[ev->page], ev->features, 8); 4889 4890 if (!ev->status && ev->page == 0x01) { 4891 struct inquiry_entry *ie; 4892 4893 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4894 if (ie) 4895 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4896 4897 if (ev->features[0] & LMP_HOST_SSP) { 4898 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4899 } else { 4900 /* It is mandatory by the Bluetooth specification that 4901 * Extended Inquiry Results are only used when Secure 4902 * Simple Pairing is enabled, but some devices violate 4903 * this. 4904 * 4905 * To make these devices work, the internal SSP 4906 * enabled flag needs to be cleared if the remote host 4907 * features do not indicate SSP support */ 4908 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4909 } 4910 4911 if (ev->features[0] & LMP_HOST_SC) 4912 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4913 } 4914 4915 if (conn->state != BT_CONFIG) 4916 goto unlock; 4917 4918 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4919 struct hci_cp_remote_name_req cp; 4920 memset(&cp, 0, sizeof(cp)); 4921 bacpy(&cp.bdaddr, &conn->dst); 4922 cp.pscan_rep_mode = 0x02; 4923 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4924 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4925 mgmt_device_connected(hdev, conn, NULL, 0); 4926 4927 if (!hci_outgoing_auth_needed(hdev, conn)) { 4928 conn->state = BT_CONNECTED; 4929 hci_connect_cfm(conn, ev->status); 4930 hci_conn_drop(conn); 4931 } 4932 4933 unlock: 4934 hci_dev_unlock(hdev); 4935 } 4936 4937 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4938 struct sk_buff *skb) 4939 { 4940 struct hci_ev_sync_conn_complete *ev = data; 4941 struct hci_conn *conn; 4942 u8 status = ev->status; 4943 4944 switch (ev->link_type) { 4945 case SCO_LINK: 4946 case ESCO_LINK: 4947 break; 4948 default: 4949 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type 4950 * for HCI_Synchronous_Connection_Complete is limited to 4951 * either SCO or eSCO 4952 */ 4953 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); 4954 return; 4955 } 4956 4957 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4958 4959 hci_dev_lock(hdev); 4960 4961 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4962 if (!conn) { 4963 if (ev->link_type == ESCO_LINK) 4964 goto unlock; 4965 4966 /* When the link type in the event indicates SCO connection 4967 * and lookup of the connection object fails, then check 4968 * if an eSCO connection object exists. 4969 * 4970 * The core limits the synchronous connections to either 4971 * SCO or eSCO. The eSCO connection is preferred and tried 4972 * to be setup first and until successfully established, 4973 * the link type will be hinted as eSCO. 4974 */ 4975 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4976 if (!conn) 4977 goto unlock; 4978 } 4979 4980 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. 4981 * Processing it more than once per connection can corrupt kernel memory. 4982 * 4983 * As the connection handle is set here for the first time, it indicates 4984 * whether the connection is already set up. 4985 */ 4986 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 4987 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); 4988 goto unlock; 4989 } 4990 4991 switch (status) { 4992 case 0x00: 4993 conn->handle = __le16_to_cpu(ev->handle); 4994 if (conn->handle > HCI_CONN_HANDLE_MAX) { 4995 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 4996 conn->handle, HCI_CONN_HANDLE_MAX); 4997 status = HCI_ERROR_INVALID_PARAMETERS; 4998 conn->state = BT_CLOSED; 4999 break; 5000 } 5001 5002 conn->state = BT_CONNECTED; 5003 conn->type = ev->link_type; 5004 5005 hci_debugfs_create_conn(conn); 5006 hci_conn_add_sysfs(conn); 5007 break; 5008 5009 case 0x10: /* Connection Accept Timeout */ 5010 case 0x0d: /* Connection Rejected due to Limited Resources */ 5011 case 0x11: /* Unsupported Feature or Parameter Value */ 5012 case 0x1c: /* SCO interval rejected */ 5013 case 0x1a: /* Unsupported Remote Feature */ 5014 case 0x1e: /* Invalid LMP Parameters */ 5015 case 0x1f: /* Unspecified error */ 5016 case 0x20: /* Unsupported LMP Parameter value */ 5017 if (conn->out) { 5018 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 5019 (hdev->esco_type & EDR_ESCO_MASK); 5020 if (hci_setup_sync(conn, conn->link->handle)) 5021 goto unlock; 5022 } 5023 fallthrough; 5024 5025 default: 5026 conn->state = BT_CLOSED; 5027 break; 5028 } 5029 5030 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 5031 /* Notify only in case of SCO over HCI transport data path which 5032 * is zero and non-zero value shall be non-HCI transport data path 5033 */ 5034 if (conn->codec.data_path == 0 && hdev->notify) { 5035 switch (ev->air_mode) { 5036 case 0x02: 5037 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 5038 break; 5039 case 0x03: 5040 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 5041 break; 5042 } 5043 } 5044 5045 hci_connect_cfm(conn, status); 5046 if (status) 5047 hci_conn_del(conn); 5048 5049 unlock: 5050 hci_dev_unlock(hdev); 5051 } 5052 5053 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 5054 { 5055 size_t parsed = 0; 5056 5057 while (parsed < eir_len) { 5058 u8 field_len = eir[0]; 5059 5060 if (field_len == 0) 5061 return parsed; 5062 5063 parsed += field_len + 1; 5064 eir += field_len + 1; 5065 } 5066 5067 return eir_len; 5068 } 5069 5070 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, 5071 struct sk_buff *skb) 5072 { 5073 struct hci_ev_ext_inquiry_result *ev = edata; 5074 struct inquiry_data data; 5075 size_t eir_len; 5076 int i; 5077 5078 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, 5079 flex_array_size(ev, info, ev->num))) 5080 return; 5081 5082 bt_dev_dbg(hdev, "num %d", ev->num); 5083 5084 if (!ev->num) 5085 return; 5086 5087 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 5088 return; 5089 5090 hci_dev_lock(hdev); 5091 5092 for (i = 0; i < ev->num; i++) { 5093 struct extended_inquiry_info *info = &ev->info[i]; 5094 u32 flags; 5095 bool name_known; 5096 5097 bacpy(&data.bdaddr, &info->bdaddr); 5098 data.pscan_rep_mode = info->pscan_rep_mode; 5099 data.pscan_period_mode = info->pscan_period_mode; 5100 data.pscan_mode = 0x00; 5101 memcpy(data.dev_class, info->dev_class, 3); 5102 data.clock_offset = info->clock_offset; 5103 data.rssi = info->rssi; 5104 data.ssp_mode = 0x01; 5105 5106 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5107 name_known = eir_get_data(info->data, 5108 sizeof(info->data), 5109 EIR_NAME_COMPLETE, NULL); 5110 else 5111 name_known = true; 5112 5113 flags = hci_inquiry_cache_update(hdev, &data, name_known); 5114 5115 eir_len = eir_get_length(info->data, sizeof(info->data)); 5116 5117 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 5118 info->dev_class, info->rssi, 5119 flags, info->data, eir_len, NULL, 0); 5120 } 5121 5122 hci_dev_unlock(hdev); 5123 } 5124 5125 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, 5126 struct sk_buff *skb) 5127 { 5128 struct hci_ev_key_refresh_complete *ev = data; 5129 struct hci_conn *conn; 5130 5131 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, 5132 __le16_to_cpu(ev->handle)); 5133 5134 hci_dev_lock(hdev); 5135 5136 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5137 if (!conn) 5138 goto unlock; 5139 5140 /* For BR/EDR the necessary steps are taken through the 5141 * auth_complete event. 5142 */ 5143 if (conn->type != LE_LINK) 5144 goto unlock; 5145 5146 if (!ev->status) 5147 conn->sec_level = conn->pending_sec_level; 5148 5149 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 5150 5151 if (ev->status && conn->state == BT_CONNECTED) { 5152 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 5153 hci_conn_drop(conn); 5154 goto unlock; 5155 } 5156 5157 if (conn->state == BT_CONFIG) { 5158 if (!ev->status) 5159 conn->state = BT_CONNECTED; 5160 5161 hci_connect_cfm(conn, ev->status); 5162 hci_conn_drop(conn); 5163 } else { 5164 hci_auth_cfm(conn, ev->status); 5165 5166 hci_conn_hold(conn); 5167 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 5168 hci_conn_drop(conn); 5169 } 5170 5171 unlock: 5172 hci_dev_unlock(hdev); 5173 } 5174 5175 static u8 hci_get_auth_req(struct hci_conn *conn) 5176 { 5177 /* If remote requests no-bonding follow that lead */ 5178 if (conn->remote_auth == HCI_AT_NO_BONDING || 5179 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 5180 return conn->remote_auth | (conn->auth_type & 0x01); 5181 5182 /* If both remote and local have enough IO capabilities, require 5183 * MITM protection 5184 */ 5185 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 5186 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 5187 return conn->remote_auth | 0x01; 5188 5189 /* No MITM protection possible so ignore remote requirement */ 5190 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 5191 } 5192 5193 static u8 bredr_oob_data_present(struct hci_conn *conn) 5194 { 5195 struct hci_dev *hdev = conn->hdev; 5196 struct oob_data *data; 5197 5198 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 5199 if (!data) 5200 return 0x00; 5201 5202 if (bredr_sc_enabled(hdev)) { 5203 /* When Secure Connections is enabled, then just 5204 * return the present value stored with the OOB 5205 * data. The stored value contains the right present 5206 * information. However it can only be trusted when 5207 * not in Secure Connection Only mode. 5208 */ 5209 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 5210 return data->present; 5211 5212 /* When Secure Connections Only mode is enabled, then 5213 * the P-256 values are required. If they are not 5214 * available, then do not declare that OOB data is 5215 * present. 5216 */ 5217 if (!memcmp(data->rand256, ZERO_KEY, 16) || 5218 !memcmp(data->hash256, ZERO_KEY, 16)) 5219 return 0x00; 5220 5221 return 0x02; 5222 } 5223 5224 /* When Secure Connections is not enabled or actually 5225 * not supported by the hardware, then check that if 5226 * P-192 data values are present. 5227 */ 5228 if (!memcmp(data->rand192, ZERO_KEY, 16) || 5229 !memcmp(data->hash192, ZERO_KEY, 16)) 5230 return 0x00; 5231 5232 return 0x01; 5233 } 5234 5235 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, 5236 struct sk_buff *skb) 5237 { 5238 struct hci_ev_io_capa_request *ev = data; 5239 struct hci_conn *conn; 5240 5241 bt_dev_dbg(hdev, ""); 5242 5243 hci_dev_lock(hdev); 5244 5245 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5246 if (!conn) 5247 goto unlock; 5248 5249 hci_conn_hold(conn); 5250 5251 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5252 goto unlock; 5253 5254 /* Allow pairing if we're pairable, the initiators of the 5255 * pairing or if the remote is not requesting bonding. 5256 */ 5257 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 5258 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 5259 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 5260 struct hci_cp_io_capability_reply cp; 5261 5262 bacpy(&cp.bdaddr, &ev->bdaddr); 5263 /* Change the IO capability from KeyboardDisplay 5264 * to DisplayYesNo as it is not supported by BT spec. */ 5265 cp.capability = (conn->io_capability == 0x04) ? 5266 HCI_IO_DISPLAY_YESNO : conn->io_capability; 5267 5268 /* If we are initiators, there is no remote information yet */ 5269 if (conn->remote_auth == 0xff) { 5270 /* Request MITM protection if our IO caps allow it 5271 * except for the no-bonding case. 5272 */ 5273 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5274 conn->auth_type != HCI_AT_NO_BONDING) 5275 conn->auth_type |= 0x01; 5276 } else { 5277 conn->auth_type = hci_get_auth_req(conn); 5278 } 5279 5280 /* If we're not bondable, force one of the non-bondable 5281 * authentication requirement values. 5282 */ 5283 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 5284 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 5285 5286 cp.authentication = conn->auth_type; 5287 cp.oob_data = bredr_oob_data_present(conn); 5288 5289 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 5290 sizeof(cp), &cp); 5291 } else { 5292 struct hci_cp_io_capability_neg_reply cp; 5293 5294 bacpy(&cp.bdaddr, &ev->bdaddr); 5295 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 5296 5297 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 5298 sizeof(cp), &cp); 5299 } 5300 5301 unlock: 5302 hci_dev_unlock(hdev); 5303 } 5304 5305 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, 5306 struct sk_buff *skb) 5307 { 5308 struct hci_ev_io_capa_reply *ev = data; 5309 struct hci_conn *conn; 5310 5311 bt_dev_dbg(hdev, ""); 5312 5313 hci_dev_lock(hdev); 5314 5315 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5316 if (!conn) 5317 goto unlock; 5318 5319 conn->remote_cap = ev->capability; 5320 conn->remote_auth = ev->authentication; 5321 5322 unlock: 5323 hci_dev_unlock(hdev); 5324 } 5325 5326 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, 5327 struct sk_buff *skb) 5328 { 5329 struct hci_ev_user_confirm_req *ev = data; 5330 int loc_mitm, rem_mitm, confirm_hint = 0; 5331 struct hci_conn *conn; 5332 5333 bt_dev_dbg(hdev, ""); 5334 5335 hci_dev_lock(hdev); 5336 5337 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5338 goto unlock; 5339 5340 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5341 if (!conn) 5342 goto unlock; 5343 5344 loc_mitm = (conn->auth_type & 0x01); 5345 rem_mitm = (conn->remote_auth & 0x01); 5346 5347 /* If we require MITM but the remote device can't provide that 5348 * (it has NoInputNoOutput) then reject the confirmation 5349 * request. We check the security level here since it doesn't 5350 * necessarily match conn->auth_type. 5351 */ 5352 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 5353 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 5354 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); 5355 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 5356 sizeof(ev->bdaddr), &ev->bdaddr); 5357 goto unlock; 5358 } 5359 5360 /* If no side requires MITM protection; auto-accept */ 5361 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 5362 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 5363 5364 /* If we're not the initiators request authorization to 5365 * proceed from user space (mgmt_user_confirm with 5366 * confirm_hint set to 1). The exception is if neither 5367 * side had MITM or if the local IO capability is 5368 * NoInputNoOutput, in which case we do auto-accept 5369 */ 5370 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 5371 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5372 (loc_mitm || rem_mitm)) { 5373 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); 5374 confirm_hint = 1; 5375 goto confirm; 5376 } 5377 5378 /* If there already exists link key in local host, leave the 5379 * decision to user space since the remote device could be 5380 * legitimate or malicious. 5381 */ 5382 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5383 bt_dev_dbg(hdev, "Local host already has link key"); 5384 confirm_hint = 1; 5385 goto confirm; 5386 } 5387 5388 BT_DBG("Auto-accept of user confirmation with %ums delay", 5389 hdev->auto_accept_delay); 5390 5391 if (hdev->auto_accept_delay > 0) { 5392 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5393 queue_delayed_work(conn->hdev->workqueue, 5394 &conn->auto_accept_work, delay); 5395 goto unlock; 5396 } 5397 5398 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5399 sizeof(ev->bdaddr), &ev->bdaddr); 5400 goto unlock; 5401 } 5402 5403 confirm: 5404 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5405 le32_to_cpu(ev->passkey), confirm_hint); 5406 5407 unlock: 5408 hci_dev_unlock(hdev); 5409 } 5410 5411 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5412 struct sk_buff *skb) 5413 { 5414 struct hci_ev_user_passkey_req *ev = data; 5415 5416 bt_dev_dbg(hdev, ""); 5417 5418 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5419 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5420 } 5421 5422 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5423 struct sk_buff *skb) 5424 { 5425 struct hci_ev_user_passkey_notify *ev = data; 5426 struct hci_conn *conn; 5427 5428 bt_dev_dbg(hdev, ""); 5429 5430 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5431 if (!conn) 5432 return; 5433 5434 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5435 conn->passkey_entered = 0; 5436 5437 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5438 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5439 conn->dst_type, conn->passkey_notify, 5440 conn->passkey_entered); 5441 } 5442 5443 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5444 struct sk_buff *skb) 5445 { 5446 struct hci_ev_keypress_notify *ev = data; 5447 struct hci_conn *conn; 5448 5449 bt_dev_dbg(hdev, ""); 5450 5451 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5452 if (!conn) 5453 return; 5454 5455 switch (ev->type) { 5456 case HCI_KEYPRESS_STARTED: 5457 conn->passkey_entered = 0; 5458 return; 5459 5460 case HCI_KEYPRESS_ENTERED: 5461 conn->passkey_entered++; 5462 break; 5463 5464 case HCI_KEYPRESS_ERASED: 5465 conn->passkey_entered--; 5466 break; 5467 5468 case HCI_KEYPRESS_CLEARED: 5469 conn->passkey_entered = 0; 5470 break; 5471 5472 case HCI_KEYPRESS_COMPLETED: 5473 return; 5474 } 5475 5476 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5477 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5478 conn->dst_type, conn->passkey_notify, 5479 conn->passkey_entered); 5480 } 5481 5482 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5483 struct sk_buff *skb) 5484 { 5485 struct hci_ev_simple_pair_complete *ev = data; 5486 struct hci_conn *conn; 5487 5488 bt_dev_dbg(hdev, ""); 5489 5490 hci_dev_lock(hdev); 5491 5492 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5493 if (!conn) 5494 goto unlock; 5495 5496 /* Reset the authentication requirement to unknown */ 5497 conn->remote_auth = 0xff; 5498 5499 /* To avoid duplicate auth_failed events to user space we check 5500 * the HCI_CONN_AUTH_PEND flag which will be set if we 5501 * initiated the authentication. A traditional auth_complete 5502 * event gets always produced as initiator and is also mapped to 5503 * the mgmt_auth_failed event */ 5504 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5505 mgmt_auth_failed(conn, ev->status); 5506 5507 hci_conn_drop(conn); 5508 5509 unlock: 5510 hci_dev_unlock(hdev); 5511 } 5512 5513 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, 5514 struct sk_buff *skb) 5515 { 5516 struct hci_ev_remote_host_features *ev = data; 5517 struct inquiry_entry *ie; 5518 struct hci_conn *conn; 5519 5520 bt_dev_dbg(hdev, ""); 5521 5522 hci_dev_lock(hdev); 5523 5524 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5525 if (conn) 5526 memcpy(conn->features[1], ev->features, 8); 5527 5528 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5529 if (ie) 5530 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5531 5532 hci_dev_unlock(hdev); 5533 } 5534 5535 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, 5536 struct sk_buff *skb) 5537 { 5538 struct hci_ev_remote_oob_data_request *ev = edata; 5539 struct oob_data *data; 5540 5541 bt_dev_dbg(hdev, ""); 5542 5543 hci_dev_lock(hdev); 5544 5545 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5546 goto unlock; 5547 5548 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5549 if (!data) { 5550 struct hci_cp_remote_oob_data_neg_reply cp; 5551 5552 bacpy(&cp.bdaddr, &ev->bdaddr); 5553 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5554 sizeof(cp), &cp); 5555 goto unlock; 5556 } 5557 5558 if (bredr_sc_enabled(hdev)) { 5559 struct hci_cp_remote_oob_ext_data_reply cp; 5560 5561 bacpy(&cp.bdaddr, &ev->bdaddr); 5562 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5563 memset(cp.hash192, 0, sizeof(cp.hash192)); 5564 memset(cp.rand192, 0, sizeof(cp.rand192)); 5565 } else { 5566 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5567 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5568 } 5569 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5570 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5571 5572 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5573 sizeof(cp), &cp); 5574 } else { 5575 struct hci_cp_remote_oob_data_reply cp; 5576 5577 bacpy(&cp.bdaddr, &ev->bdaddr); 5578 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5579 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5580 5581 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5582 sizeof(cp), &cp); 5583 } 5584 5585 unlock: 5586 hci_dev_unlock(hdev); 5587 } 5588 5589 #if IS_ENABLED(CONFIG_BT_HS) 5590 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, 5591 struct sk_buff *skb) 5592 { 5593 struct hci_ev_channel_selected *ev = data; 5594 struct hci_conn *hcon; 5595 5596 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); 5597 5598 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5599 if (!hcon) 5600 return; 5601 5602 amp_read_loc_assoc_final_data(hdev, hcon); 5603 } 5604 5605 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, 5606 struct sk_buff *skb) 5607 { 5608 struct hci_ev_phy_link_complete *ev = data; 5609 struct hci_conn *hcon, *bredr_hcon; 5610 5611 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, 5612 ev->status); 5613 5614 hci_dev_lock(hdev); 5615 5616 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5617 if (!hcon) 5618 goto unlock; 5619 5620 if (!hcon->amp_mgr) 5621 goto unlock; 5622 5623 if (ev->status) { 5624 hci_conn_del(hcon); 5625 goto unlock; 5626 } 5627 5628 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5629 5630 hcon->state = BT_CONNECTED; 5631 bacpy(&hcon->dst, &bredr_hcon->dst); 5632 5633 hci_conn_hold(hcon); 5634 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5635 hci_conn_drop(hcon); 5636 5637 hci_debugfs_create_conn(hcon); 5638 hci_conn_add_sysfs(hcon); 5639 5640 amp_physical_cfm(bredr_hcon, hcon); 5641 5642 unlock: 5643 hci_dev_unlock(hdev); 5644 } 5645 5646 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, 5647 struct sk_buff *skb) 5648 { 5649 struct hci_ev_logical_link_complete *ev = data; 5650 struct hci_conn *hcon; 5651 struct hci_chan *hchan; 5652 struct amp_mgr *mgr; 5653 5654 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5655 le16_to_cpu(ev->handle), ev->phy_handle, ev->status); 5656 5657 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5658 if (!hcon) 5659 return; 5660 5661 /* Create AMP hchan */ 5662 hchan = hci_chan_create(hcon); 5663 if (!hchan) 5664 return; 5665 5666 hchan->handle = le16_to_cpu(ev->handle); 5667 hchan->amp = true; 5668 5669 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5670 5671 mgr = hcon->amp_mgr; 5672 if (mgr && mgr->bredr_chan) { 5673 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5674 5675 l2cap_chan_lock(bredr_chan); 5676 5677 bredr_chan->conn->mtu = hdev->block_mtu; 5678 l2cap_logical_cfm(bredr_chan, hchan, 0); 5679 hci_conn_hold(hcon); 5680 5681 l2cap_chan_unlock(bredr_chan); 5682 } 5683 } 5684 5685 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, 5686 struct sk_buff *skb) 5687 { 5688 struct hci_ev_disconn_logical_link_complete *ev = data; 5689 struct hci_chan *hchan; 5690 5691 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", 5692 le16_to_cpu(ev->handle), ev->status); 5693 5694 if (ev->status) 5695 return; 5696 5697 hci_dev_lock(hdev); 5698 5699 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5700 if (!hchan || !hchan->amp) 5701 goto unlock; 5702 5703 amp_destroy_logical_link(hchan, ev->reason); 5704 5705 unlock: 5706 hci_dev_unlock(hdev); 5707 } 5708 5709 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, 5710 struct sk_buff *skb) 5711 { 5712 struct hci_ev_disconn_phy_link_complete *ev = data; 5713 struct hci_conn *hcon; 5714 5715 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5716 5717 if (ev->status) 5718 return; 5719 5720 hci_dev_lock(hdev); 5721 5722 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5723 if (hcon && hcon->type == AMP_LINK) { 5724 hcon->state = BT_CLOSED; 5725 hci_disconn_cfm(hcon, ev->reason); 5726 hci_conn_del(hcon); 5727 } 5728 5729 hci_dev_unlock(hdev); 5730 } 5731 #endif 5732 5733 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5734 u8 bdaddr_type, bdaddr_t *local_rpa) 5735 { 5736 if (conn->out) { 5737 conn->dst_type = bdaddr_type; 5738 conn->resp_addr_type = bdaddr_type; 5739 bacpy(&conn->resp_addr, bdaddr); 5740 5741 /* Check if the controller has set a Local RPA then it must be 5742 * used instead or hdev->rpa. 5743 */ 5744 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5745 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5746 bacpy(&conn->init_addr, local_rpa); 5747 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5748 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5749 bacpy(&conn->init_addr, &conn->hdev->rpa); 5750 } else { 5751 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5752 &conn->init_addr_type); 5753 } 5754 } else { 5755 conn->resp_addr_type = conn->hdev->adv_addr_type; 5756 /* Check if the controller has set a Local RPA then it must be 5757 * used instead or hdev->rpa. 5758 */ 5759 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5760 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5761 bacpy(&conn->resp_addr, local_rpa); 5762 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5763 /* In case of ext adv, resp_addr will be updated in 5764 * Adv Terminated event. 5765 */ 5766 if (!ext_adv_capable(conn->hdev)) 5767 bacpy(&conn->resp_addr, 5768 &conn->hdev->random_addr); 5769 } else { 5770 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5771 } 5772 5773 conn->init_addr_type = bdaddr_type; 5774 bacpy(&conn->init_addr, bdaddr); 5775 5776 /* For incoming connections, set the default minimum 5777 * and maximum connection interval. They will be used 5778 * to check if the parameters are in range and if not 5779 * trigger the connection update procedure. 5780 */ 5781 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5782 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5783 } 5784 } 5785 5786 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5787 bdaddr_t *bdaddr, u8 bdaddr_type, 5788 bdaddr_t *local_rpa, u8 role, u16 handle, 5789 u16 interval, u16 latency, 5790 u16 supervision_timeout) 5791 { 5792 struct hci_conn_params *params; 5793 struct hci_conn *conn; 5794 struct smp_irk *irk; 5795 u8 addr_type; 5796 5797 hci_dev_lock(hdev); 5798 5799 /* All controllers implicitly stop advertising in the event of a 5800 * connection, so ensure that the state bit is cleared. 5801 */ 5802 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5803 5804 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 5805 if (!conn) { 5806 /* In case of error status and there is no connection pending 5807 * just unlock as there is nothing to cleanup. 5808 */ 5809 if (status) 5810 goto unlock; 5811 5812 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5813 if (!conn) { 5814 bt_dev_err(hdev, "no memory for new connection"); 5815 goto unlock; 5816 } 5817 5818 conn->dst_type = bdaddr_type; 5819 5820 /* If we didn't have a hci_conn object previously 5821 * but we're in central role this must be something 5822 * initiated using an accept list. Since accept list based 5823 * connections are not "first class citizens" we don't 5824 * have full tracking of them. Therefore, we go ahead 5825 * with a "best effort" approach of determining the 5826 * initiator address based on the HCI_PRIVACY flag. 5827 */ 5828 if (conn->out) { 5829 conn->resp_addr_type = bdaddr_type; 5830 bacpy(&conn->resp_addr, bdaddr); 5831 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5832 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5833 bacpy(&conn->init_addr, &hdev->rpa); 5834 } else { 5835 hci_copy_identity_address(hdev, 5836 &conn->init_addr, 5837 &conn->init_addr_type); 5838 } 5839 } 5840 } else { 5841 cancel_delayed_work(&conn->le_conn_timeout); 5842 } 5843 5844 /* The HCI_LE_Connection_Complete event is only sent once per connection. 5845 * Processing it more than once per connection can corrupt kernel memory. 5846 * 5847 * As the connection handle is set here for the first time, it indicates 5848 * whether the connection is already set up. 5849 */ 5850 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 5851 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 5852 goto unlock; 5853 } 5854 5855 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5856 5857 /* Lookup the identity address from the stored connection 5858 * address and address type. 5859 * 5860 * When establishing connections to an identity address, the 5861 * connection procedure will store the resolvable random 5862 * address first. Now if it can be converted back into the 5863 * identity address, start using the identity address from 5864 * now on. 5865 */ 5866 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5867 if (irk) { 5868 bacpy(&conn->dst, &irk->bdaddr); 5869 conn->dst_type = irk->addr_type; 5870 } 5871 5872 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5873 5874 if (handle > HCI_CONN_HANDLE_MAX) { 5875 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, 5876 HCI_CONN_HANDLE_MAX); 5877 status = HCI_ERROR_INVALID_PARAMETERS; 5878 } 5879 5880 /* All connection failure handling is taken care of by the 5881 * hci_conn_failed function which is triggered by the HCI 5882 * request completion callbacks used for connecting. 5883 */ 5884 if (status) 5885 goto unlock; 5886 5887 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5888 addr_type = BDADDR_LE_PUBLIC; 5889 else 5890 addr_type = BDADDR_LE_RANDOM; 5891 5892 /* Drop the connection if the device is blocked */ 5893 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5894 hci_conn_drop(conn); 5895 goto unlock; 5896 } 5897 5898 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5899 mgmt_device_connected(hdev, conn, NULL, 0); 5900 5901 conn->sec_level = BT_SECURITY_LOW; 5902 conn->handle = handle; 5903 conn->state = BT_CONFIG; 5904 5905 /* Store current advertising instance as connection advertising instance 5906 * when sotfware rotation is in use so it can be re-enabled when 5907 * disconnected. 5908 */ 5909 if (!ext_adv_capable(hdev)) 5910 conn->adv_instance = hdev->cur_adv_instance; 5911 5912 conn->le_conn_interval = interval; 5913 conn->le_conn_latency = latency; 5914 conn->le_supv_timeout = supervision_timeout; 5915 5916 hci_debugfs_create_conn(conn); 5917 hci_conn_add_sysfs(conn); 5918 5919 /* The remote features procedure is defined for central 5920 * role only. So only in case of an initiated connection 5921 * request the remote features. 5922 * 5923 * If the local controller supports peripheral-initiated features 5924 * exchange, then requesting the remote features in peripheral 5925 * role is possible. Otherwise just transition into the 5926 * connected state without requesting the remote features. 5927 */ 5928 if (conn->out || 5929 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5930 struct hci_cp_le_read_remote_features cp; 5931 5932 cp.handle = __cpu_to_le16(conn->handle); 5933 5934 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5935 sizeof(cp), &cp); 5936 5937 hci_conn_hold(conn); 5938 } else { 5939 conn->state = BT_CONNECTED; 5940 hci_connect_cfm(conn, status); 5941 } 5942 5943 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5944 conn->dst_type); 5945 if (params) { 5946 list_del_init(¶ms->action); 5947 if (params->conn) { 5948 hci_conn_drop(params->conn); 5949 hci_conn_put(params->conn); 5950 params->conn = NULL; 5951 } 5952 } 5953 5954 unlock: 5955 hci_update_passive_scan(hdev); 5956 hci_dev_unlock(hdev); 5957 } 5958 5959 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, 5960 struct sk_buff *skb) 5961 { 5962 struct hci_ev_le_conn_complete *ev = data; 5963 5964 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5965 5966 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5967 NULL, ev->role, le16_to_cpu(ev->handle), 5968 le16_to_cpu(ev->interval), 5969 le16_to_cpu(ev->latency), 5970 le16_to_cpu(ev->supervision_timeout)); 5971 } 5972 5973 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, 5974 struct sk_buff *skb) 5975 { 5976 struct hci_ev_le_enh_conn_complete *ev = data; 5977 5978 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5979 5980 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5981 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5982 le16_to_cpu(ev->interval), 5983 le16_to_cpu(ev->latency), 5984 le16_to_cpu(ev->supervision_timeout)); 5985 } 5986 5987 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, 5988 struct sk_buff *skb) 5989 { 5990 struct hci_evt_le_ext_adv_set_term *ev = data; 5991 struct hci_conn *conn; 5992 struct adv_info *adv, *n; 5993 5994 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5995 5996 /* The Bluetooth Core 5.3 specification clearly states that this event 5997 * shall not be sent when the Host disables the advertising set. So in 5998 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 5999 * 6000 * When the Host disables an advertising set, all cleanup is done via 6001 * its command callback and not needed to be duplicated here. 6002 */ 6003 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 6004 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 6005 return; 6006 } 6007 6008 hci_dev_lock(hdev); 6009 6010 adv = hci_find_adv_instance(hdev, ev->handle); 6011 6012 if (ev->status) { 6013 if (!adv) 6014 goto unlock; 6015 6016 /* Remove advertising as it has been terminated */ 6017 hci_remove_adv_instance(hdev, ev->handle); 6018 mgmt_advertising_removed(NULL, hdev, ev->handle); 6019 6020 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 6021 if (adv->enabled) 6022 goto unlock; 6023 } 6024 6025 /* We are no longer advertising, clear HCI_LE_ADV */ 6026 hci_dev_clear_flag(hdev, HCI_LE_ADV); 6027 goto unlock; 6028 } 6029 6030 if (adv) 6031 adv->enabled = false; 6032 6033 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 6034 if (conn) { 6035 /* Store handle in the connection so the correct advertising 6036 * instance can be re-enabled when disconnected. 6037 */ 6038 conn->adv_instance = ev->handle; 6039 6040 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 6041 bacmp(&conn->resp_addr, BDADDR_ANY)) 6042 goto unlock; 6043 6044 if (!ev->handle) { 6045 bacpy(&conn->resp_addr, &hdev->random_addr); 6046 goto unlock; 6047 } 6048 6049 if (adv) 6050 bacpy(&conn->resp_addr, &adv->random_addr); 6051 } 6052 6053 unlock: 6054 hci_dev_unlock(hdev); 6055 } 6056 6057 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, 6058 struct sk_buff *skb) 6059 { 6060 struct hci_ev_le_conn_update_complete *ev = data; 6061 struct hci_conn *conn; 6062 6063 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6064 6065 if (ev->status) 6066 return; 6067 6068 hci_dev_lock(hdev); 6069 6070 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6071 if (conn) { 6072 conn->le_conn_interval = le16_to_cpu(ev->interval); 6073 conn->le_conn_latency = le16_to_cpu(ev->latency); 6074 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 6075 } 6076 6077 hci_dev_unlock(hdev); 6078 } 6079 6080 /* This function requires the caller holds hdev->lock */ 6081 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 6082 bdaddr_t *addr, 6083 u8 addr_type, bool addr_resolved, 6084 u8 adv_type) 6085 { 6086 struct hci_conn *conn; 6087 struct hci_conn_params *params; 6088 6089 /* If the event is not connectable don't proceed further */ 6090 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 6091 return NULL; 6092 6093 /* Ignore if the device is blocked or hdev is suspended */ 6094 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 6095 hdev->suspended) 6096 return NULL; 6097 6098 /* Most controller will fail if we try to create new connections 6099 * while we have an existing one in peripheral role. 6100 */ 6101 if (hdev->conn_hash.le_num_peripheral > 0 && 6102 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 6103 !(hdev->le_states[3] & 0x10))) 6104 return NULL; 6105 6106 /* If we're not connectable only connect devices that we have in 6107 * our pend_le_conns list. 6108 */ 6109 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 6110 addr_type); 6111 if (!params) 6112 return NULL; 6113 6114 if (!params->explicit_connect) { 6115 switch (params->auto_connect) { 6116 case HCI_AUTO_CONN_DIRECT: 6117 /* Only devices advertising with ADV_DIRECT_IND are 6118 * triggering a connection attempt. This is allowing 6119 * incoming connections from peripheral devices. 6120 */ 6121 if (adv_type != LE_ADV_DIRECT_IND) 6122 return NULL; 6123 break; 6124 case HCI_AUTO_CONN_ALWAYS: 6125 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 6126 * are triggering a connection attempt. This means 6127 * that incoming connections from peripheral device are 6128 * accepted and also outgoing connections to peripheral 6129 * devices are established when found. 6130 */ 6131 break; 6132 default: 6133 return NULL; 6134 } 6135 } 6136 6137 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 6138 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 6139 HCI_ROLE_MASTER); 6140 if (!IS_ERR(conn)) { 6141 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 6142 * by higher layer that tried to connect, if no then 6143 * store the pointer since we don't really have any 6144 * other owner of the object besides the params that 6145 * triggered it. This way we can abort the connection if 6146 * the parameters get removed and keep the reference 6147 * count consistent once the connection is established. 6148 */ 6149 6150 if (!params->explicit_connect) 6151 params->conn = hci_conn_get(conn); 6152 6153 return conn; 6154 } 6155 6156 switch (PTR_ERR(conn)) { 6157 case -EBUSY: 6158 /* If hci_connect() returns -EBUSY it means there is already 6159 * an LE connection attempt going on. Since controllers don't 6160 * support more than one connection attempt at the time, we 6161 * don't consider this an error case. 6162 */ 6163 break; 6164 default: 6165 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 6166 return NULL; 6167 } 6168 6169 return NULL; 6170 } 6171 6172 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 6173 u8 bdaddr_type, bdaddr_t *direct_addr, 6174 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 6175 bool ext_adv) 6176 { 6177 struct discovery_state *d = &hdev->discovery; 6178 struct smp_irk *irk; 6179 struct hci_conn *conn; 6180 bool match, bdaddr_resolved; 6181 u32 flags; 6182 u8 *ptr; 6183 6184 switch (type) { 6185 case LE_ADV_IND: 6186 case LE_ADV_DIRECT_IND: 6187 case LE_ADV_SCAN_IND: 6188 case LE_ADV_NONCONN_IND: 6189 case LE_ADV_SCAN_RSP: 6190 break; 6191 default: 6192 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 6193 "type: 0x%02x", type); 6194 return; 6195 } 6196 6197 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 6198 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 6199 return; 6200 } 6201 6202 /* Find the end of the data in case the report contains padded zero 6203 * bytes at the end causing an invalid length value. 6204 * 6205 * When data is NULL, len is 0 so there is no need for extra ptr 6206 * check as 'ptr < data + 0' is already false in such case. 6207 */ 6208 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 6209 if (ptr + 1 + *ptr > data + len) 6210 break; 6211 } 6212 6213 /* Adjust for actual length. This handles the case when remote 6214 * device is advertising with incorrect data length. 6215 */ 6216 len = ptr - data; 6217 6218 /* If the direct address is present, then this report is from 6219 * a LE Direct Advertising Report event. In that case it is 6220 * important to see if the address is matching the local 6221 * controller address. 6222 */ 6223 if (direct_addr) { 6224 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 6225 &bdaddr_resolved); 6226 6227 /* Only resolvable random addresses are valid for these 6228 * kind of reports and others can be ignored. 6229 */ 6230 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 6231 return; 6232 6233 /* If the controller is not using resolvable random 6234 * addresses, then this report can be ignored. 6235 */ 6236 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 6237 return; 6238 6239 /* If the local IRK of the controller does not match 6240 * with the resolvable random address provided, then 6241 * this report can be ignored. 6242 */ 6243 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 6244 return; 6245 } 6246 6247 /* Check if we need to convert to identity address */ 6248 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 6249 if (irk) { 6250 bdaddr = &irk->bdaddr; 6251 bdaddr_type = irk->addr_type; 6252 } 6253 6254 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 6255 6256 /* Check if we have been requested to connect to this device. 6257 * 6258 * direct_addr is set only for directed advertising reports (it is NULL 6259 * for advertising reports) and is already verified to be RPA above. 6260 */ 6261 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 6262 type); 6263 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { 6264 /* Store report for later inclusion by 6265 * mgmt_device_connected 6266 */ 6267 memcpy(conn->le_adv_data, data, len); 6268 conn->le_adv_data_len = len; 6269 } 6270 6271 /* Passive scanning shouldn't trigger any device found events, 6272 * except for devices marked as CONN_REPORT for which we do send 6273 * device found events, or advertisement monitoring requested. 6274 */ 6275 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 6276 if (type == LE_ADV_DIRECT_IND) 6277 return; 6278 6279 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 6280 bdaddr, bdaddr_type) && 6281 idr_is_empty(&hdev->adv_monitors_idr)) 6282 return; 6283 6284 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 6285 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6286 else 6287 flags = 0; 6288 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6289 rssi, flags, data, len, NULL, 0); 6290 return; 6291 } 6292 6293 /* When receiving non-connectable or scannable undirected 6294 * advertising reports, this means that the remote device is 6295 * not connectable and then clearly indicate this in the 6296 * device found event. 6297 * 6298 * When receiving a scan response, then there is no way to 6299 * know if the remote device is connectable or not. However 6300 * since scan responses are merged with a previously seen 6301 * advertising report, the flags field from that report 6302 * will be used. 6303 * 6304 * In the really unlikely case that a controller get confused 6305 * and just sends a scan response event, then it is marked as 6306 * not connectable as well. 6307 */ 6308 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || 6309 type == LE_ADV_SCAN_RSP) 6310 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6311 else 6312 flags = 0; 6313 6314 /* If there's nothing pending either store the data from this 6315 * event or send an immediate device found event if the data 6316 * should not be stored for later. 6317 */ 6318 if (!ext_adv && !has_pending_adv_report(hdev)) { 6319 /* If the report will trigger a SCAN_REQ store it for 6320 * later merging. 6321 */ 6322 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 6323 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6324 rssi, flags, data, len); 6325 return; 6326 } 6327 6328 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6329 rssi, flags, data, len, NULL, 0); 6330 return; 6331 } 6332 6333 /* Check if the pending report is for the same device as the new one */ 6334 match = (!bacmp(bdaddr, &d->last_adv_addr) && 6335 bdaddr_type == d->last_adv_addr_type); 6336 6337 /* If the pending data doesn't match this report or this isn't a 6338 * scan response (e.g. we got a duplicate ADV_IND) then force 6339 * sending of the pending data. 6340 */ 6341 if (type != LE_ADV_SCAN_RSP || !match) { 6342 /* Send out whatever is in the cache, but skip duplicates */ 6343 if (!match) 6344 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6345 d->last_adv_addr_type, NULL, 6346 d->last_adv_rssi, d->last_adv_flags, 6347 d->last_adv_data, 6348 d->last_adv_data_len, NULL, 0); 6349 6350 /* If the new report will trigger a SCAN_REQ store it for 6351 * later merging. 6352 */ 6353 if (!ext_adv && (type == LE_ADV_IND || 6354 type == LE_ADV_SCAN_IND)) { 6355 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6356 rssi, flags, data, len); 6357 return; 6358 } 6359 6360 /* The advertising reports cannot be merged, so clear 6361 * the pending report and send out a device found event. 6362 */ 6363 clear_pending_adv_report(hdev); 6364 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6365 rssi, flags, data, len, NULL, 0); 6366 return; 6367 } 6368 6369 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 6370 * the new event is a SCAN_RSP. We can therefore proceed with 6371 * sending a merged device found event. 6372 */ 6373 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6374 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 6375 d->last_adv_data, d->last_adv_data_len, data, len); 6376 clear_pending_adv_report(hdev); 6377 } 6378 6379 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, 6380 struct sk_buff *skb) 6381 { 6382 struct hci_ev_le_advertising_report *ev = data; 6383 6384 if (!ev->num) 6385 return; 6386 6387 hci_dev_lock(hdev); 6388 6389 while (ev->num--) { 6390 struct hci_ev_le_advertising_info *info; 6391 s8 rssi; 6392 6393 info = hci_le_ev_skb_pull(hdev, skb, 6394 HCI_EV_LE_ADVERTISING_REPORT, 6395 sizeof(*info)); 6396 if (!info) 6397 break; 6398 6399 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, 6400 info->length + 1)) 6401 break; 6402 6403 if (info->length <= HCI_MAX_AD_LENGTH) { 6404 rssi = info->data[info->length]; 6405 process_adv_report(hdev, info->type, &info->bdaddr, 6406 info->bdaddr_type, NULL, 0, rssi, 6407 info->data, info->length, false); 6408 } else { 6409 bt_dev_err(hdev, "Dropping invalid advertising data"); 6410 } 6411 } 6412 6413 hci_dev_unlock(hdev); 6414 } 6415 6416 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 6417 { 6418 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 6419 switch (evt_type) { 6420 case LE_LEGACY_ADV_IND: 6421 return LE_ADV_IND; 6422 case LE_LEGACY_ADV_DIRECT_IND: 6423 return LE_ADV_DIRECT_IND; 6424 case LE_LEGACY_ADV_SCAN_IND: 6425 return LE_ADV_SCAN_IND; 6426 case LE_LEGACY_NONCONN_IND: 6427 return LE_ADV_NONCONN_IND; 6428 case LE_LEGACY_SCAN_RSP_ADV: 6429 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 6430 return LE_ADV_SCAN_RSP; 6431 } 6432 6433 goto invalid; 6434 } 6435 6436 if (evt_type & LE_EXT_ADV_CONN_IND) { 6437 if (evt_type & LE_EXT_ADV_DIRECT_IND) 6438 return LE_ADV_DIRECT_IND; 6439 6440 return LE_ADV_IND; 6441 } 6442 6443 if (evt_type & LE_EXT_ADV_SCAN_RSP) 6444 return LE_ADV_SCAN_RSP; 6445 6446 if (evt_type & LE_EXT_ADV_SCAN_IND) 6447 return LE_ADV_SCAN_IND; 6448 6449 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 6450 evt_type & LE_EXT_ADV_DIRECT_IND) 6451 return LE_ADV_NONCONN_IND; 6452 6453 invalid: 6454 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 6455 evt_type); 6456 6457 return LE_ADV_INVALID; 6458 } 6459 6460 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, 6461 struct sk_buff *skb) 6462 { 6463 struct hci_ev_le_ext_adv_report *ev = data; 6464 6465 if (!ev->num) 6466 return; 6467 6468 hci_dev_lock(hdev); 6469 6470 while (ev->num--) { 6471 struct hci_ev_le_ext_adv_info *info; 6472 u8 legacy_evt_type; 6473 u16 evt_type; 6474 6475 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6476 sizeof(*info)); 6477 if (!info) 6478 break; 6479 6480 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6481 info->length)) 6482 break; 6483 6484 evt_type = __le16_to_cpu(info->type); 6485 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); 6486 if (legacy_evt_type != LE_ADV_INVALID) { 6487 process_adv_report(hdev, legacy_evt_type, &info->bdaddr, 6488 info->bdaddr_type, NULL, 0, 6489 info->rssi, info->data, info->length, 6490 !(evt_type & LE_EXT_ADV_LEGACY_PDU)); 6491 } 6492 } 6493 6494 hci_dev_unlock(hdev); 6495 } 6496 6497 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle) 6498 { 6499 struct hci_cp_le_pa_term_sync cp; 6500 6501 memset(&cp, 0, sizeof(cp)); 6502 cp.handle = handle; 6503 6504 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp); 6505 } 6506 6507 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, 6508 struct sk_buff *skb) 6509 { 6510 struct hci_ev_le_pa_sync_established *ev = data; 6511 int mask = hdev->link_mode; 6512 __u8 flags = 0; 6513 6514 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6515 6516 if (ev->status) 6517 return; 6518 6519 hci_dev_lock(hdev); 6520 6521 hci_dev_clear_flag(hdev, HCI_PA_SYNC); 6522 6523 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags); 6524 if (!(mask & HCI_LM_ACCEPT)) 6525 hci_le_pa_term_sync(hdev, ev->handle); 6526 6527 hci_dev_unlock(hdev); 6528 } 6529 6530 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, 6531 struct sk_buff *skb) 6532 { 6533 struct hci_ev_le_remote_feat_complete *ev = data; 6534 struct hci_conn *conn; 6535 6536 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6537 6538 hci_dev_lock(hdev); 6539 6540 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6541 if (conn) { 6542 if (!ev->status) 6543 memcpy(conn->features[0], ev->features, 8); 6544 6545 if (conn->state == BT_CONFIG) { 6546 __u8 status; 6547 6548 /* If the local controller supports peripheral-initiated 6549 * features exchange, but the remote controller does 6550 * not, then it is possible that the error code 0x1a 6551 * for unsupported remote feature gets returned. 6552 * 6553 * In this specific case, allow the connection to 6554 * transition into connected state and mark it as 6555 * successful. 6556 */ 6557 if (!conn->out && ev->status == 0x1a && 6558 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) 6559 status = 0x00; 6560 else 6561 status = ev->status; 6562 6563 conn->state = BT_CONNECTED; 6564 hci_connect_cfm(conn, status); 6565 hci_conn_drop(conn); 6566 } 6567 } 6568 6569 hci_dev_unlock(hdev); 6570 } 6571 6572 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data, 6573 struct sk_buff *skb) 6574 { 6575 struct hci_ev_le_ltk_req *ev = data; 6576 struct hci_cp_le_ltk_reply cp; 6577 struct hci_cp_le_ltk_neg_reply neg; 6578 struct hci_conn *conn; 6579 struct smp_ltk *ltk; 6580 6581 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); 6582 6583 hci_dev_lock(hdev); 6584 6585 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6586 if (conn == NULL) 6587 goto not_found; 6588 6589 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); 6590 if (!ltk) 6591 goto not_found; 6592 6593 if (smp_ltk_is_sc(ltk)) { 6594 /* With SC both EDiv and Rand are set to zero */ 6595 if (ev->ediv || ev->rand) 6596 goto not_found; 6597 } else { 6598 /* For non-SC keys check that EDiv and Rand match */ 6599 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) 6600 goto not_found; 6601 } 6602 6603 memcpy(cp.ltk, ltk->val, ltk->enc_size); 6604 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); 6605 cp.handle = cpu_to_le16(conn->handle); 6606 6607 conn->pending_sec_level = smp_ltk_sec_level(ltk); 6608 6609 conn->enc_key_size = ltk->enc_size; 6610 6611 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 6612 6613 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 6614 * temporary key used to encrypt a connection following 6615 * pairing. It is used during the Encrypted Session Setup to 6616 * distribute the keys. Later, security can be re-established 6617 * using a distributed LTK. 6618 */ 6619 if (ltk->type == SMP_STK) { 6620 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6621 list_del_rcu(<k->list); 6622 kfree_rcu(ltk, rcu); 6623 } else { 6624 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6625 } 6626 6627 hci_dev_unlock(hdev); 6628 6629 return; 6630 6631 not_found: 6632 neg.handle = ev->handle; 6633 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 6634 hci_dev_unlock(hdev); 6635 } 6636 6637 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, 6638 u8 reason) 6639 { 6640 struct hci_cp_le_conn_param_req_neg_reply cp; 6641 6642 cp.handle = cpu_to_le16(handle); 6643 cp.reason = reason; 6644 6645 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), 6646 &cp); 6647 } 6648 6649 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, 6650 struct sk_buff *skb) 6651 { 6652 struct hci_ev_le_remote_conn_param_req *ev = data; 6653 struct hci_cp_le_conn_param_req_reply cp; 6654 struct hci_conn *hcon; 6655 u16 handle, min, max, latency, timeout; 6656 6657 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); 6658 6659 handle = le16_to_cpu(ev->handle); 6660 min = le16_to_cpu(ev->interval_min); 6661 max = le16_to_cpu(ev->interval_max); 6662 latency = le16_to_cpu(ev->latency); 6663 timeout = le16_to_cpu(ev->timeout); 6664 6665 hcon = hci_conn_hash_lookup_handle(hdev, handle); 6666 if (!hcon || hcon->state != BT_CONNECTED) 6667 return send_conn_param_neg_reply(hdev, handle, 6668 HCI_ERROR_UNKNOWN_CONN_ID); 6669 6670 if (hci_check_conn_params(min, max, latency, timeout)) 6671 return send_conn_param_neg_reply(hdev, handle, 6672 HCI_ERROR_INVALID_LL_PARAMS); 6673 6674 if (hcon->role == HCI_ROLE_MASTER) { 6675 struct hci_conn_params *params; 6676 u8 store_hint; 6677 6678 hci_dev_lock(hdev); 6679 6680 params = hci_conn_params_lookup(hdev, &hcon->dst, 6681 hcon->dst_type); 6682 if (params) { 6683 params->conn_min_interval = min; 6684 params->conn_max_interval = max; 6685 params->conn_latency = latency; 6686 params->supervision_timeout = timeout; 6687 store_hint = 0x01; 6688 } else { 6689 store_hint = 0x00; 6690 } 6691 6692 hci_dev_unlock(hdev); 6693 6694 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 6695 store_hint, min, max, latency, timeout); 6696 } 6697 6698 cp.handle = ev->handle; 6699 cp.interval_min = ev->interval_min; 6700 cp.interval_max = ev->interval_max; 6701 cp.latency = ev->latency; 6702 cp.timeout = ev->timeout; 6703 cp.min_ce_len = 0; 6704 cp.max_ce_len = 0; 6705 6706 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); 6707 } 6708 6709 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, 6710 struct sk_buff *skb) 6711 { 6712 struct hci_ev_le_direct_adv_report *ev = data; 6713 int i; 6714 6715 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, 6716 flex_array_size(ev, info, ev->num))) 6717 return; 6718 6719 if (!ev->num) 6720 return; 6721 6722 hci_dev_lock(hdev); 6723 6724 for (i = 0; i < ev->num; i++) { 6725 struct hci_ev_le_direct_adv_info *info = &ev->info[i]; 6726 6727 process_adv_report(hdev, info->type, &info->bdaddr, 6728 info->bdaddr_type, &info->direct_addr, 6729 info->direct_addr_type, info->rssi, NULL, 0, 6730 false); 6731 } 6732 6733 hci_dev_unlock(hdev); 6734 } 6735 6736 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data, 6737 struct sk_buff *skb) 6738 { 6739 struct hci_ev_le_phy_update_complete *ev = data; 6740 struct hci_conn *conn; 6741 6742 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6743 6744 if (ev->status) 6745 return; 6746 6747 hci_dev_lock(hdev); 6748 6749 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6750 if (!conn) 6751 goto unlock; 6752 6753 conn->le_tx_phy = ev->tx_phy; 6754 conn->le_rx_phy = ev->rx_phy; 6755 6756 unlock: 6757 hci_dev_unlock(hdev); 6758 } 6759 6760 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, 6761 struct sk_buff *skb) 6762 { 6763 struct hci_evt_le_cis_established *ev = data; 6764 struct hci_conn *conn; 6765 u16 handle = __le16_to_cpu(ev->handle); 6766 6767 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6768 6769 hci_dev_lock(hdev); 6770 6771 conn = hci_conn_hash_lookup_handle(hdev, handle); 6772 if (!conn) { 6773 bt_dev_err(hdev, 6774 "Unable to find connection with handle 0x%4.4x", 6775 handle); 6776 goto unlock; 6777 } 6778 6779 if (conn->role == HCI_ROLE_SLAVE) { 6780 __le32 interval; 6781 6782 memset(&interval, 0, sizeof(interval)); 6783 6784 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); 6785 conn->iso_qos.in.interval = le32_to_cpu(interval); 6786 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); 6787 conn->iso_qos.out.interval = le32_to_cpu(interval); 6788 conn->iso_qos.in.latency = le16_to_cpu(ev->interval); 6789 conn->iso_qos.out.latency = le16_to_cpu(ev->interval); 6790 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu); 6791 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu); 6792 conn->iso_qos.in.phy = ev->c_phy; 6793 conn->iso_qos.out.phy = ev->p_phy; 6794 } 6795 6796 if (!ev->status) { 6797 conn->state = BT_CONNECTED; 6798 hci_debugfs_create_conn(conn); 6799 hci_conn_add_sysfs(conn); 6800 hci_iso_setup_path(conn); 6801 goto unlock; 6802 } 6803 6804 hci_connect_cfm(conn, ev->status); 6805 hci_conn_del(conn); 6806 6807 unlock: 6808 hci_dev_unlock(hdev); 6809 } 6810 6811 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) 6812 { 6813 struct hci_cp_le_reject_cis cp; 6814 6815 memset(&cp, 0, sizeof(cp)); 6816 cp.handle = handle; 6817 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 6818 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); 6819 } 6820 6821 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) 6822 { 6823 struct hci_cp_le_accept_cis cp; 6824 6825 memset(&cp, 0, sizeof(cp)); 6826 cp.handle = handle; 6827 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); 6828 } 6829 6830 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, 6831 struct sk_buff *skb) 6832 { 6833 struct hci_evt_le_cis_req *ev = data; 6834 u16 acl_handle, cis_handle; 6835 struct hci_conn *acl, *cis; 6836 int mask; 6837 __u8 flags = 0; 6838 6839 acl_handle = __le16_to_cpu(ev->acl_handle); 6840 cis_handle = __le16_to_cpu(ev->cis_handle); 6841 6842 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", 6843 acl_handle, cis_handle, ev->cig_id, ev->cis_id); 6844 6845 hci_dev_lock(hdev); 6846 6847 acl = hci_conn_hash_lookup_handle(hdev, acl_handle); 6848 if (!acl) 6849 goto unlock; 6850 6851 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags); 6852 if (!(mask & HCI_LM_ACCEPT)) { 6853 hci_le_reject_cis(hdev, ev->cis_handle); 6854 goto unlock; 6855 } 6856 6857 cis = hci_conn_hash_lookup_handle(hdev, cis_handle); 6858 if (!cis) { 6859 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE); 6860 if (!cis) { 6861 hci_le_reject_cis(hdev, ev->cis_handle); 6862 goto unlock; 6863 } 6864 cis->handle = cis_handle; 6865 } 6866 6867 cis->iso_qos.cig = ev->cig_id; 6868 cis->iso_qos.cis = ev->cis_id; 6869 6870 if (!(flags & HCI_PROTO_DEFER)) { 6871 hci_le_accept_cis(hdev, ev->cis_handle); 6872 } else { 6873 cis->state = BT_CONNECT2; 6874 hci_connect_cfm(cis, 0); 6875 } 6876 6877 unlock: 6878 hci_dev_unlock(hdev); 6879 } 6880 6881 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data, 6882 struct sk_buff *skb) 6883 { 6884 struct hci_evt_le_create_big_complete *ev = data; 6885 struct hci_conn *conn; 6886 6887 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 6888 6889 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE, 6890 flex_array_size(ev, bis_handle, ev->num_bis))) 6891 return; 6892 6893 hci_dev_lock(hdev); 6894 6895 conn = hci_conn_hash_lookup_big(hdev, ev->handle); 6896 if (!conn) 6897 goto unlock; 6898 6899 if (ev->num_bis) 6900 conn->handle = __le16_to_cpu(ev->bis_handle[0]); 6901 6902 if (!ev->status) { 6903 conn->state = BT_CONNECTED; 6904 hci_debugfs_create_conn(conn); 6905 hci_conn_add_sysfs(conn); 6906 hci_iso_setup_path(conn); 6907 goto unlock; 6908 } 6909 6910 hci_connect_cfm(conn, ev->status); 6911 hci_conn_del(conn); 6912 6913 unlock: 6914 hci_dev_unlock(hdev); 6915 } 6916 6917 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, 6918 struct sk_buff *skb) 6919 { 6920 struct hci_evt_le_big_sync_estabilished *ev = data; 6921 struct hci_conn *bis; 6922 int i; 6923 6924 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6925 6926 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED, 6927 flex_array_size(ev, bis, ev->num_bis))) 6928 return; 6929 6930 if (ev->status) 6931 return; 6932 6933 hci_dev_lock(hdev); 6934 6935 for (i = 0; i < ev->num_bis; i++) { 6936 u16 handle = le16_to_cpu(ev->bis[i]); 6937 __le32 interval; 6938 6939 bis = hci_conn_hash_lookup_handle(hdev, handle); 6940 if (!bis) { 6941 bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, 6942 HCI_ROLE_SLAVE); 6943 if (!bis) 6944 continue; 6945 bis->handle = handle; 6946 } 6947 6948 bis->iso_qos.big = ev->handle; 6949 memset(&interval, 0, sizeof(interval)); 6950 memcpy(&interval, ev->latency, sizeof(ev->latency)); 6951 bis->iso_qos.in.interval = le32_to_cpu(interval); 6952 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ 6953 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100; 6954 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu); 6955 6956 hci_connect_cfm(bis, ev->status); 6957 } 6958 6959 hci_dev_unlock(hdev); 6960 } 6961 6962 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, 6963 struct sk_buff *skb) 6964 { 6965 struct hci_evt_le_big_info_adv_report *ev = data; 6966 int mask = hdev->link_mode; 6967 __u8 flags = 0; 6968 6969 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); 6970 6971 hci_dev_lock(hdev); 6972 6973 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); 6974 if (!(mask & HCI_LM_ACCEPT)) 6975 hci_le_pa_term_sync(hdev, ev->sync_handle); 6976 6977 hci_dev_unlock(hdev); 6978 } 6979 6980 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \ 6981 [_op] = { \ 6982 .func = _func, \ 6983 .min_len = _min_len, \ 6984 .max_len = _max_len, \ 6985 } 6986 6987 #define HCI_LE_EV(_op, _func, _len) \ 6988 HCI_LE_EV_VL(_op, _func, _len, _len) 6989 6990 #define HCI_LE_EV_STATUS(_op, _func) \ 6991 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status)) 6992 6993 /* Entries in this table shall have their position according to the subevent 6994 * opcode they handle so the use of the macros above is recommend since it does 6995 * attempt to initialize at its proper index using Designated Initializers that 6996 * way events without a callback function can be ommited. 6997 */ 6998 static const struct hci_le_ev { 6999 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 7000 u16 min_len; 7001 u16 max_len; 7002 } hci_le_ev_table[U8_MAX + 1] = { 7003 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */ 7004 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt, 7005 sizeof(struct hci_ev_le_conn_complete)), 7006 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */ 7007 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt, 7008 sizeof(struct hci_ev_le_advertising_report), 7009 HCI_MAX_EVENT_SIZE), 7010 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */ 7011 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE, 7012 hci_le_conn_update_complete_evt, 7013 sizeof(struct hci_ev_le_conn_update_complete)), 7014 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */ 7015 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE, 7016 hci_le_remote_feat_complete_evt, 7017 sizeof(struct hci_ev_le_remote_feat_complete)), 7018 /* [0x05 = HCI_EV_LE_LTK_REQ] */ 7019 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, 7020 sizeof(struct hci_ev_le_ltk_req)), 7021 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */ 7022 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ, 7023 hci_le_remote_conn_param_req_evt, 7024 sizeof(struct hci_ev_le_remote_conn_param_req)), 7025 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */ 7026 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE, 7027 hci_le_enh_conn_complete_evt, 7028 sizeof(struct hci_ev_le_enh_conn_complete)), 7029 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */ 7030 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt, 7031 sizeof(struct hci_ev_le_direct_adv_report), 7032 HCI_MAX_EVENT_SIZE), 7033 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */ 7034 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt, 7035 sizeof(struct hci_ev_le_phy_update_complete)), 7036 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */ 7037 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, 7038 sizeof(struct hci_ev_le_ext_adv_report), 7039 HCI_MAX_EVENT_SIZE), 7040 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */ 7041 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED, 7042 hci_le_pa_sync_estabilished_evt, 7043 sizeof(struct hci_ev_le_pa_sync_established)), 7044 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ 7045 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, 7046 sizeof(struct hci_evt_le_ext_adv_set_term)), 7047 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */ 7048 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt, 7049 sizeof(struct hci_evt_le_cis_established)), 7050 /* [0x1a = HCI_EVT_LE_CIS_REQ] */ 7051 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt, 7052 sizeof(struct hci_evt_le_cis_req)), 7053 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */ 7054 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE, 7055 hci_le_create_big_complete_evt, 7056 sizeof(struct hci_evt_le_create_big_complete), 7057 HCI_MAX_EVENT_SIZE), 7058 /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */ 7059 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED, 7060 hci_le_big_sync_established_evt, 7061 sizeof(struct hci_evt_le_big_sync_estabilished), 7062 HCI_MAX_EVENT_SIZE), 7063 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */ 7064 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT, 7065 hci_le_big_info_adv_report_evt, 7066 sizeof(struct hci_evt_le_big_info_adv_report), 7067 HCI_MAX_EVENT_SIZE), 7068 }; 7069 7070 static void hci_le_meta_evt(struct hci_dev *hdev, void *data, 7071 struct sk_buff *skb, u16 *opcode, u8 *status, 7072 hci_req_complete_t *req_complete, 7073 hci_req_complete_skb_t *req_complete_skb) 7074 { 7075 struct hci_ev_le_meta *ev = data; 7076 const struct hci_le_ev *subev; 7077 7078 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent); 7079 7080 /* Only match event if command OGF is for LE */ 7081 if (hdev->sent_cmd && 7082 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 && 7083 hci_skb_event(hdev->sent_cmd) == ev->subevent) { 7084 *opcode = hci_skb_opcode(hdev->sent_cmd); 7085 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, 7086 req_complete_skb); 7087 } 7088 7089 subev = &hci_le_ev_table[ev->subevent]; 7090 if (!subev->func) 7091 return; 7092 7093 if (skb->len < subev->min_len) { 7094 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u", 7095 ev->subevent, skb->len, subev->min_len); 7096 return; 7097 } 7098 7099 /* Just warn if the length is over max_len size it still be 7100 * possible to partially parse the event so leave to callback to 7101 * decide if that is acceptable. 7102 */ 7103 if (skb->len > subev->max_len) 7104 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u", 7105 ev->subevent, skb->len, subev->max_len); 7106 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len); 7107 if (!data) 7108 return; 7109 7110 subev->func(hdev, data, skb); 7111 } 7112 7113 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, 7114 u8 event, struct sk_buff *skb) 7115 { 7116 struct hci_ev_cmd_complete *ev; 7117 struct hci_event_hdr *hdr; 7118 7119 if (!skb) 7120 return false; 7121 7122 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr)); 7123 if (!hdr) 7124 return false; 7125 7126 if (event) { 7127 if (hdr->evt != event) 7128 return false; 7129 return true; 7130 } 7131 7132 /* Check if request ended in Command Status - no way to retrieve 7133 * any extra parameters in this case. 7134 */ 7135 if (hdr->evt == HCI_EV_CMD_STATUS) 7136 return false; 7137 7138 if (hdr->evt != HCI_EV_CMD_COMPLETE) { 7139 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", 7140 hdr->evt); 7141 return false; 7142 } 7143 7144 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev)); 7145 if (!ev) 7146 return false; 7147 7148 if (opcode != __le16_to_cpu(ev->opcode)) { 7149 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, 7150 __le16_to_cpu(ev->opcode)); 7151 return false; 7152 } 7153 7154 return true; 7155 } 7156 7157 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, 7158 struct sk_buff *skb) 7159 { 7160 struct hci_ev_le_advertising_info *adv; 7161 struct hci_ev_le_direct_adv_info *direct_adv; 7162 struct hci_ev_le_ext_adv_info *ext_adv; 7163 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data; 7164 const struct hci_ev_conn_request *conn_request = (void *)skb->data; 7165 7166 hci_dev_lock(hdev); 7167 7168 /* If we are currently suspended and this is the first BT event seen, 7169 * save the wake reason associated with the event. 7170 */ 7171 if (!hdev->suspended || hdev->wake_reason) 7172 goto unlock; 7173 7174 /* Default to remote wake. Values for wake_reason are documented in the 7175 * Bluez mgmt api docs. 7176 */ 7177 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE; 7178 7179 /* Once configured for remote wakeup, we should only wake up for 7180 * reconnections. It's useful to see which device is waking us up so 7181 * keep track of the bdaddr of the connection event that woke us up. 7182 */ 7183 if (event == HCI_EV_CONN_REQUEST) { 7184 bacpy(&hdev->wake_addr, &conn_complete->bdaddr); 7185 hdev->wake_addr_type = BDADDR_BREDR; 7186 } else if (event == HCI_EV_CONN_COMPLETE) { 7187 bacpy(&hdev->wake_addr, &conn_request->bdaddr); 7188 hdev->wake_addr_type = BDADDR_BREDR; 7189 } else if (event == HCI_EV_LE_META) { 7190 struct hci_ev_le_meta *le_ev = (void *)skb->data; 7191 u8 subevent = le_ev->subevent; 7192 u8 *ptr = &skb->data[sizeof(*le_ev)]; 7193 u8 num_reports = *ptr; 7194 7195 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT || 7196 subevent == HCI_EV_LE_DIRECT_ADV_REPORT || 7197 subevent == HCI_EV_LE_EXT_ADV_REPORT) && 7198 num_reports) { 7199 adv = (void *)(ptr + 1); 7200 direct_adv = (void *)(ptr + 1); 7201 ext_adv = (void *)(ptr + 1); 7202 7203 switch (subevent) { 7204 case HCI_EV_LE_ADVERTISING_REPORT: 7205 bacpy(&hdev->wake_addr, &adv->bdaddr); 7206 hdev->wake_addr_type = adv->bdaddr_type; 7207 break; 7208 case HCI_EV_LE_DIRECT_ADV_REPORT: 7209 bacpy(&hdev->wake_addr, &direct_adv->bdaddr); 7210 hdev->wake_addr_type = direct_adv->bdaddr_type; 7211 break; 7212 case HCI_EV_LE_EXT_ADV_REPORT: 7213 bacpy(&hdev->wake_addr, &ext_adv->bdaddr); 7214 hdev->wake_addr_type = ext_adv->bdaddr_type; 7215 break; 7216 } 7217 } 7218 } else { 7219 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED; 7220 } 7221 7222 unlock: 7223 hci_dev_unlock(hdev); 7224 } 7225 7226 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \ 7227 [_op] = { \ 7228 .req = false, \ 7229 .func = _func, \ 7230 .min_len = _min_len, \ 7231 .max_len = _max_len, \ 7232 } 7233 7234 #define HCI_EV(_op, _func, _len) \ 7235 HCI_EV_VL(_op, _func, _len, _len) 7236 7237 #define HCI_EV_STATUS(_op, _func) \ 7238 HCI_EV(_op, _func, sizeof(struct hci_ev_status)) 7239 7240 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \ 7241 [_op] = { \ 7242 .req = true, \ 7243 .func_req = _func, \ 7244 .min_len = _min_len, \ 7245 .max_len = _max_len, \ 7246 } 7247 7248 #define HCI_EV_REQ(_op, _func, _len) \ 7249 HCI_EV_REQ_VL(_op, _func, _len, _len) 7250 7251 /* Entries in this table shall have their position according to the event opcode 7252 * they handle so the use of the macros above is recommend since it does attempt 7253 * to initialize at its proper index using Designated Initializers that way 7254 * events without a callback function don't have entered. 7255 */ 7256 static const struct hci_ev { 7257 bool req; 7258 union { 7259 void (*func)(struct hci_dev *hdev, void *data, 7260 struct sk_buff *skb); 7261 void (*func_req)(struct hci_dev *hdev, void *data, 7262 struct sk_buff *skb, u16 *opcode, u8 *status, 7263 hci_req_complete_t *req_complete, 7264 hci_req_complete_skb_t *req_complete_skb); 7265 }; 7266 u16 min_len; 7267 u16 max_len; 7268 } hci_ev_table[U8_MAX + 1] = { 7269 /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */ 7270 HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt), 7271 /* [0x02 = HCI_EV_INQUIRY_RESULT] */ 7272 HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt, 7273 sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE), 7274 /* [0x03 = HCI_EV_CONN_COMPLETE] */ 7275 HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt, 7276 sizeof(struct hci_ev_conn_complete)), 7277 /* [0x04 = HCI_EV_CONN_REQUEST] */ 7278 HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt, 7279 sizeof(struct hci_ev_conn_request)), 7280 /* [0x05 = HCI_EV_DISCONN_COMPLETE] */ 7281 HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt, 7282 sizeof(struct hci_ev_disconn_complete)), 7283 /* [0x06 = HCI_EV_AUTH_COMPLETE] */ 7284 HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt, 7285 sizeof(struct hci_ev_auth_complete)), 7286 /* [0x07 = HCI_EV_REMOTE_NAME] */ 7287 HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt, 7288 sizeof(struct hci_ev_remote_name)), 7289 /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */ 7290 HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt, 7291 sizeof(struct hci_ev_encrypt_change)), 7292 /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */ 7293 HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE, 7294 hci_change_link_key_complete_evt, 7295 sizeof(struct hci_ev_change_link_key_complete)), 7296 /* [0x0b = HCI_EV_REMOTE_FEATURES] */ 7297 HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt, 7298 sizeof(struct hci_ev_remote_features)), 7299 /* [0x0e = HCI_EV_CMD_COMPLETE] */ 7300 HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt, 7301 sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE), 7302 /* [0x0f = HCI_EV_CMD_STATUS] */ 7303 HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt, 7304 sizeof(struct hci_ev_cmd_status)), 7305 /* [0x10 = HCI_EV_CMD_STATUS] */ 7306 HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt, 7307 sizeof(struct hci_ev_hardware_error)), 7308 /* [0x12 = HCI_EV_ROLE_CHANGE] */ 7309 HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt, 7310 sizeof(struct hci_ev_role_change)), 7311 /* [0x13 = HCI_EV_NUM_COMP_PKTS] */ 7312 HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt, 7313 sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE), 7314 /* [0x14 = HCI_EV_MODE_CHANGE] */ 7315 HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt, 7316 sizeof(struct hci_ev_mode_change)), 7317 /* [0x16 = HCI_EV_PIN_CODE_REQ] */ 7318 HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt, 7319 sizeof(struct hci_ev_pin_code_req)), 7320 /* [0x17 = HCI_EV_LINK_KEY_REQ] */ 7321 HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt, 7322 sizeof(struct hci_ev_link_key_req)), 7323 /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */ 7324 HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt, 7325 sizeof(struct hci_ev_link_key_notify)), 7326 /* [0x1c = HCI_EV_CLOCK_OFFSET] */ 7327 HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt, 7328 sizeof(struct hci_ev_clock_offset)), 7329 /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */ 7330 HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt, 7331 sizeof(struct hci_ev_pkt_type_change)), 7332 /* [0x20 = HCI_EV_PSCAN_REP_MODE] */ 7333 HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt, 7334 sizeof(struct hci_ev_pscan_rep_mode)), 7335 /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */ 7336 HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI, 7337 hci_inquiry_result_with_rssi_evt, 7338 sizeof(struct hci_ev_inquiry_result_rssi), 7339 HCI_MAX_EVENT_SIZE), 7340 /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */ 7341 HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt, 7342 sizeof(struct hci_ev_remote_ext_features)), 7343 /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */ 7344 HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt, 7345 sizeof(struct hci_ev_sync_conn_complete)), 7346 /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */ 7347 HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT, 7348 hci_extended_inquiry_result_evt, 7349 sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE), 7350 /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */ 7351 HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt, 7352 sizeof(struct hci_ev_key_refresh_complete)), 7353 /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */ 7354 HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt, 7355 sizeof(struct hci_ev_io_capa_request)), 7356 /* [0x32 = HCI_EV_IO_CAPA_REPLY] */ 7357 HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt, 7358 sizeof(struct hci_ev_io_capa_reply)), 7359 /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */ 7360 HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt, 7361 sizeof(struct hci_ev_user_confirm_req)), 7362 /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */ 7363 HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt, 7364 sizeof(struct hci_ev_user_passkey_req)), 7365 /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */ 7366 HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt, 7367 sizeof(struct hci_ev_remote_oob_data_request)), 7368 /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */ 7369 HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt, 7370 sizeof(struct hci_ev_simple_pair_complete)), 7371 /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */ 7372 HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt, 7373 sizeof(struct hci_ev_user_passkey_notify)), 7374 /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */ 7375 HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt, 7376 sizeof(struct hci_ev_keypress_notify)), 7377 /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */ 7378 HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt, 7379 sizeof(struct hci_ev_remote_host_features)), 7380 /* [0x3e = HCI_EV_LE_META] */ 7381 HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, 7382 sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), 7383 #if IS_ENABLED(CONFIG_BT_HS) 7384 /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */ 7385 HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt, 7386 sizeof(struct hci_ev_phy_link_complete)), 7387 /* [0x41 = HCI_EV_CHANNEL_SELECTED] */ 7388 HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt, 7389 sizeof(struct hci_ev_channel_selected)), 7390 /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */ 7391 HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE, 7392 hci_disconn_loglink_complete_evt, 7393 sizeof(struct hci_ev_disconn_logical_link_complete)), 7394 /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */ 7395 HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt, 7396 sizeof(struct hci_ev_logical_link_complete)), 7397 /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */ 7398 HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE, 7399 hci_disconn_phylink_complete_evt, 7400 sizeof(struct hci_ev_disconn_phy_link_complete)), 7401 #endif 7402 /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */ 7403 HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt, 7404 sizeof(struct hci_ev_num_comp_blocks)), 7405 /* [0xff = HCI_EV_VENDOR] */ 7406 HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE), 7407 }; 7408 7409 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb, 7410 u16 *opcode, u8 *status, 7411 hci_req_complete_t *req_complete, 7412 hci_req_complete_skb_t *req_complete_skb) 7413 { 7414 const struct hci_ev *ev = &hci_ev_table[event]; 7415 void *data; 7416 7417 if (!ev->func) 7418 return; 7419 7420 if (skb->len < ev->min_len) { 7421 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u", 7422 event, skb->len, ev->min_len); 7423 return; 7424 } 7425 7426 /* Just warn if the length is over max_len size it still be 7427 * possible to partially parse the event so leave to callback to 7428 * decide if that is acceptable. 7429 */ 7430 if (skb->len > ev->max_len) 7431 bt_dev_warn_ratelimited(hdev, 7432 "unexpected event 0x%2.2x length: %u > %u", 7433 event, skb->len, ev->max_len); 7434 7435 data = hci_ev_skb_pull(hdev, skb, event, ev->min_len); 7436 if (!data) 7437 return; 7438 7439 if (ev->req) 7440 ev->func_req(hdev, data, skb, opcode, status, req_complete, 7441 req_complete_skb); 7442 else 7443 ev->func(hdev, data, skb); 7444 } 7445 7446 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 7447 { 7448 struct hci_event_hdr *hdr = (void *) skb->data; 7449 hci_req_complete_t req_complete = NULL; 7450 hci_req_complete_skb_t req_complete_skb = NULL; 7451 struct sk_buff *orig_skb = NULL; 7452 u8 status = 0, event, req_evt = 0; 7453 u16 opcode = HCI_OP_NOP; 7454 7455 if (skb->len < sizeof(*hdr)) { 7456 bt_dev_err(hdev, "Malformed HCI Event"); 7457 goto done; 7458 } 7459 7460 kfree_skb(hdev->recv_event); 7461 hdev->recv_event = skb_clone(skb, GFP_KERNEL); 7462 7463 event = hdr->evt; 7464 if (!event) { 7465 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x", 7466 event); 7467 goto done; 7468 } 7469 7470 /* Only match event if command OGF is not for LE */ 7471 if (hdev->sent_cmd && 7472 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 && 7473 hci_skb_event(hdev->sent_cmd) == event) { 7474 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd), 7475 status, &req_complete, &req_complete_skb); 7476 req_evt = event; 7477 } 7478 7479 /* If it looks like we might end up having to call 7480 * req_complete_skb, store a pristine copy of the skb since the 7481 * various handlers may modify the original one through 7482 * skb_pull() calls, etc. 7483 */ 7484 if (req_complete_skb || event == HCI_EV_CMD_STATUS || 7485 event == HCI_EV_CMD_COMPLETE) 7486 orig_skb = skb_clone(skb, GFP_KERNEL); 7487 7488 skb_pull(skb, HCI_EVENT_HDR_SIZE); 7489 7490 /* Store wake reason if we're suspended */ 7491 hci_store_wake_reason(hdev, event, skb); 7492 7493 bt_dev_dbg(hdev, "event 0x%2.2x", event); 7494 7495 hci_event_func(hdev, event, skb, &opcode, &status, &req_complete, 7496 &req_complete_skb); 7497 7498 if (req_complete) { 7499 req_complete(hdev, status, opcode); 7500 } else if (req_complete_skb) { 7501 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) { 7502 kfree_skb(orig_skb); 7503 orig_skb = NULL; 7504 } 7505 req_complete_skb(hdev, status, opcode, orig_skb); 7506 } 7507 7508 done: 7509 kfree_skb(orig_skb); 7510 kfree_skb(skb); 7511 hdev->stat.evt_rx++; 7512 } 7513