/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}
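/* Command Complete handlers: each hci_cc_* function below parses the
 * return parameters of one opcode and hands the HCI status byte back
 * to the event dispatcher.
 */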
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have a status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}
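/* Note that in the reset handler below the HCI_RESET flag is cleared
 * before the status is even checked, so it never stays set after a
 * failed reset.
 */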
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (le16_to_cpu(rp->num_keys) <= hdev->stored_num_keys)
		hdev->stored_num_keys -= le16_to_cpu(rp->num_keys);
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}
static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}
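/* Voice setting changes are forwarded to the driver through hdev->notify
 * so it can adapt its SCO configuration to the new setting.
 */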
static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}
static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
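/* The local feature bits cached below drive the ACL and (e)SCO packet
 * type setup that follows.
 */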
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->flow_ctl_mode = rp->mode;

	return rp->status;
}

static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}
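/* Page scan parameters are read from the controller only during init;
 * after that the corresponding write handlers keep the cached values
 * in sync.
 */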
static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}
static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
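/* LE buffer parameters are only cached here; per the Core spec a zero
 * LE MTU means the controller shares its BR/EDR ACL buffers.
 */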
static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
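/* When the random address just set is the current RPA, the expiry work
 * is re-armed so that the address gets rotated again after rpa_timeout
 * seconds.
 */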
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only if this is an adv instance, since handle 0x00 shall
	 * use HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		params->privacy_mode = cp->mode;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if there
			 * is any other instance enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
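/* The scan type (active vs. passive) cached by the two handlers below is
 * used in le_set_scan_enable_complete() to decide whether pending
 * advertising reports need to be cleared when scanning is enabled.
 */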
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
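/* Common completion logic for both the legacy and the extended variants
 * of the Set Scan Enable command.
 */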
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_bdaddr_list_clear(&hdev->le_accept_list);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);

	return rp->status;
}
static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);

	return rp->status;
}

static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);

	return rp->status;
}

static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}
static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}

static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
"status 0x%2.2x", rp->status); 2154 2155 if (rp->status) 2156 return rp->status; 2157 2158 hci_dev_lock(hdev); 2159 2160 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2161 if (conn) 2162 conn->rssi = rp->rssi; 2163 2164 hci_dev_unlock(hdev); 2165 2166 return rp->status; 2167 } 2168 2169 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data, 2170 struct sk_buff *skb) 2171 { 2172 struct hci_cp_read_tx_power *sent; 2173 struct hci_rp_read_tx_power *rp = data; 2174 struct hci_conn *conn; 2175 2176 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2177 2178 if (rp->status) 2179 return rp->status; 2180 2181 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); 2182 if (!sent) 2183 return rp->status; 2184 2185 hci_dev_lock(hdev); 2186 2187 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2188 if (!conn) 2189 goto unlock; 2190 2191 switch (sent->type) { 2192 case 0x00: 2193 conn->tx_power = rp->tx_power; 2194 break; 2195 case 0x01: 2196 conn->max_tx_power = rp->tx_power; 2197 break; 2198 } 2199 2200 unlock: 2201 hci_dev_unlock(hdev); 2202 return rp->status; 2203 } 2204 2205 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data, 2206 struct sk_buff *skb) 2207 { 2208 struct hci_ev_status *rp = data; 2209 u8 *mode; 2210 2211 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2212 2213 if (rp->status) 2214 return rp->status; 2215 2216 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); 2217 if (mode) 2218 hdev->ssp_debug_mode = *mode; 2219 2220 return rp->status; 2221 } 2222 2223 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 2224 { 2225 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2226 2227 if (status) { 2228 hci_conn_check_pending(hdev); 2229 return; 2230 } 2231 2232 set_bit(HCI_INQUIRY, &hdev->flags); 2233 } 2234 2235 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 2236 { 2237 struct hci_cp_create_conn *cp; 2238 struct hci_conn *conn; 2239 2240 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2241 2242 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 2243 if (!cp) 2244 return; 2245 2246 hci_dev_lock(hdev); 2247 2248 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2249 2250 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); 2251 2252 if (status) { 2253 if (conn && conn->state == BT_CONNECT) { 2254 if (status != 0x0c || conn->attempt > 2) { 2255 conn->state = BT_CLOSED; 2256 hci_connect_cfm(conn, status); 2257 hci_conn_del(conn); 2258 } else 2259 conn->state = BT_CONNECT2; 2260 } 2261 } else { 2262 if (!conn) { 2263 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, 2264 HCI_ROLE_MASTER); 2265 if (!conn) 2266 bt_dev_err(hdev, "no memory for new connection"); 2267 } 2268 } 2269 2270 hci_dev_unlock(hdev); 2271 } 2272 2273 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 2274 { 2275 struct hci_cp_add_sco *cp; 2276 struct hci_conn *acl, *sco; 2277 __u16 handle; 2278 2279 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2280 2281 if (!status) 2282 return; 2283 2284 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 2285 if (!cp) 2286 return; 2287 2288 handle = __le16_to_cpu(cp->handle); 2289 2290 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2291 2292 hci_dev_lock(hdev); 2293 2294 acl = hci_conn_hash_lookup_handle(hdev, handle); 2295 if (acl) { 2296 sco = acl->link; 2297 if (sco) { 2298 sco->state = BT_CLOSED; 2299 2300 hci_connect_cfm(sco, status); 2301 hci_conn_del(sco); 2302 } 2303 } 2304 2305 hci_dev_unlock(hdev); 2306 } 2307 2308 static void hci_cs_auth_requested(struct hci_dev 
*hdev, __u8 status) 2309 { 2310 struct hci_cp_auth_requested *cp; 2311 struct hci_conn *conn; 2312 2313 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2314 2315 if (!status) 2316 return; 2317 2318 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 2319 if (!cp) 2320 return; 2321 2322 hci_dev_lock(hdev); 2323 2324 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2325 if (conn) { 2326 if (conn->state == BT_CONFIG) { 2327 hci_connect_cfm(conn, status); 2328 hci_conn_drop(conn); 2329 } 2330 } 2331 2332 hci_dev_unlock(hdev); 2333 } 2334 2335 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 2336 { 2337 struct hci_cp_set_conn_encrypt *cp; 2338 struct hci_conn *conn; 2339 2340 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2341 2342 if (!status) 2343 return; 2344 2345 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 2346 if (!cp) 2347 return; 2348 2349 hci_dev_lock(hdev); 2350 2351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2352 if (conn) { 2353 if (conn->state == BT_CONFIG) { 2354 hci_connect_cfm(conn, status); 2355 hci_conn_drop(conn); 2356 } 2357 } 2358 2359 hci_dev_unlock(hdev); 2360 } 2361 2362 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 2363 struct hci_conn *conn) 2364 { 2365 if (conn->state != BT_CONFIG || !conn->out) 2366 return 0; 2367 2368 if (conn->pending_sec_level == BT_SECURITY_SDP) 2369 return 0; 2370 2371 /* Only request authentication for SSP connections or non-SSP 2372 * devices with sec_level MEDIUM or HIGH or if MITM protection 2373 * is requested. 2374 */ 2375 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 2376 conn->pending_sec_level != BT_SECURITY_FIPS && 2377 conn->pending_sec_level != BT_SECURITY_HIGH && 2378 conn->pending_sec_level != BT_SECURITY_MEDIUM) 2379 return 0; 2380 2381 return 1; 2382 } 2383 2384 static int hci_resolve_name(struct hci_dev *hdev, 2385 struct inquiry_entry *e) 2386 { 2387 struct hci_cp_remote_name_req cp; 2388 2389 memset(&cp, 0, sizeof(cp)); 2390 2391 bacpy(&cp.bdaddr, &e->data.bdaddr); 2392 cp.pscan_rep_mode = e->data.pscan_rep_mode; 2393 cp.pscan_mode = e->data.pscan_mode; 2394 cp.clock_offset = e->data.clock_offset; 2395 2396 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2397 } 2398 2399 static bool hci_resolve_next_name(struct hci_dev *hdev) 2400 { 2401 struct discovery_state *discov = &hdev->discovery; 2402 struct inquiry_entry *e; 2403 2404 if (list_empty(&discov->resolve)) 2405 return false; 2406 2407 /* We should stop if we already spent too much time resolving names. */ 2408 if (time_after(jiffies, discov->name_resolve_timeout)) { 2409 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); 2410 return false; 2411 } 2412 2413 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2414 if (!e) 2415 return false; 2416 2417 if (hci_resolve_name(hdev, e) == 0) { 2418 e->name_state = NAME_PENDING; 2419 return true; 2420 } 2421 2422 return false; 2423 } 2424 2425 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2426 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2427 { 2428 struct discovery_state *discov = &hdev->discovery; 2429 struct inquiry_entry *e; 2430 2431 /* Update the mgmt connected state if necessary. Be careful with 2432 * conn objects that exist but are not (yet) connected however. 2433 * Only those in BT_CONFIG or BT_CONNECTED states can be 2434 * considered connected. 
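 * (Connections still in BT_CONNECT or BT_CONNECT2 are presumably in
 * the middle of being set up, so reporting them to mgmt as connected
 * here would be premature.)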
2435 */ 2436 if (conn && 2437 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && 2438 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2439 mgmt_device_connected(hdev, conn, name, name_len); 2440 2441 if (discov->state == DISCOVERY_STOPPED) 2442 return; 2443 2444 if (discov->state == DISCOVERY_STOPPING) 2445 goto discov_complete; 2446 2447 if (discov->state != DISCOVERY_RESOLVING) 2448 return; 2449 2450 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2451 /* If the device was not found in the list of devices whose names are 2452 * still pending, there is no need to continue resolving the next name, 2453 * as that will be done upon receiving another Remote Name Request 2454 * Complete event. */ 2455 if (!e) 2456 return; 2457 2458 list_del(&e->list); 2459 2460 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; 2461 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, 2462 name, name_len); 2463 2464 if (hci_resolve_next_name(hdev)) 2465 return; 2466 2467 discov_complete: 2468 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2469 } 2470 2471 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2472 { 2473 struct hci_cp_remote_name_req *cp; 2474 struct hci_conn *conn; 2475 2476 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2477 2478 /* If successful, wait for the name req complete event before 2479 * checking whether authentication is needed. */ 2480 if (!status) 2481 return; 2482 2483 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2484 if (!cp) 2485 return; 2486 2487 hci_dev_lock(hdev); 2488 2489 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2490 2491 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2492 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2493 2494 if (!conn) 2495 goto unlock; 2496 2497 if (!hci_outgoing_auth_needed(hdev, conn)) 2498 goto unlock; 2499 2500 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2501 struct hci_cp_auth_requested auth_cp; 2502 2503 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2504 2505 auth_cp.handle = __cpu_to_le16(conn->handle); 2506 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2507 sizeof(auth_cp), &auth_cp); 2508 } 2509 2510 unlock: 2511 hci_dev_unlock(hdev); 2512 } 2513 2514 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2515 { 2516 struct hci_cp_read_remote_features *cp; 2517 struct hci_conn *conn; 2518 2519 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2520 2521 if (!status) 2522 return; 2523 2524 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2525 if (!cp) 2526 return; 2527 2528 hci_dev_lock(hdev); 2529 2530 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2531 if (conn) { 2532 if (conn->state == BT_CONFIG) { 2533 hci_connect_cfm(conn, status); 2534 hci_conn_drop(conn); 2535 } 2536 } 2537 2538 hci_dev_unlock(hdev); 2539 } 2540 2541 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2542 { 2543 struct hci_cp_read_remote_ext_features *cp; 2544 struct hci_conn *conn; 2545 2546 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2547 2548 if (!status) 2549 return; 2550 2551 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2552 if (!cp) 2553 return; 2554 2555 hci_dev_lock(hdev); 2556 2557 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2558 if (conn) { 2559 if (conn->state == BT_CONFIG) { 2560 hci_connect_cfm(conn, status); 2561 hci_conn_drop(conn); 2562 } 2563 } 2564 2565 hci_dev_unlock(hdev); 2566 } 2567 2568 static void hci_cs_setup_sync_conn(struct
hci_dev *hdev, __u8 status) 2569 { 2570 struct hci_cp_setup_sync_conn *cp; 2571 struct hci_conn *acl, *sco; 2572 __u16 handle; 2573 2574 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2575 2576 if (!status) 2577 return; 2578 2579 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2580 if (!cp) 2581 return; 2582 2583 handle = __le16_to_cpu(cp->handle); 2584 2585 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2586 2587 hci_dev_lock(hdev); 2588 2589 acl = hci_conn_hash_lookup_handle(hdev, handle); 2590 if (acl) { 2591 sco = acl->link; 2592 if (sco) { 2593 sco->state = BT_CLOSED; 2594 2595 hci_connect_cfm(sco, status); 2596 hci_conn_del(sco); 2597 } 2598 } 2599 2600 hci_dev_unlock(hdev); 2601 } 2602 2603 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2604 { 2605 struct hci_cp_enhanced_setup_sync_conn *cp; 2606 struct hci_conn *acl, *sco; 2607 __u16 handle; 2608 2609 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2610 2611 if (!status) 2612 return; 2613 2614 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2615 if (!cp) 2616 return; 2617 2618 handle = __le16_to_cpu(cp->handle); 2619 2620 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2621 2622 hci_dev_lock(hdev); 2623 2624 acl = hci_conn_hash_lookup_handle(hdev, handle); 2625 if (acl) { 2626 sco = acl->link; 2627 if (sco) { 2628 sco->state = BT_CLOSED; 2629 2630 hci_connect_cfm(sco, status); 2631 hci_conn_del(sco); 2632 } 2633 } 2634 2635 hci_dev_unlock(hdev); 2636 } 2637 2638 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2639 { 2640 struct hci_cp_sniff_mode *cp; 2641 struct hci_conn *conn; 2642 2643 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2644 2645 if (!status) 2646 return; 2647 2648 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2649 if (!cp) 2650 return; 2651 2652 hci_dev_lock(hdev); 2653 2654 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2655 if (conn) { 2656 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2657 2658 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2659 hci_sco_setup(conn, status); 2660 } 2661 2662 hci_dev_unlock(hdev); 2663 } 2664 2665 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2666 { 2667 struct hci_cp_exit_sniff_mode *cp; 2668 struct hci_conn *conn; 2669 2670 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2671 2672 if (!status) 2673 return; 2674 2675 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2676 if (!cp) 2677 return; 2678 2679 hci_dev_lock(hdev); 2680 2681 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2682 if (conn) { 2683 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2684 2685 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2686 hci_sco_setup(conn, status); 2687 } 2688 2689 hci_dev_unlock(hdev); 2690 } 2691 2692 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2693 { 2694 struct hci_cp_disconnect *cp; 2695 struct hci_conn_params *params; 2696 struct hci_conn *conn; 2697 bool mgmt_conn; 2698 2699 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2700 2701 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended 2702 * otherwise cleanup the connection immediately. 
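 * (While suspended, the Disconnect Complete event may be delayed or
 * never delivered, so waiting for it risks leaving stale connection
 * state behind.)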
2703 */ 2704 if (!status && !hdev->suspended) 2705 return; 2706 2707 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2708 if (!cp) 2709 return; 2710 2711 hci_dev_lock(hdev); 2712 2713 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2714 if (!conn) 2715 goto unlock; 2716 2717 if (status) { 2718 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2719 conn->dst_type, status); 2720 2721 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2722 hdev->cur_adv_instance = conn->adv_instance; 2723 hci_enable_advertising(hdev); 2724 } 2725 2726 goto done; 2727 } 2728 2729 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2730 2731 if (conn->type == ACL_LINK) { 2732 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2733 hci_remove_link_key(hdev, &conn->dst); 2734 } 2735 2736 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2737 if (params) { 2738 switch (params->auto_connect) { 2739 case HCI_AUTO_CONN_LINK_LOSS: 2740 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2741 break; 2742 fallthrough; 2743 2744 case HCI_AUTO_CONN_DIRECT: 2745 case HCI_AUTO_CONN_ALWAYS: 2746 list_del_init(¶ms->action); 2747 list_add(¶ms->action, &hdev->pend_le_conns); 2748 break; 2749 2750 default: 2751 break; 2752 } 2753 } 2754 2755 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2756 cp->reason, mgmt_conn); 2757 2758 hci_disconn_cfm(conn, cp->reason); 2759 2760 done: 2761 /* If the disconnection failed for any reason, the upper layer 2762 * does not retry to disconnect in current implementation. 2763 * Hence, we need to do some basic cleanup here and re-enable 2764 * advertising if necessary. 2765 */ 2766 hci_conn_del(conn); 2767 unlock: 2768 hci_dev_unlock(hdev); 2769 } 2770 2771 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2772 { 2773 /* When using controller based address resolution, then the new 2774 * address types 0x02 and 0x03 are used. These types need to be 2775 * converted back into either public address or random address type 2776 */ 2777 switch (type) { 2778 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2779 if (resolved) 2780 *resolved = true; 2781 return ADDR_LE_DEV_PUBLIC; 2782 case ADDR_LE_DEV_RANDOM_RESOLVED: 2783 if (resolved) 2784 *resolved = true; 2785 return ADDR_LE_DEV_RANDOM; 2786 } 2787 2788 if (resolved) 2789 *resolved = false; 2790 return type; 2791 } 2792 2793 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2794 u8 peer_addr_type, u8 own_address_type, 2795 u8 filter_policy) 2796 { 2797 struct hci_conn *conn; 2798 2799 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2800 peer_addr_type); 2801 if (!conn) 2802 return; 2803 2804 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2805 2806 /* Store the initiator and responder address information which 2807 * is needed for SMP. These values will not change during the 2808 * lifetime of the connection. 2809 */ 2810 conn->init_addr_type = own_address_type; 2811 if (own_address_type == ADDR_LE_DEV_RANDOM) 2812 bacpy(&conn->init_addr, &hdev->random_addr); 2813 else 2814 bacpy(&conn->init_addr, &hdev->bdaddr); 2815 2816 conn->resp_addr_type = peer_addr_type; 2817 bacpy(&conn->resp_addr, peer_addr); 2818 2819 /* We don't want the connection attempt to stick around 2820 * indefinitely since LE doesn't have a page timeout concept 2821 * like BR/EDR. Set a timer for any connection that doesn't use 2822 * the accept list for connecting. 
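 * (Connections created from the accept list are retried by the
 * passive scanning logic instead, so they can safely run without
 * this timer.)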
2823 */ 2824 if (filter_policy == HCI_LE_USE_PEER_ADDR) 2825 queue_delayed_work(conn->hdev->workqueue, 2826 &conn->le_conn_timeout, 2827 conn->conn_timeout); 2828 } 2829 2830 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2831 { 2832 struct hci_cp_le_create_conn *cp; 2833 2834 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2835 2836 /* All connection failure handling is taken care of by the 2837 * hci_le_conn_failed function which is triggered by the HCI 2838 * request completion callbacks used for connecting. 2839 */ 2840 if (status) 2841 return; 2842 2843 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2844 if (!cp) 2845 return; 2846 2847 hci_dev_lock(hdev); 2848 2849 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2850 cp->own_address_type, cp->filter_policy); 2851 2852 hci_dev_unlock(hdev); 2853 } 2854 2855 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2856 { 2857 struct hci_cp_le_ext_create_conn *cp; 2858 2859 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2860 2861 /* All connection failure handling is taken care of by the 2862 * hci_le_conn_failed function which is triggered by the HCI 2863 * request completion callbacks used for connecting. 2864 */ 2865 if (status) 2866 return; 2867 2868 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2869 if (!cp) 2870 return; 2871 2872 hci_dev_lock(hdev); 2873 2874 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2875 cp->own_addr_type, cp->filter_policy); 2876 2877 hci_dev_unlock(hdev); 2878 } 2879 2880 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2881 { 2882 struct hci_cp_le_read_remote_features *cp; 2883 struct hci_conn *conn; 2884 2885 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2886 2887 if (!status) 2888 return; 2889 2890 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2891 if (!cp) 2892 return; 2893 2894 hci_dev_lock(hdev); 2895 2896 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2897 if (conn) { 2898 if (conn->state == BT_CONFIG) { 2899 hci_connect_cfm(conn, status); 2900 hci_conn_drop(conn); 2901 } 2902 } 2903 2904 hci_dev_unlock(hdev); 2905 } 2906 2907 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2908 { 2909 struct hci_cp_le_start_enc *cp; 2910 struct hci_conn *conn; 2911 2912 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2913 2914 if (!status) 2915 return; 2916 2917 hci_dev_lock(hdev); 2918 2919 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2920 if (!cp) 2921 goto unlock; 2922 2923 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2924 if (!conn) 2925 goto unlock; 2926 2927 if (conn->state != BT_CONNECTED) 2928 goto unlock; 2929 2930 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2931 hci_conn_drop(conn); 2932 2933 unlock: 2934 hci_dev_unlock(hdev); 2935 } 2936 2937 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2938 { 2939 struct hci_cp_switch_role *cp; 2940 struct hci_conn *conn; 2941 2942 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2943 2944 if (!status) 2945 return; 2946 2947 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2948 if (!cp) 2949 return; 2950 2951 hci_dev_lock(hdev); 2952 2953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2954 if (conn) 2955 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2956 2957 hci_dev_unlock(hdev); 2958 } 2959 2960 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, 2961 struct sk_buff *skb) 2962 { 2963 struct hci_ev_status *ev = data; 2964 struct 
discovery_state *discov = &hdev->discovery; 2965 struct inquiry_entry *e; 2966 2967 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 2968 2969 hci_conn_check_pending(hdev); 2970 2971 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2972 return; 2973 2974 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2975 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2976 2977 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2978 return; 2979 2980 hci_dev_lock(hdev); 2981 2982 if (discov->state != DISCOVERY_FINDING) 2983 goto unlock; 2984 2985 if (list_empty(&discov->resolve)) { 2986 /* When BR/EDR inquiry is active and no LE scanning is in 2987 * progress, then change discovery state to indicate completion. 2988 * 2989 * When running LE scanning and BR/EDR inquiry simultaneously 2990 * and the LE scan already finished, then change the discovery 2991 * state to indicate completion. 2992 */ 2993 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2994 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2995 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2996 goto unlock; 2997 } 2998 2999 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 3000 if (e && hci_resolve_name(hdev, e) == 0) { 3001 e->name_state = NAME_PENDING; 3002 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 3003 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; 3004 } else { 3005 /* When BR/EDR inquiry is active and no LE scanning is in 3006 * progress, then change discovery state to indicate completion. 3007 * 3008 * When running LE scanning and BR/EDR inquiry simultaneously 3009 * and the LE scan already finished, then change the discovery 3010 * state to indicate completion. 3011 */ 3012 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3013 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3014 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3015 } 3016 3017 unlock: 3018 hci_dev_unlock(hdev); 3019 } 3020 3021 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, 3022 struct sk_buff *skb) 3023 { 3024 struct hci_ev_inquiry_result *ev = edata; 3025 struct inquiry_data data; 3026 int i; 3027 3028 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, 3029 flex_array_size(ev, info, ev->num))) 3030 return; 3031 3032 bt_dev_dbg(hdev, "num %d", ev->num); 3033 3034 if (!ev->num) 3035 return; 3036 3037 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3038 return; 3039 3040 hci_dev_lock(hdev); 3041 3042 for (i = 0; i < ev->num; i++) { 3043 struct inquiry_info *info = &ev->info[i]; 3044 u32 flags; 3045 3046 bacpy(&data.bdaddr, &info->bdaddr); 3047 data.pscan_rep_mode = info->pscan_rep_mode; 3048 data.pscan_period_mode = info->pscan_period_mode; 3049 data.pscan_mode = info->pscan_mode; 3050 memcpy(data.dev_class, info->dev_class, 3); 3051 data.clock_offset = info->clock_offset; 3052 data.rssi = HCI_RSSI_INVALID; 3053 data.ssp_mode = 0x00; 3054 3055 flags = hci_inquiry_cache_update(hdev, &data, false); 3056 3057 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3058 info->dev_class, HCI_RSSI_INVALID, 3059 flags, NULL, 0, NULL, 0); 3060 } 3061 3062 hci_dev_unlock(hdev); 3063 } 3064 3065 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, 3066 struct sk_buff *skb) 3067 { 3068 struct hci_ev_conn_complete *ev = data; 3069 struct hci_conn *conn; 3070 3071 if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) { 3072 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for invalid handle"); 3073 return; 3074 } 3075 3076 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3077 
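/* Look up the connection this event refers to; for incoming links
 * it may not exist yet and is created on demand below.
 */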
3078 hci_dev_lock(hdev); 3079 3080 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 3081 if (!conn) { 3082 /* Connection may not exist if auto-connected. Check the bredr 3083 * allowlist to see if this device is allowed to auto connect. 3084 * If link is an ACL type, create a connection class 3085 * automatically. 3086 * 3087 * Auto-connect will only occur if the event filter is 3088 * programmed with a given address. Right now, event filter is 3089 * only used during suspend. 3090 */ 3091 if (ev->link_type == ACL_LINK && 3092 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 3093 &ev->bdaddr, 3094 BDADDR_BREDR)) { 3095 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3096 HCI_ROLE_SLAVE); 3097 if (!conn) { 3098 bt_dev_err(hdev, "no memory for new conn"); 3099 goto unlock; 3100 } 3101 } else { 3102 if (ev->link_type != SCO_LINK) 3103 goto unlock; 3104 3105 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 3106 &ev->bdaddr); 3107 if (!conn) 3108 goto unlock; 3109 3110 conn->type = SCO_LINK; 3111 } 3112 } 3113 3114 /* The HCI_Connection_Complete event is only sent once per connection. 3115 * Processing it more than once per connection can corrupt kernel memory. 3116 * 3117 * As the connection handle is set here for the first time, it indicates 3118 * whether the connection is already set up. 3119 */ 3120 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 3121 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 3122 goto unlock; 3123 } 3124 3125 if (!ev->status) { 3126 conn->handle = __le16_to_cpu(ev->handle); 3127 3128 if (conn->type == ACL_LINK) { 3129 conn->state = BT_CONFIG; 3130 hci_conn_hold(conn); 3131 3132 if (!conn->out && !hci_conn_ssp_enabled(conn) && 3133 !hci_find_link_key(hdev, &ev->bdaddr)) 3134 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3135 else 3136 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3137 } else 3138 conn->state = BT_CONNECTED; 3139 3140 hci_debugfs_create_conn(conn); 3141 hci_conn_add_sysfs(conn); 3142 3143 if (test_bit(HCI_AUTH, &hdev->flags)) 3144 set_bit(HCI_CONN_AUTH, &conn->flags); 3145 3146 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 3147 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3148 3149 /* Get remote features */ 3150 if (conn->type == ACL_LINK) { 3151 struct hci_cp_read_remote_features cp; 3152 cp.handle = ev->handle; 3153 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 3154 sizeof(cp), &cp); 3155 3156 hci_req_update_scan(hdev); 3157 } 3158 3159 /* Set packet type for incoming connection */ 3160 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 3161 struct hci_cp_change_conn_ptype cp; 3162 cp.handle = ev->handle; 3163 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3164 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 3165 &cp); 3166 } 3167 } else { 3168 conn->state = BT_CLOSED; 3169 if (conn->type == ACL_LINK) 3170 mgmt_connect_failed(hdev, &conn->dst, conn->type, 3171 conn->dst_type, ev->status); 3172 } 3173 3174 if (conn->type == ACL_LINK) 3175 hci_sco_setup(conn, ev->status); 3176 3177 if (ev->status) { 3178 hci_connect_cfm(conn, ev->status); 3179 hci_conn_del(conn); 3180 } else if (ev->link_type == SCO_LINK) { 3181 switch (conn->setting & SCO_AIRMODE_MASK) { 3182 case SCO_AIRMODE_CVSD: 3183 if (hdev->notify) 3184 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 3185 break; 3186 } 3187 3188 hci_connect_cfm(conn, ev->status); 3189 } 3190 3191 unlock: 3192 hci_dev_unlock(hdev); 3193 3194 hci_conn_check_pending(hdev); 3195 } 3196 3197 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 3198 { 
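/* Decline an incoming connection request; reason 0x0f (rejected due
 * to unacceptable BD_ADDR) is what gets reported back to the peer.
 */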
3199 struct hci_cp_reject_conn_req cp; 3200 3201 bacpy(&cp.bdaddr, bdaddr); 3202 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 3203 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 3204 } 3205 3206 static void hci_conn_request_evt(struct hci_dev *hdev, void *data, 3207 struct sk_buff *skb) 3208 { 3209 struct hci_ev_conn_request *ev = data; 3210 int mask = hdev->link_mode; 3211 struct inquiry_entry *ie; 3212 struct hci_conn *conn; 3213 __u8 flags = 0; 3214 3215 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); 3216 3217 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 3218 &flags); 3219 3220 if (!(mask & HCI_LM_ACCEPT)) { 3221 hci_reject_conn(hdev, &ev->bdaddr); 3222 return; 3223 } 3224 3225 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, 3226 BDADDR_BREDR)) { 3227 hci_reject_conn(hdev, &ev->bdaddr); 3228 return; 3229 } 3230 3231 /* Require HCI_CONNECTABLE or an accept list entry to accept the 3232 * connection. These features are only touched through mgmt so 3233 * only do the checks if HCI_MGMT is set. 3234 */ 3235 if (hci_dev_test_flag(hdev, HCI_MGMT) && 3236 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 3237 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 3238 BDADDR_BREDR)) { 3239 hci_reject_conn(hdev, &ev->bdaddr); 3240 return; 3241 } 3242 3243 /* Connection accepted */ 3244 3245 hci_dev_lock(hdev); 3246 3247 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3248 if (ie) 3249 memcpy(ie->data.dev_class, ev->dev_class, 3); 3250 3251 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 3252 &ev->bdaddr); 3253 if (!conn) { 3254 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3255 HCI_ROLE_SLAVE); 3256 if (!conn) { 3257 bt_dev_err(hdev, "no memory for new connection"); 3258 hci_dev_unlock(hdev); 3259 return; 3260 } 3261 } 3262 3263 memcpy(conn->dev_class, ev->dev_class, 3); 3264 3265 hci_dev_unlock(hdev); 3266 3267 if (ev->link_type == ACL_LINK || 3268 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 3269 struct hci_cp_accept_conn_req cp; 3270 conn->state = BT_CONNECT; 3271 3272 bacpy(&cp.bdaddr, &ev->bdaddr); 3273 3274 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 3275 cp.role = 0x00; /* Become central */ 3276 else 3277 cp.role = 0x01; /* Remain peripheral */ 3278 3279 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 3280 } else if (!(flags & HCI_PROTO_DEFER)) { 3281 struct hci_cp_accept_sync_conn_req cp; 3282 conn->state = BT_CONNECT; 3283 3284 bacpy(&cp.bdaddr, &ev->bdaddr); 3285 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3286 3287 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 3288 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 3289 cp.max_latency = cpu_to_le16(0xffff); 3290 cp.content_format = cpu_to_le16(hdev->voice_setting); 3291 cp.retrans_effort = 0xff; 3292 3293 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 3294 &cp); 3295 } else { 3296 conn->state = BT_CONNECT2; 3297 hci_connect_cfm(conn, 0); 3298 } 3299 } 3300 3301 static u8 hci_to_mgmt_reason(u8 err) 3302 { 3303 switch (err) { 3304 case HCI_ERROR_CONNECTION_TIMEOUT: 3305 return MGMT_DEV_DISCONN_TIMEOUT; 3306 case HCI_ERROR_REMOTE_USER_TERM: 3307 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3308 case HCI_ERROR_REMOTE_POWER_OFF: 3309 return MGMT_DEV_DISCONN_REMOTE; 3310 case HCI_ERROR_LOCAL_HOST_TERM: 3311 return MGMT_DEV_DISCONN_LOCAL_HOST; 3312 default: 3313 return MGMT_DEV_DISCONN_UNKNOWN; 3314 } 3315 } 3316 3317 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, 3318 struct sk_buff *skb) 3319 { 3320 
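/* Final teardown of a link: update the mgmt state, re-arm any
 * auto-connect parameters and re-enable advertising if needed.
 */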
struct hci_ev_disconn_complete *ev = data; 3321 u8 reason; 3322 struct hci_conn_params *params; 3323 struct hci_conn *conn; 3324 bool mgmt_connected; 3325 3326 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3327 3328 hci_dev_lock(hdev); 3329 3330 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3331 if (!conn) 3332 goto unlock; 3333 3334 if (ev->status) { 3335 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3336 conn->dst_type, ev->status); 3337 goto unlock; 3338 } 3339 3340 conn->state = BT_CLOSED; 3341 3342 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3343 3344 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3345 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3346 else 3347 reason = hci_to_mgmt_reason(ev->reason); 3348 3349 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3350 reason, mgmt_connected); 3351 3352 if (conn->type == ACL_LINK) { 3353 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3354 hci_remove_link_key(hdev, &conn->dst); 3355 3356 hci_req_update_scan(hdev); 3357 } 3358 3359 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 3360 if (params) { 3361 switch (params->auto_connect) { 3362 case HCI_AUTO_CONN_LINK_LOSS: 3363 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3364 break; 3365 fallthrough; 3366 3367 case HCI_AUTO_CONN_DIRECT: 3368 case HCI_AUTO_CONN_ALWAYS: 3369 list_del_init(¶ms->action); 3370 list_add(¶ms->action, &hdev->pend_le_conns); 3371 hci_update_passive_scan(hdev); 3372 break; 3373 3374 default: 3375 break; 3376 } 3377 } 3378 3379 hci_disconn_cfm(conn, ev->reason); 3380 3381 /* Re-enable advertising if necessary, since it might 3382 * have been disabled by the connection. From the 3383 * HCI_LE_Set_Advertise_Enable command description in 3384 * the core specification (v4.0): 3385 * "The Controller shall continue advertising until the Host 3386 * issues an LE_Set_Advertise_Enable command with 3387 * Advertising_Enable set to 0x00 (Advertising is disabled) 3388 * or until a connection is created or until the Advertising 3389 * is timed out due to Directed Advertising." 
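 * (In other words, a peripheral's advertising stops implicitly once
 * a connection is created, so the host has to turn it back on after
 * the disconnection.)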
3390 */ 3391 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3392 hdev->cur_adv_instance = conn->adv_instance; 3393 hci_enable_advertising(hdev); 3394 } 3395 3396 hci_conn_del(conn); 3397 3398 unlock: 3399 hci_dev_unlock(hdev); 3400 } 3401 3402 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, 3403 struct sk_buff *skb) 3404 { 3405 struct hci_ev_auth_complete *ev = data; 3406 struct hci_conn *conn; 3407 3408 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3409 3410 hci_dev_lock(hdev); 3411 3412 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3413 if (!conn) 3414 goto unlock; 3415 3416 if (!ev->status) { 3417 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3418 3419 if (!hci_conn_ssp_enabled(conn) && 3420 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 3421 bt_dev_info(hdev, "re-auth of legacy device is not possible."); 3422 } else { 3423 set_bit(HCI_CONN_AUTH, &conn->flags); 3424 conn->sec_level = conn->pending_sec_level; 3425 } 3426 } else { 3427 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3428 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3429 3430 mgmt_auth_failed(conn, ev->status); 3431 } 3432 3433 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3434 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 3435 3436 if (conn->state == BT_CONFIG) { 3437 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3438 struct hci_cp_set_conn_encrypt cp; 3439 cp.handle = ev->handle; 3440 cp.encrypt = 0x01; 3441 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3442 &cp); 3443 } else { 3444 conn->state = BT_CONNECTED; 3445 hci_connect_cfm(conn, ev->status); 3446 hci_conn_drop(conn); 3447 } 3448 } else { 3449 hci_auth_cfm(conn, ev->status); 3450 3451 hci_conn_hold(conn); 3452 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3453 hci_conn_drop(conn); 3454 } 3455 3456 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3457 if (!ev->status) { 3458 struct hci_cp_set_conn_encrypt cp; 3459 cp.handle = ev->handle; 3460 cp.encrypt = 0x01; 3461 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3462 &cp); 3463 } else { 3464 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3465 hci_encrypt_cfm(conn, ev->status); 3466 } 3467 } 3468 3469 unlock: 3470 hci_dev_unlock(hdev); 3471 } 3472 3473 static void hci_remote_name_evt(struct hci_dev *hdev, void *data, 3474 struct sk_buff *skb) 3475 { 3476 struct hci_ev_remote_name *ev = data; 3477 struct hci_conn *conn; 3478 3479 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3480 3481 hci_conn_check_pending(hdev); 3482 3483 hci_dev_lock(hdev); 3484 3485 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3486 3487 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3488 goto check_auth; 3489 3490 if (ev->status == 0) 3491 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3492 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3493 else 3494 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3495 3496 check_auth: 3497 if (!conn) 3498 goto unlock; 3499 3500 if (!hci_outgoing_auth_needed(hdev, conn)) 3501 goto unlock; 3502 3503 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3504 struct hci_cp_auth_requested cp; 3505 3506 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3507 3508 cp.handle = __cpu_to_le16(conn->handle); 3509 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3510 } 3511 3512 unlock: 3513 hci_dev_unlock(hdev); 3514 } 3515 3516 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, 3517 u16 opcode, struct sk_buff *skb) 3518 { 3519 const struct hci_rp_read_enc_key_size *rp; 3520 
struct hci_conn *conn; 3521 u16 handle; 3522 3523 BT_DBG("%s status 0x%02x", hdev->name, status); 3524 3525 if (!skb || skb->len < sizeof(*rp)) { 3526 bt_dev_err(hdev, "invalid read key size response"); 3527 return; 3528 } 3529 3530 rp = (void *)skb->data; 3531 handle = le16_to_cpu(rp->handle); 3532 3533 hci_dev_lock(hdev); 3534 3535 conn = hci_conn_hash_lookup_handle(hdev, handle); 3536 if (!conn) 3537 goto unlock; 3538 3539 /* While unexpected, the read_enc_key_size command may fail. The most 3540 * secure approach is to then assume the key size is 0 to force a 3541 * disconnection. 3542 */ 3543 if (rp->status) { 3544 bt_dev_err(hdev, "failed to read key size for handle %u", 3545 handle); 3546 conn->enc_key_size = 0; 3547 } else { 3548 conn->enc_key_size = rp->key_size; 3549 } 3550 3551 hci_encrypt_cfm(conn, 0); 3552 3553 unlock: 3554 hci_dev_unlock(hdev); 3555 } 3556 3557 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, 3558 struct sk_buff *skb) 3559 { 3560 struct hci_ev_encrypt_change *ev = data; 3561 struct hci_conn *conn; 3562 3563 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3564 3565 hci_dev_lock(hdev); 3566 3567 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3568 if (!conn) 3569 goto unlock; 3570 3571 if (!ev->status) { 3572 if (ev->encrypt) { 3573 /* Encryption implies authentication */ 3574 set_bit(HCI_CONN_AUTH, &conn->flags); 3575 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3576 conn->sec_level = conn->pending_sec_level; 3577 3578 /* P-256 authentication key implies FIPS */ 3579 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3580 set_bit(HCI_CONN_FIPS, &conn->flags); 3581 3582 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3583 conn->type == LE_LINK) 3584 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3585 } else { 3586 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3587 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3588 } 3589 } 3590 3591 /* We should disregard the current RPA and generate a new one 3592 * whenever the encryption procedure fails. 3593 */ 3594 if (ev->status && conn->type == LE_LINK) { 3595 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3596 hci_adv_instances_set_rpa_expired(hdev, true); 3597 } 3598 3599 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3600 3601 /* Check link security requirements are met */ 3602 if (!hci_conn_check_link_mode(conn)) 3603 ev->status = HCI_ERROR_AUTH_FAILURE; 3604 3605 if (ev->status && conn->state == BT_CONNECTED) { 3606 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3607 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3608 3609 /* Notify upper layers so they can cleanup before 3610 * disconnecting. 3611 */ 3612 hci_encrypt_cfm(conn, ev->status); 3613 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3614 hci_conn_drop(conn); 3615 goto unlock; 3616 } 3617 3618 /* Try reading the encryption key size for encrypted ACL links */ 3619 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3620 struct hci_cp_read_enc_key_size cp; 3621 struct hci_request req; 3622 3623 /* Only send HCI_Read_Encryption_Key_Size if the 3624 * controller really supports it. If it doesn't, assume 3625 * the default size (16). 
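 * (commands[20] & 0x10 below tests the supported-commands bitmask;
 * octet 20 bit 4 advertises HCI_Read_Encryption_Key_Size support.)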
3626 */ 3627 if (!(hdev->commands[20] & 0x10)) { 3628 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3629 goto notify; 3630 } 3631 3632 hci_req_init(&req, hdev); 3633 3634 cp.handle = cpu_to_le16(conn->handle); 3635 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); 3636 3637 if (hci_req_run_skb(&req, read_enc_key_size_complete)) { 3638 bt_dev_err(hdev, "sending read key size failed"); 3639 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3640 goto notify; 3641 } 3642 3643 goto unlock; 3644 } 3645 3646 /* Set the default Authenticated Payload Timeout after 3647 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B 3648 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be 3649 * sent when the link is active and encryption is enabled; the conn 3650 * type can be either LE or ACL, and the controller must support LMP 3651 * Ping. AES-CCM encryption is required as well. 3652 */ 3653 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3654 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3655 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3656 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3657 struct hci_cp_write_auth_payload_to cp; 3658 3659 cp.handle = cpu_to_le16(conn->handle); 3660 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3661 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3662 sizeof(cp), &cp); 3663 } 3664 3665 notify: 3666 hci_encrypt_cfm(conn, ev->status); 3667 3668 unlock: 3669 hci_dev_unlock(hdev); 3670 } 3671 3672 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, 3673 struct sk_buff *skb) 3674 { 3675 struct hci_ev_change_link_key_complete *ev = data; 3676 struct hci_conn *conn; 3677 3678 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3679 3680 hci_dev_lock(hdev); 3681 3682 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3683 if (conn) { 3684 if (!ev->status) 3685 set_bit(HCI_CONN_SECURE, &conn->flags); 3686 3687 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3688 3689 hci_key_change_cfm(conn, ev->status); 3690 } 3691 3692 hci_dev_unlock(hdev); 3693 } 3694 3695 static void hci_remote_features_evt(struct hci_dev *hdev, void *data, 3696 struct sk_buff *skb) 3697 { 3698 struct hci_ev_remote_features *ev = data; 3699 struct hci_conn *conn; 3700 3701 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3702 3703 hci_dev_lock(hdev); 3704 3705 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3706 if (!conn) 3707 goto unlock; 3708 3709 if (!ev->status) 3710 memcpy(conn->features[0], ev->features, 8); 3711 3712 if (conn->state != BT_CONFIG) 3713 goto unlock; 3714 3715 if (!ev->status && lmp_ext_feat_capable(hdev) && 3716 lmp_ext_feat_capable(conn)) { 3717 struct hci_cp_read_remote_ext_features cp; 3718 cp.handle = ev->handle; 3719 cp.page = 0x01; 3720 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3721 sizeof(cp), &cp); 3722 goto unlock; 3723 } 3724 3725 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 3726 struct hci_cp_remote_name_req cp; 3727 memset(&cp, 0, sizeof(cp)); 3728 bacpy(&cp.bdaddr, &conn->dst); 3729 cp.pscan_rep_mode = 0x02; 3730 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3731 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3732 mgmt_device_connected(hdev, conn, NULL, 0); 3733 3734 if (!hci_outgoing_auth_needed(hdev, conn)) { 3735 conn->state = BT_CONNECTED; 3736 hci_connect_cfm(conn, ev->status); 3737 hci_conn_drop(conn); 3738 } 3739 3740 unlock: 3741 hci_dev_unlock(hdev); 3742 } 3743 3744 static
inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3745 { 3746 cancel_delayed_work(&hdev->cmd_timer); 3747 3748 if (!test_bit(HCI_RESET, &hdev->flags)) { 3749 if (ncmd) { 3750 cancel_delayed_work(&hdev->ncmd_timer); 3751 atomic_set(&hdev->cmd_cnt, 1); 3752 } else { 3753 schedule_delayed_work(&hdev->ncmd_timer, 3754 HCI_NCMD_TIMEOUT); 3755 } 3756 } 3757 } 3758 3759 #define HCI_CC_VL(_op, _func, _min, _max) \ 3760 { \ 3761 .op = _op, \ 3762 .func = _func, \ 3763 .min_len = _min, \ 3764 .max_len = _max, \ 3765 } 3766 3767 #define HCI_CC(_op, _func, _len) \ 3768 HCI_CC_VL(_op, _func, _len, _len) 3769 3770 #define HCI_CC_STATUS(_op, _func) \ 3771 HCI_CC(_op, _func, sizeof(struct hci_ev_status)) 3772 3773 static const struct hci_cc { 3774 u16 op; 3775 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 3776 u16 min_len; 3777 u16 max_len; 3778 } hci_cc_table[] = { 3779 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), 3780 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), 3781 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), 3782 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, 3783 hci_cc_remote_name_req_cancel), 3784 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, 3785 sizeof(struct hci_rp_role_discovery)), 3786 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, 3787 sizeof(struct hci_rp_read_link_policy)), 3788 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, 3789 sizeof(struct hci_rp_write_link_policy)), 3790 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, 3791 sizeof(struct hci_rp_read_def_link_policy)), 3792 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, 3793 hci_cc_write_def_link_policy), 3794 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), 3795 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, 3796 sizeof(struct hci_rp_read_stored_link_key)), 3797 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, 3798 sizeof(struct hci_rp_delete_stored_link_key)), 3799 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), 3800 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, 3801 sizeof(struct hci_rp_read_local_name)), 3802 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), 3803 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), 3804 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), 3805 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), 3806 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 3807 sizeof(struct hci_rp_read_class_of_dev)), 3808 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), 3809 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, 3810 sizeof(struct hci_rp_read_voice_setting)), 3811 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), 3812 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, 3813 sizeof(struct hci_rp_read_num_supported_iac)), 3814 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), 3815 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), 3816 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, 3817 sizeof(struct hci_rp_read_auth_payload_to)), 3818 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, 3819 sizeof(struct hci_rp_write_auth_payload_to)), 3820 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, 3821 sizeof(struct hci_rp_read_local_version)), 3822 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, 3823 
sizeof(struct hci_rp_read_local_commands)), 3824 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, 3825 sizeof(struct hci_rp_read_local_features)), 3826 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, 3827 sizeof(struct hci_rp_read_local_ext_features)), 3828 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, 3829 sizeof(struct hci_rp_read_buffer_size)), 3830 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, 3831 sizeof(struct hci_rp_read_bd_addr)), 3832 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, 3833 sizeof(struct hci_rp_read_local_pairing_opts)), 3834 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, 3835 sizeof(struct hci_rp_read_page_scan_activity)), 3836 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 3837 hci_cc_write_page_scan_activity), 3838 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, 3839 sizeof(struct hci_rp_read_page_scan_type)), 3840 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), 3841 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, 3842 sizeof(struct hci_rp_read_data_block_size)), 3843 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, 3844 sizeof(struct hci_rp_read_flow_control_mode)), 3845 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, 3846 sizeof(struct hci_rp_read_local_amp_info)), 3847 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, 3848 sizeof(struct hci_rp_read_clock)), 3849 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, 3850 sizeof(struct hci_rp_read_inq_rsp_tx_power)), 3851 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, 3852 hci_cc_read_def_err_data_reporting, 3853 sizeof(struct hci_rp_read_def_err_data_reporting)), 3854 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 3855 hci_cc_write_def_err_data_reporting), 3856 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, 3857 sizeof(struct hci_rp_pin_code_reply)), 3858 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, 3859 sizeof(struct hci_rp_pin_code_neg_reply)), 3860 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, 3861 sizeof(struct hci_rp_read_local_oob_data)), 3862 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, 3863 sizeof(struct hci_rp_read_local_oob_ext_data)), 3864 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, 3865 sizeof(struct hci_rp_le_read_buffer_size)), 3866 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, 3867 sizeof(struct hci_rp_le_read_local_features)), 3868 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, 3869 sizeof(struct hci_rp_le_read_adv_tx_power)), 3870 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 3871 sizeof(struct hci_rp_user_confirm_reply)), 3872 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, 3873 sizeof(struct hci_rp_user_confirm_reply)), 3874 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, 3875 sizeof(struct hci_rp_user_confirm_reply)), 3876 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, 3877 sizeof(struct hci_rp_user_confirm_reply)), 3878 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), 3879 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), 3880 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), 3881 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), 3882 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 3883 hci_cc_le_read_accept_list_size, 3884 sizeof(struct 
hci_rp_le_read_accept_list_size)), 3885 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), 3886 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, 3887 hci_cc_le_add_to_accept_list), 3888 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, 3889 hci_cc_le_del_from_accept_list), 3890 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, 3891 sizeof(struct hci_rp_le_read_supported_states)), 3892 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, 3893 sizeof(struct hci_rp_le_read_def_data_len)), 3894 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, 3895 hci_cc_le_write_def_data_len), 3896 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, 3897 hci_cc_le_add_to_resolv_list), 3898 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, 3899 hci_cc_le_del_from_resolv_list), 3900 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, 3901 hci_cc_le_clear_resolv_list), 3902 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, 3903 sizeof(struct hci_rp_le_read_resolv_list_size)), 3904 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 3905 hci_cc_le_set_addr_resolution_enable), 3906 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, 3907 sizeof(struct hci_rp_le_read_max_data_len)), 3908 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, 3909 hci_cc_write_le_host_supported), 3910 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), 3911 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, 3912 sizeof(struct hci_rp_read_rssi)), 3913 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, 3914 sizeof(struct hci_rp_read_tx_power)), 3915 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), 3916 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, 3917 hci_cc_le_set_ext_scan_param), 3918 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, 3919 hci_cc_le_set_ext_scan_enable), 3920 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), 3921 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 3922 hci_cc_le_read_num_adv_sets, 3923 sizeof(struct hci_rp_le_read_num_supported_adv_sets)), 3924 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, 3925 sizeof(struct hci_rp_le_set_ext_adv_params)), 3926 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, 3927 hci_cc_le_set_ext_adv_enable), 3928 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 3929 hci_cc_le_set_adv_set_random_addr), 3930 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), 3931 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), 3932 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, 3933 sizeof(struct hci_rp_le_read_transmit_power)), 3934 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode) 3935 }; 3936 3937 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, 3938 struct sk_buff *skb) 3939 { 3940 void *data; 3941 3942 if (skb->len < cc->min_len) { 3943 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", 3944 cc->op, skb->len, cc->min_len); 3945 return HCI_ERROR_UNSPECIFIED; 3946 } 3947 3948 /* Just warn if the length is over max_len, since it may still be 3949 * possible to partially parse the cc; leave it to the callback to 3950 * decide whether that is acceptable.
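 * (min_len, by contrast, is enforced strictly above, since the fixed
 * part of the reply must be present before it can be dereferenced.)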
3951 */ 3952 if (skb->len > cc->max_len) 3953 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", 3954 cc->op, skb->len, cc->max_len); 3955 3956 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); 3957 if (!data) 3958 return HCI_ERROR_UNSPECIFIED; 3959 3960 return cc->func(hdev, data, skb); 3961 } 3962 3963 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, 3964 struct sk_buff *skb, u16 *opcode, u8 *status, 3965 hci_req_complete_t *req_complete, 3966 hci_req_complete_skb_t *req_complete_skb) 3967 { 3968 struct hci_ev_cmd_complete *ev = data; 3969 int i; 3970 3971 *opcode = __le16_to_cpu(ev->opcode); 3972 3973 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 3974 3975 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { 3976 if (hci_cc_table[i].op == *opcode) { 3977 *status = hci_cc_func(hdev, &hci_cc_table[i], skb); 3978 break; 3979 } 3980 } 3981 3982 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 3983 3984 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 3985 req_complete_skb); 3986 3987 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 3988 bt_dev_err(hdev, 3989 "unexpected event for opcode 0x%4.4x", *opcode); 3990 return; 3991 } 3992 3993 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3994 queue_work(hdev->workqueue, &hdev->cmd_work); 3995 } 3996 3997 #define HCI_CS(_op, _func) \ 3998 { \ 3999 .op = _op, \ 4000 .func = _func, \ 4001 } 4002 4003 static const struct hci_cs { 4004 u16 op; 4005 void (*func)(struct hci_dev *hdev, __u8 status); 4006 } hci_cs_table[] = { 4007 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), 4008 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 4009 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 4010 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 4011 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 4012 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 4013 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 4014 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 4015 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4016 hci_cs_read_remote_ext_features), 4017 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4018 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, 4019 hci_cs_enhanced_setup_sync_conn), 4020 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), 4021 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), 4022 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), 4023 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), 4024 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 4025 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), 4026 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn) 4027 }; 4028 4029 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, 4030 struct sk_buff *skb, u16 *opcode, u8 *status, 4031 hci_req_complete_t *req_complete, 4032 hci_req_complete_skb_t *req_complete_skb) 4033 { 4034 struct hci_ev_cmd_status *ev = data; 4035 int i; 4036 4037 *opcode = __le16_to_cpu(ev->opcode); 4038 *status = ev->status; 4039 4040 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4041 4042 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { 4043 if (hci_cs_table[i].op == *opcode) { 4044 hci_cs_table[i].func(hdev, ev->status); 4045 break; 4046 } 4047 } 4048 4049 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4050 4051 /* Indicate request completion if the command failed. 
Also, if 4052 * we're not waiting for a special event and we get a success 4053 * command status, we should try to flag the request as completed 4054 * (since for this kind of command there will be no command 4055 * complete event). 4056 */ 4057 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { 4058 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 4059 req_complete_skb); 4060 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4061 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", 4062 *opcode); 4063 return; 4064 } 4065 } 4066 4067 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4068 queue_work(hdev->workqueue, &hdev->cmd_work); 4069 } 4070 4071 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, 4072 struct sk_buff *skb) 4073 { 4074 struct hci_ev_hardware_error *ev = data; 4075 4076 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); 4077 4078 hdev->hw_error_code = ev->code; 4079 4080 queue_work(hdev->req_workqueue, &hdev->error_reset); 4081 } 4082 4083 static void hci_role_change_evt(struct hci_dev *hdev, void *data, 4084 struct sk_buff *skb) 4085 { 4086 struct hci_ev_role_change *ev = data; 4087 struct hci_conn *conn; 4088 4089 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4090 4091 hci_dev_lock(hdev); 4092 4093 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4094 if (conn) { 4095 if (!ev->status) 4096 conn->role = ev->role; 4097 4098 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 4099 4100 hci_role_switch_cfm(conn, ev->status, ev->role); 4101 } 4102 4103 hci_dev_unlock(hdev); 4104 } 4105 4106 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, 4107 struct sk_buff *skb) 4108 { 4109 struct hci_ev_num_comp_pkts *ev = data; 4110 int i; 4111 4112 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, 4113 flex_array_size(ev, handles, ev->num))) 4114 return; 4115 4116 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 4117 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 4118 return; 4119 } 4120 4121 bt_dev_dbg(hdev, "num %d", ev->num); 4122 4123 for (i = 0; i < ev->num; i++) { 4124 struct hci_comp_pkts_info *info = &ev->handles[i]; 4125 struct hci_conn *conn; 4126 __u16 handle, count; 4127 4128 handle = __le16_to_cpu(info->handle); 4129 count = __le16_to_cpu(info->count); 4130 4131 conn = hci_conn_hash_lookup_handle(hdev, handle); 4132 if (!conn) 4133 continue; 4134 4135 conn->sent -= count; 4136 4137 switch (conn->type) { 4138 case ACL_LINK: 4139 hdev->acl_cnt += count; 4140 if (hdev->acl_cnt > hdev->acl_pkts) 4141 hdev->acl_cnt = hdev->acl_pkts; 4142 break; 4143 4144 case LE_LINK: 4145 if (hdev->le_pkts) { 4146 hdev->le_cnt += count; 4147 if (hdev->le_cnt > hdev->le_pkts) 4148 hdev->le_cnt = hdev->le_pkts; 4149 } else { 4150 hdev->acl_cnt += count; 4151 if (hdev->acl_cnt > hdev->acl_pkts) 4152 hdev->acl_cnt = hdev->acl_pkts; 4153 } 4154 break; 4155 4156 case SCO_LINK: 4157 hdev->sco_cnt += count; 4158 if (hdev->sco_cnt > hdev->sco_pkts) 4159 hdev->sco_cnt = hdev->sco_pkts; 4160 break; 4161 4162 default: 4163 bt_dev_err(hdev, "unknown type %d conn %p", 4164 conn->type, conn); 4165 break; 4166 } 4167 } 4168 4169 queue_work(hdev->workqueue, &hdev->tx_work); 4170 } 4171 4172 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 4173 __u16 handle) 4174 { 4175 struct hci_chan *chan; 4176 4177 switch (hdev->dev_type) { 4178 case HCI_PRIMARY: 4179 return hci_conn_hash_lookup_handle(hdev, handle); 4180 case HCI_AMP: 4181 chan = hci_chan_lookup_handle(hdev,
handle); 4182 if (chan) 4183 return chan->conn; 4184 break; 4185 default: 4186 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4187 break; 4188 } 4189 4190 return NULL; 4191 } 4192 4193 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, 4194 struct sk_buff *skb) 4195 { 4196 struct hci_ev_num_comp_blocks *ev = data; 4197 int i; 4198 4199 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, 4200 flex_array_size(ev, handles, ev->num_hndl))) 4201 return; 4202 4203 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 4204 bt_dev_err(hdev, "wrong event for mode %d", 4205 hdev->flow_ctl_mode); 4206 return; 4207 } 4208 4209 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, 4210 ev->num_hndl); 4211 4212 for (i = 0; i < ev->num_hndl; i++) { 4213 struct hci_comp_blocks_info *info = &ev->handles[i]; 4214 struct hci_conn *conn = NULL; 4215 __u16 handle, block_count; 4216 4217 handle = __le16_to_cpu(info->handle); 4218 block_count = __le16_to_cpu(info->blocks); 4219 4220 conn = __hci_conn_lookup_handle(hdev, handle); 4221 if (!conn) 4222 continue; 4223 4224 conn->sent -= block_count; 4225 4226 switch (conn->type) { 4227 case ACL_LINK: 4228 case AMP_LINK: 4229 hdev->block_cnt += block_count; 4230 if (hdev->block_cnt > hdev->num_blocks) 4231 hdev->block_cnt = hdev->num_blocks; 4232 break; 4233 4234 default: 4235 bt_dev_err(hdev, "unknown type %d conn %p", 4236 conn->type, conn); 4237 break; 4238 } 4239 } 4240 4241 queue_work(hdev->workqueue, &hdev->tx_work); 4242 } 4243 4244 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4245 struct sk_buff *skb) 4246 { 4247 struct hci_ev_mode_change *ev = data; 4248 struct hci_conn *conn; 4249 4250 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4251 4252 hci_dev_lock(hdev); 4253 4254 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4255 if (conn) { 4256 conn->mode = ev->mode; 4257 4258 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4259 &conn->flags)) { 4260 if (conn->mode == HCI_CM_ACTIVE) 4261 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4262 else 4263 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4264 } 4265 4266 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4267 hci_sco_setup(conn, ev->status); 4268 } 4269 4270 hci_dev_unlock(hdev); 4271 } 4272 4273 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4274 struct sk_buff *skb) 4275 { 4276 struct hci_ev_pin_code_req *ev = data; 4277 struct hci_conn *conn; 4278 4279 bt_dev_dbg(hdev, ""); 4280 4281 hci_dev_lock(hdev); 4282 4283 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4284 if (!conn) 4285 goto unlock; 4286 4287 if (conn->state == BT_CONNECTED) { 4288 hci_conn_hold(conn); 4289 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4290 hci_conn_drop(conn); 4291 } 4292 4293 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4294 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4295 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4296 sizeof(ev->bdaddr), &ev->bdaddr); 4297 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4298 u8 secure; 4299 4300 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4301 secure = 1; 4302 else 4303 secure = 0; 4304 4305 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4306 } 4307 4308 unlock: 4309 hci_dev_unlock(hdev); 4310 } 4311 4312 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4313 { 4314 if (key_type == HCI_LK_CHANGED_COMBINATION) 4315 return; 4316 4317 conn->pin_length = pin_len; 4318 conn->key_type = key_type; 4319 4320 switch 
(key_type) { 4321 case HCI_LK_LOCAL_UNIT: 4322 case HCI_LK_REMOTE_UNIT: 4323 case HCI_LK_DEBUG_COMBINATION: 4324 return; 4325 case HCI_LK_COMBINATION: 4326 if (pin_len == 16) 4327 conn->pending_sec_level = BT_SECURITY_HIGH; 4328 else 4329 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4330 break; 4331 case HCI_LK_UNAUTH_COMBINATION_P192: 4332 case HCI_LK_UNAUTH_COMBINATION_P256: 4333 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4334 break; 4335 case HCI_LK_AUTH_COMBINATION_P192: 4336 conn->pending_sec_level = BT_SECURITY_HIGH; 4337 break; 4338 case HCI_LK_AUTH_COMBINATION_P256: 4339 conn->pending_sec_level = BT_SECURITY_FIPS; 4340 break; 4341 } 4342 } 4343 4344 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4345 struct sk_buff *skb) 4346 { 4347 struct hci_ev_link_key_req *ev = data; 4348 struct hci_cp_link_key_reply cp; 4349 struct hci_conn *conn; 4350 struct link_key *key; 4351 4352 bt_dev_dbg(hdev, ""); 4353 4354 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4355 return; 4356 4357 hci_dev_lock(hdev); 4358 4359 key = hci_find_link_key(hdev, &ev->bdaddr); 4360 if (!key) { 4361 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4362 goto not_found; 4363 } 4364 4365 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4366 4367 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4368 if (conn) { 4369 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4370 4371 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4372 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4373 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4374 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4375 goto not_found; 4376 } 4377 4378 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4379 (conn->pending_sec_level == BT_SECURITY_HIGH || 4380 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4381 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4382 goto not_found; 4383 } 4384 4385 conn_set_key(conn, key->type, key->pin_len); 4386 } 4387 4388 bacpy(&cp.bdaddr, &ev->bdaddr); 4389 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4390 4391 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4392 4393 hci_dev_unlock(hdev); 4394 4395 return; 4396 4397 not_found: 4398 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4399 hci_dev_unlock(hdev); 4400 } 4401 4402 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4403 struct sk_buff *skb) 4404 { 4405 struct hci_ev_link_key_notify *ev = data; 4406 struct hci_conn *conn; 4407 struct link_key *key; 4408 bool persistent; 4409 u8 pin_len = 0; 4410 4411 bt_dev_dbg(hdev, ""); 4412 4413 hci_dev_lock(hdev); 4414 4415 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4416 if (!conn) 4417 goto unlock; 4418 4419 hci_conn_hold(conn); 4420 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4421 hci_conn_drop(conn); 4422 4423 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4424 conn_set_key(conn, ev->key_type, conn->pin_length); 4425 4426 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4427 goto unlock; 4428 4429 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4430 ev->key_type, pin_len, &persistent); 4431 if (!key) 4432 goto unlock; 4433 4434 /* Update connection information since adding the key will have 4435 * fixed up the type in the case of changed combination keys. 
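* (conn_set_key() deliberately ignores HCI_LK_CHANGED_COMBINATION, as seen above, so when the event carries that type the stored key's real type is applied again below to keep conn->pending_sec_level consistent.)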
4436 */ 4437 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4438 conn_set_key(conn, key->type, key->pin_len); 4439 4440 mgmt_new_link_key(hdev, key, persistent); 4441 4442 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4443 * is set. If it's not set simply remove the key from the kernel 4444 * list (we've still notified user space about it but with 4445 * store_hint being 0). 4446 */ 4447 if (key->type == HCI_LK_DEBUG_COMBINATION && 4448 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4449 list_del_rcu(&key->list); 4450 kfree_rcu(key, rcu); 4451 goto unlock; 4452 } 4453 4454 if (persistent) 4455 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4456 else 4457 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4458 4459 unlock: 4460 hci_dev_unlock(hdev); 4461 } 4462 4463 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4464 struct sk_buff *skb) 4465 { 4466 struct hci_ev_clock_offset *ev = data; 4467 struct hci_conn *conn; 4468 4469 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4470 4471 hci_dev_lock(hdev); 4472 4473 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4474 if (conn && !ev->status) { 4475 struct inquiry_entry *ie; 4476 4477 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4478 if (ie) { 4479 ie->data.clock_offset = ev->clock_offset; 4480 ie->timestamp = jiffies; 4481 } 4482 } 4483 4484 hci_dev_unlock(hdev); 4485 } 4486 4487 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4488 struct sk_buff *skb) 4489 { 4490 struct hci_ev_pkt_type_change *ev = data; 4491 struct hci_conn *conn; 4492 4493 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4494 4495 hci_dev_lock(hdev); 4496 4497 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4498 if (conn && !ev->status) 4499 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4500 4501 hci_dev_unlock(hdev); 4502 } 4503 4504 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4505 struct sk_buff *skb) 4506 { 4507 struct hci_ev_pscan_rep_mode *ev = data; 4508 struct inquiry_entry *ie; 4509 4510 bt_dev_dbg(hdev, ""); 4511 4512 hci_dev_lock(hdev); 4513 4514 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4515 if (ie) { 4516 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4517 ie->timestamp = jiffies; 4518 } 4519 4520 hci_dev_unlock(hdev); 4521 } 4522 4523 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4524 struct sk_buff *skb) 4525 { 4526 struct hci_ev_inquiry_result_rssi *ev = edata; 4527 struct inquiry_data data; 4528 int i; 4529 4530 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4531 4532 if (!ev->num) 4533 return; 4534 4535 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4536 return; 4537 4538 hci_dev_lock(hdev); 4539 4540 if (skb->len == array_size(ev->num, 4541 sizeof(struct inquiry_info_rssi_pscan))) { 4542 struct inquiry_info_rssi_pscan *info; 4543 4544 for (i = 0; i < ev->num; i++) { 4545 u32 flags; 4546 4547 info = hci_ev_skb_pull(hdev, skb, 4548 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4549 sizeof(*info)); 4550 if (!info) { 4551 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4552 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4553 goto unlock; 4554 } 4555 4556 bacpy(&data.bdaddr, &info->bdaddr); 4557 data.pscan_rep_mode = info->pscan_rep_mode; 4558 data.pscan_period_mode = info->pscan_period_mode; 4559 data.pscan_mode = info->pscan_mode; 4560 memcpy(data.dev_class, info->dev_class, 3); 4561 data.clock_offset = info->clock_offset; 4562 data.rssi = info->rssi; 4563 data.ssp_mode = 0x00; 4564 4565 flags = hci_inquiry_cache_update(hdev, 
&data, false); 4566 4567 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4568 info->dev_class, info->rssi, 4569 flags, NULL, 0, NULL, 0); 4570 } 4571 } else if (skb->len == array_size(ev->num, 4572 sizeof(struct inquiry_info_rssi))) { 4573 struct inquiry_info_rssi *info; 4574 4575 for (i = 0; i < ev->num; i++) { 4576 u32 flags; 4577 4578 info = hci_ev_skb_pull(hdev, skb, 4579 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4580 sizeof(*info)); 4581 if (!info) { 4582 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4583 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4584 goto unlock; 4585 } 4586 4587 bacpy(&data.bdaddr, &info->bdaddr); 4588 data.pscan_rep_mode = info->pscan_rep_mode; 4589 data.pscan_period_mode = info->pscan_period_mode; 4590 data.pscan_mode = 0x00; 4591 memcpy(data.dev_class, info->dev_class, 3); 4592 data.clock_offset = info->clock_offset; 4593 data.rssi = info->rssi; 4594 data.ssp_mode = 0x00; 4595 4596 flags = hci_inquiry_cache_update(hdev, &data, false); 4597 4598 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4599 info->dev_class, info->rssi, 4600 flags, NULL, 0, NULL, 0); 4601 } 4602 } else { 4603 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4604 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4605 } 4606 unlock: 4607 hci_dev_unlock(hdev); 4608 } 4609 4610 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4611 struct sk_buff *skb) 4612 { 4613 struct hci_ev_remote_ext_features *ev = data; 4614 struct hci_conn *conn; 4615 4616 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4617 4618 hci_dev_lock(hdev); 4619 4620 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4621 if (!conn) 4622 goto unlock; 4623 4624 if (ev->page < HCI_MAX_PAGES) 4625 memcpy(conn->features[ev->page], ev->features, 8); 4626 4627 if (!ev->status && ev->page == 0x01) { 4628 struct inquiry_entry *ie; 4629 4630 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4631 if (ie) 4632 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4633 4634 if (ev->features[0] & LMP_HOST_SSP) { 4635 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4636 } else { 4637 /* It is mandatory by the Bluetooth specification that 4638 * Extended Inquiry Results are only used when Secure 4639 * Simple Pairing is enabled, but some devices violate 4640 * this. 
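* (LMP_HOST_SSP here is the remote host's Secure Simple Pairing support bit from extended features page 1, which was copied into conn->features[] just above.)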
4641 * 4642 * To make these devices work, the internal SSP 4643 * enabled flag needs to be cleared if the remote host 4644 * features do not indicate SSP support. */ 4645 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4646 } 4647 4648 if (ev->features[0] & LMP_HOST_SC) 4649 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4650 } 4651 4652 if (conn->state != BT_CONFIG) 4653 goto unlock; 4654 4655 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4656 struct hci_cp_remote_name_req cp; 4657 memset(&cp, 0, sizeof(cp)); 4658 bacpy(&cp.bdaddr, &conn->dst); 4659 cp.pscan_rep_mode = 0x02; 4660 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4661 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4662 mgmt_device_connected(hdev, conn, NULL, 0); 4663 4664 if (!hci_outgoing_auth_needed(hdev, conn)) { 4665 conn->state = BT_CONNECTED; 4666 hci_connect_cfm(conn, ev->status); 4667 hci_conn_drop(conn); 4668 } 4669 4670 unlock: 4671 hci_dev_unlock(hdev); 4672 } 4673 4674 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4675 struct sk_buff *skb) 4676 { 4677 struct hci_ev_sync_conn_complete *ev = data; 4678 struct hci_conn *conn; 4679 4680 switch (ev->link_type) { 4681 case SCO_LINK: 4682 case ESCO_LINK: 4683 break; 4684 default: 4685 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type 4686 * for HCI_Synchronous_Connection_Complete is limited to 4687 * either SCO or eSCO. 4688 */ 4689 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); 4690 return; 4691 } 4692 4693 if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) { 4694 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete for invalid handle"); 4695 return; 4696 } 4697 4698 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4699 4700 hci_dev_lock(hdev); 4701 4702 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4703 if (!conn) { 4704 if (ev->link_type == ESCO_LINK) 4705 goto unlock; 4706 4707 /* When the link type in the event indicates a SCO connection 4708 * and lookup of the connection object fails, then check 4709 * if an eSCO connection object exists. 4710 * 4711 * The core limits the synchronous connections to either 4712 * SCO or eSCO. The eSCO connection is preferred and attempted 4713 * first; until it is successfully established, 4714 * the link type will be hinted as eSCO. 4715 */ 4716 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4717 if (!conn) 4718 goto unlock; 4719 } 4720 4721 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. 4722 * Processing it more than once per connection can corrupt kernel memory. 4723 * 4724 * As the connection handle is set here for the first time, it indicates 4725 * whether the connection is already set up.
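* (HCI_CONN_HANDLE_UNSET is assumed to be a sentinel outside the valid handle range, so an already-initialized handle can be detected reliably below.)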
4726 */ 4727 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 4728 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); 4729 goto unlock; 4730 } 4731 4732 switch (ev->status) { 4733 case 0x00: 4734 conn->handle = __le16_to_cpu(ev->handle); 4735 conn->state = BT_CONNECTED; 4736 conn->type = ev->link_type; 4737 4738 hci_debugfs_create_conn(conn); 4739 hci_conn_add_sysfs(conn); 4740 break; 4741 4742 case 0x10: /* Connection Accept Timeout */ 4743 case 0x0d: /* Connection Rejected due to Limited Resources */ 4744 case 0x11: /* Unsupported Feature or Parameter Value */ 4745 case 0x1c: /* SCO interval rejected */ 4746 case 0x1a: /* Unsupported Remote Feature */ 4747 case 0x1e: /* Invalid LMP Parameters */ 4748 case 0x1f: /* Unspecified error */ 4749 case 0x20: /* Unsupported LMP Parameter value */ 4750 if (conn->out) { 4751 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 4752 (hdev->esco_type & EDR_ESCO_MASK); 4753 if (hci_setup_sync(conn, conn->link->handle)) 4754 goto unlock; 4755 } 4756 fallthrough; 4757 4758 default: 4759 conn->state = BT_CLOSED; 4760 break; 4761 } 4762 4763 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 4764 /* Notify only in case of SCO over the HCI transport data path, which 4765 * is indicated by zero; a non-zero value means a non-HCI transport data path. 4766 */ 4767 if (conn->codec.data_path == 0 && hdev->notify) { 4768 switch (ev->air_mode) { 4769 case 0x02: 4770 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 4771 break; 4772 case 0x03: 4773 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 4774 break; 4775 } 4776 } 4777 4778 hci_connect_cfm(conn, ev->status); 4779 if (ev->status) 4780 hci_conn_del(conn); 4781 4782 unlock: 4783 hci_dev_unlock(hdev); 4784 } 4785 4786 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 4787 { 4788 size_t parsed = 0; 4789 4790 while (parsed < eir_len) { 4791 u8 field_len = eir[0]; 4792 4793 if (field_len == 0) 4794 return parsed; 4795 4796 parsed += field_len + 1; 4797 eir += field_len + 1; 4798 } 4799 4800 return eir_len; 4801 } 4802 4803 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, 4804 struct sk_buff *skb) 4805 { 4806 struct hci_ev_ext_inquiry_result *ev = edata; 4807 struct inquiry_data data; 4808 size_t eir_len; 4809 int i; 4810 4811 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, 4812 flex_array_size(ev, info, ev->num))) 4813 return; 4814 4815 bt_dev_dbg(hdev, "num %d", ev->num); 4816 4817 if (!ev->num) 4818 return; 4819 4820 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4821 return; 4822 4823 hci_dev_lock(hdev); 4824 4825 for (i = 0; i < ev->num; i++) { 4826 struct extended_inquiry_info *info = &ev->info[i]; 4827 u32 flags; 4828 bool name_known; 4829 4830 bacpy(&data.bdaddr, &info->bdaddr); 4831 data.pscan_rep_mode = info->pscan_rep_mode; 4832 data.pscan_period_mode = info->pscan_period_mode; 4833 data.pscan_mode = 0x00; 4834 memcpy(data.dev_class, info->dev_class, 3); 4835 data.clock_offset = info->clock_offset; 4836 data.rssi = info->rssi; 4837 data.ssp_mode = 0x01; 4838 4839 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4840 name_known = eir_get_data(info->data, 4841 sizeof(info->data), 4842 EIR_NAME_COMPLETE, NULL); 4843 else 4844 name_known = true; 4845 4846 flags = hci_inquiry_cache_update(hdev, &data, name_known); 4847 4848 eir_len = eir_get_length(info->data, sizeof(info->data)); 4849 4850 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4851 info->dev_class, info->rssi, 4852 flags, info->data, eir_len, NULL, 0); 4853 } 4854
4855 hci_dev_unlock(hdev); 4856 } 4857 4858 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, 4859 struct sk_buff *skb) 4860 { 4861 struct hci_ev_key_refresh_complete *ev = data; 4862 struct hci_conn *conn; 4863 4864 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, 4865 __le16_to_cpu(ev->handle)); 4866 4867 hci_dev_lock(hdev); 4868 4869 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4870 if (!conn) 4871 goto unlock; 4872 4873 /* For BR/EDR the necessary steps are taken through the 4874 * auth_complete event. 4875 */ 4876 if (conn->type != LE_LINK) 4877 goto unlock; 4878 4879 if (!ev->status) 4880 conn->sec_level = conn->pending_sec_level; 4881 4882 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 4883 4884 if (ev->status && conn->state == BT_CONNECTED) { 4885 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 4886 hci_conn_drop(conn); 4887 goto unlock; 4888 } 4889 4890 if (conn->state == BT_CONFIG) { 4891 if (!ev->status) 4892 conn->state = BT_CONNECTED; 4893 4894 hci_connect_cfm(conn, ev->status); 4895 hci_conn_drop(conn); 4896 } else { 4897 hci_auth_cfm(conn, ev->status); 4898 4899 hci_conn_hold(conn); 4900 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4901 hci_conn_drop(conn); 4902 } 4903 4904 unlock: 4905 hci_dev_unlock(hdev); 4906 } 4907 4908 static u8 hci_get_auth_req(struct hci_conn *conn) 4909 { 4910 /* If remote requests no-bonding, follow that lead */ 4911 if (conn->remote_auth == HCI_AT_NO_BONDING || 4912 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 4913 return conn->remote_auth | (conn->auth_type & 0x01); 4914 4915 /* If both remote and local have enough IO capabilities, require 4916 * MITM protection. 4917 */ 4918 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 4919 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 4920 return conn->remote_auth | 0x01; 4921 4922 /* No MITM protection possible, so ignore remote requirement */ 4923 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 4924 } 4925 4926 static u8 bredr_oob_data_present(struct hci_conn *conn) 4927 { 4928 struct hci_dev *hdev = conn->hdev; 4929 struct oob_data *data; 4930 4931 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 4932 if (!data) 4933 return 0x00; 4934 4935 if (bredr_sc_enabled(hdev)) { 4936 /* When Secure Connections is enabled, then just 4937 * return the present value stored with the OOB 4938 * data. The stored value contains the right present 4939 * information. However, it can only be trusted when 4940 * not in Secure Connections Only mode. 4941 */ 4942 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 4943 return data->present; 4944 4945 /* When Secure Connections Only mode is enabled, then 4946 * the P-256 values are required. If they are not 4947 * available, then do not declare that OOB data is 4948 * present. 4949 */ 4950 if (!memcmp(data->rand256, ZERO_KEY, 16) || 4951 !memcmp(data->hash256, ZERO_KEY, 16)) 4952 return 0x00; 4953 4954 return 0x02; 4955 } 4956 4957 /* When Secure Connections is not enabled or actually 4958 * not supported by the hardware, then check if 4959 * P-192 data values are present.
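* * The return value of this helper feeds the OOB_Data_Present parameter of the IO Capability Reply sent in hci_io_capa_request_evt() below: 0x00 means no OOB data, 0x01 means P-192 data present, and 0x02 means P-256 data present.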
4960 */ 4961 if (!memcmp(data->rand192, ZERO_KEY, 16) || 4962 !memcmp(data->hash192, ZERO_KEY, 16)) 4963 return 0x00; 4964 4965 return 0x01; 4966 } 4967 4968 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, 4969 struct sk_buff *skb) 4970 { 4971 struct hci_ev_io_capa_request *ev = data; 4972 struct hci_conn *conn; 4973 4974 bt_dev_dbg(hdev, ""); 4975 4976 hci_dev_lock(hdev); 4977 4978 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4979 if (!conn) 4980 goto unlock; 4981 4982 hci_conn_hold(conn); 4983 4984 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4985 goto unlock; 4986 4987 /* Allow pairing if we're pairable, if we are the initiator of 4988 * the pairing, or if the remote is not requesting bonding. 4989 */ 4990 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 4991 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 4992 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 4993 struct hci_cp_io_capability_reply cp; 4994 4995 bacpy(&cp.bdaddr, &ev->bdaddr); 4996 /* Change the IO capability from KeyboardDisplay 4997 * to DisplayYesNo as it is not supported by the BT spec. */ 4998 cp.capability = (conn->io_capability == 0x04) ? 4999 HCI_IO_DISPLAY_YESNO : conn->io_capability; 5000 5001 /* If we are the initiator, there is no remote information yet */ 5002 if (conn->remote_auth == 0xff) { 5003 /* Request MITM protection if our IO caps allow it, 5004 * except for the no-bonding case. 5005 */ 5006 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5007 conn->auth_type != HCI_AT_NO_BONDING) 5008 conn->auth_type |= 0x01; 5009 } else { 5010 conn->auth_type = hci_get_auth_req(conn); 5011 } 5012 5013 /* If we're not bondable, force one of the non-bondable 5014 * authentication requirement values. 5015 */ 5016 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 5017 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 5018 5019 cp.authentication = conn->auth_type; 5020 cp.oob_data = bredr_oob_data_present(conn); 5021 5022 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 5023 sizeof(cp), &cp); 5024 } else { 5025 struct hci_cp_io_capability_neg_reply cp; 5026 5027 bacpy(&cp.bdaddr, &ev->bdaddr); 5028 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 5029 5030 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 5031 sizeof(cp), &cp); 5032 } 5033 5034 unlock: 5035 hci_dev_unlock(hdev); 5036 } 5037 5038 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, 5039 struct sk_buff *skb) 5040 { 5041 struct hci_ev_io_capa_reply *ev = data; 5042 struct hci_conn *conn; 5043 5044 bt_dev_dbg(hdev, ""); 5045 5046 hci_dev_lock(hdev); 5047 5048 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5049 if (!conn) 5050 goto unlock; 5051 5052 conn->remote_cap = ev->capability; 5053 conn->remote_auth = ev->authentication; 5054 5055 unlock: 5056 hci_dev_unlock(hdev); 5057 } 5058 5059 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, 5060 struct sk_buff *skb) 5061 { 5062 struct hci_ev_user_confirm_req *ev = data; 5063 int loc_mitm, rem_mitm, confirm_hint = 0; 5064 struct hci_conn *conn; 5065 5066 bt_dev_dbg(hdev, ""); 5067 5068 hci_dev_lock(hdev); 5069 5070 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5071 goto unlock; 5072 5073 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5074 if (!conn) 5075 goto unlock; 5076 5077 loc_mitm = (conn->auth_type & 0x01); 5078 rem_mitm = (conn->remote_auth & 0x01); 5079 5080 /* If we require MITM but the remote device can't provide that 5081 * (it has NoInputNoOutput), then reject the confirmation 5082 * request.
We check the security level here since it doesn't 5083 * necessarily match conn->auth_type. 5084 */ 5085 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 5086 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 5087 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); 5088 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 5089 sizeof(ev->bdaddr), &ev->bdaddr); 5090 goto unlock; 5091 } 5092 5093 /* If no side requires MITM protection, auto-accept */ 5094 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 5095 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 5096 5097 /* If we're not the initiator, request authorization to 5098 * proceed from user space (mgmt_user_confirm with 5099 * confirm_hint set to 1). The exception is if neither 5100 * side had MITM or if the local IO capability is 5101 * NoInputNoOutput, in which case we do auto-accept. 5102 */ 5103 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 5104 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5105 (loc_mitm || rem_mitm)) { 5106 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); 5107 confirm_hint = 1; 5108 goto confirm; 5109 } 5110 5111 /* If a link key already exists on the local host, leave the 5112 * decision to user space since the remote device could be 5113 * legitimate or malicious. 5114 */ 5115 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5116 bt_dev_dbg(hdev, "Local host already has link key"); 5117 confirm_hint = 1; 5118 goto confirm; 5119 } 5120 5121 BT_DBG("Auto-accept of user confirmation with %ums delay", 5122 hdev->auto_accept_delay); 5123 5124 if (hdev->auto_accept_delay > 0) { 5125 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5126 queue_delayed_work(conn->hdev->workqueue, 5127 &conn->auto_accept_work, delay); 5128 goto unlock; 5129 } 5130 5131 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5132 sizeof(ev->bdaddr), &ev->bdaddr); 5133 goto unlock; 5134 } 5135 5136 confirm: 5137 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5138 le32_to_cpu(ev->passkey), confirm_hint); 5139 5140 unlock: 5141 hci_dev_unlock(hdev); 5142 } 5143 5144 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5145 struct sk_buff *skb) 5146 { 5147 struct hci_ev_user_passkey_req *ev = data; 5148 5149 bt_dev_dbg(hdev, ""); 5150 5151 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5152 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5153 } 5154 5155 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5156 struct sk_buff *skb) 5157 { 5158 struct hci_ev_user_passkey_notify *ev = data; 5159 struct hci_conn *conn; 5160 5161 bt_dev_dbg(hdev, ""); 5162 5163 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5164 if (!conn) 5165 return; 5166 5167 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5168 conn->passkey_entered = 0; 5169 5170 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5171 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5172 conn->dst_type, conn->passkey_notify, 5173 conn->passkey_entered); 5174 } 5175 5176 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5177 struct sk_buff *skb) 5178 { 5179 struct hci_ev_keypress_notify *ev = data; 5180 struct hci_conn *conn; 5181 5182 bt_dev_dbg(hdev, ""); 5183 5184 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5185 if (!conn) 5186 return; 5187 5188 switch (ev->type) { 5189 case HCI_KEYPRESS_STARTED: 5190 conn->passkey_entered = 0; 5191 return; 5192 5193 case HCI_KEYPRESS_ENTERED: 5194 conn->passkey_entered++; 5195 break; 5196 5197 case
HCI_KEYPRESS_ERASED: 5198 conn->passkey_entered--; 5199 break; 5200 5201 case HCI_KEYPRESS_CLEARED: 5202 conn->passkey_entered = 0; 5203 break; 5204 5205 case HCI_KEYPRESS_COMPLETED: 5206 return; 5207 } 5208 5209 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5210 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5211 conn->dst_type, conn->passkey_notify, 5212 conn->passkey_entered); 5213 } 5214 5215 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5216 struct sk_buff *skb) 5217 { 5218 struct hci_ev_simple_pair_complete *ev = data; 5219 struct hci_conn *conn; 5220 5221 bt_dev_dbg(hdev, ""); 5222 5223 hci_dev_lock(hdev); 5224 5225 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5226 if (!conn) 5227 goto unlock; 5228 5229 /* Reset the authentication requirement to unknown */ 5230 conn->remote_auth = 0xff; 5231 5232 /* To avoid duplicate auth_failed events to user space, we check 5233 * the HCI_CONN_AUTH_PEND flag, which will be set if we 5234 * initiated the authentication. A traditional auth_complete 5235 * event is always produced as the initiator and is also mapped to 5236 * the mgmt_auth_failed event. */ 5237 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5238 mgmt_auth_failed(conn, ev->status); 5239 5240 hci_conn_drop(conn); 5241 5242 unlock: 5243 hci_dev_unlock(hdev); 5244 } 5245 5246 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, 5247 struct sk_buff *skb) 5248 { 5249 struct hci_ev_remote_host_features *ev = data; 5250 struct inquiry_entry *ie; 5251 struct hci_conn *conn; 5252 5253 bt_dev_dbg(hdev, ""); 5254 5255 hci_dev_lock(hdev); 5256 5257 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5258 if (conn) 5259 memcpy(conn->features[1], ev->features, 8); 5260 5261 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5262 if (ie) 5263 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5264 5265 hci_dev_unlock(hdev); 5266 } 5267 5268 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, 5269 struct sk_buff *skb) 5270 { 5271 struct hci_ev_remote_oob_data_request *ev = edata; 5272 struct oob_data *data; 5273 5274 bt_dev_dbg(hdev, ""); 5275 5276 hci_dev_lock(hdev); 5277 5278 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5279 goto unlock; 5280 5281 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5282 if (!data) { 5283 struct hci_cp_remote_oob_data_neg_reply cp; 5284 5285 bacpy(&cp.bdaddr, &ev->bdaddr); 5286 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5287 sizeof(cp), &cp); 5288 goto unlock; 5289 } 5290 5291 if (bredr_sc_enabled(hdev)) { 5292 struct hci_cp_remote_oob_ext_data_reply cp; 5293 5294 bacpy(&cp.bdaddr, &ev->bdaddr); 5295 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5296 memset(cp.hash192, 0, sizeof(cp.hash192)); 5297 memset(cp.rand192, 0, sizeof(cp.rand192)); 5298 } else { 5299 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5300 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5301 } 5302 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5303 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5304 5305 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5306 sizeof(cp), &cp); 5307 } else { 5308 struct hci_cp_remote_oob_data_reply cp; 5309 5310 bacpy(&cp.bdaddr, &ev->bdaddr); 5311 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5312 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5313 5314 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5315 sizeof(cp), &cp); 5316 } 5317 5318 unlock: 5319 hci_dev_unlock(hdev); 5320 } 5321 5322
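/* The handlers below cover AMP physical/logical links and channel selection; they are only compiled in when High Speed support (CONFIG_BT_HS) is enabled, as the #if/#endif pair around them shows. */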
#if IS_ENABLED(CONFIG_BT_HS) 5323 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, 5324 struct sk_buff *skb) 5325 { 5326 struct hci_ev_channel_selected *ev = data; 5327 struct hci_conn *hcon; 5328 5329 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); 5330 5331 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5332 if (!hcon) 5333 return; 5334 5335 amp_read_loc_assoc_final_data(hdev, hcon); 5336 } 5337 5338 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, 5339 struct sk_buff *skb) 5340 { 5341 struct hci_ev_phy_link_complete *ev = data; 5342 struct hci_conn *hcon, *bredr_hcon; 5343 5344 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, 5345 ev->status); 5346 5347 hci_dev_lock(hdev); 5348 5349 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5350 if (!hcon) 5351 goto unlock; 5352 5353 if (!hcon->amp_mgr) 5354 goto unlock; 5355 5356 if (ev->status) { 5357 hci_conn_del(hcon); 5358 goto unlock; 5359 } 5360 5361 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5362 5363 hcon->state = BT_CONNECTED; 5364 bacpy(&hcon->dst, &bredr_hcon->dst); 5365 5366 hci_conn_hold(hcon); 5367 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5368 hci_conn_drop(hcon); 5369 5370 hci_debugfs_create_conn(hcon); 5371 hci_conn_add_sysfs(hcon); 5372 5373 amp_physical_cfm(bredr_hcon, hcon); 5374 5375 unlock: 5376 hci_dev_unlock(hdev); 5377 } 5378 5379 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, 5380 struct sk_buff *skb) 5381 { 5382 struct hci_ev_logical_link_complete *ev = data; 5383 struct hci_conn *hcon; 5384 struct hci_chan *hchan; 5385 struct amp_mgr *mgr; 5386 5387 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5388 le16_to_cpu(ev->handle), ev->phy_handle, ev->status); 5389 5390 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5391 if (!hcon) 5392 return; 5393 5394 /* Create AMP hchan */ 5395 hchan = hci_chan_create(hcon); 5396 if (!hchan) 5397 return; 5398 5399 hchan->handle = le16_to_cpu(ev->handle); 5400 hchan->amp = true; 5401 5402 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5403 5404 mgr = hcon->amp_mgr; 5405 if (mgr && mgr->bredr_chan) { 5406 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5407 5408 l2cap_chan_lock(bredr_chan); 5409 5410 bredr_chan->conn->mtu = hdev->block_mtu; 5411 l2cap_logical_cfm(bredr_chan, hchan, 0); 5412 hci_conn_hold(hcon); 5413 5414 l2cap_chan_unlock(bredr_chan); 5415 } 5416 } 5417 5418 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, 5419 struct sk_buff *skb) 5420 { 5421 struct hci_ev_disconn_logical_link_complete *ev = data; 5422 struct hci_chan *hchan; 5423 5424 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", 5425 le16_to_cpu(ev->handle), ev->status); 5426 5427 if (ev->status) 5428 return; 5429 5430 hci_dev_lock(hdev); 5431 5432 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5433 if (!hchan || !hchan->amp) 5434 goto unlock; 5435 5436 amp_destroy_logical_link(hchan, ev->reason); 5437 5438 unlock: 5439 hci_dev_unlock(hdev); 5440 } 5441 5442 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, 5443 struct sk_buff *skb) 5444 { 5445 struct hci_ev_disconn_phy_link_complete *ev = data; 5446 struct hci_conn *hcon; 5447 5448 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5449 5450 if (ev->status) 5451 return; 5452 5453 hci_dev_lock(hdev); 5454 5455 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5456 if (hcon && hcon->type == AMP_LINK) { 5457 hcon->state = 
BT_CLOSED; 5458 hci_disconn_cfm(hcon, ev->reason); 5459 hci_conn_del(hcon); 5460 } 5461 5462 hci_dev_unlock(hdev); 5463 } 5464 #endif 5465 5466 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5467 u8 bdaddr_type, bdaddr_t *local_rpa) 5468 { 5469 if (conn->out) { 5470 conn->dst_type = bdaddr_type; 5471 conn->resp_addr_type = bdaddr_type; 5472 bacpy(&conn->resp_addr, bdaddr); 5473 5474 /* If the controller has set a Local RPA, then it must be 5475 * used instead of hdev->rpa. 5476 */ 5477 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5478 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5479 bacpy(&conn->init_addr, local_rpa); 5480 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5481 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5482 bacpy(&conn->init_addr, &conn->hdev->rpa); 5483 } else { 5484 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5485 &conn->init_addr_type); 5486 } 5487 } else { 5488 conn->resp_addr_type = conn->hdev->adv_addr_type; 5489 /* If the controller has set a Local RPA, then it must be 5490 * used instead of hdev->rpa. 5491 */ 5492 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5493 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5494 bacpy(&conn->resp_addr, local_rpa); 5495 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5496 /* In case of ext adv, resp_addr will be updated in 5497 * Adv Terminated event. 5498 */ 5499 if (!ext_adv_capable(conn->hdev)) 5500 bacpy(&conn->resp_addr, 5501 &conn->hdev->random_addr); 5502 } else { 5503 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5504 } 5505 5506 conn->init_addr_type = bdaddr_type; 5507 bacpy(&conn->init_addr, bdaddr); 5508 5509 /* For incoming connections, set the default minimum 5510 * and maximum connection interval. They will be used 5511 * to check if the parameters are in range and, if not, 5512 * trigger the connection update procedure. 5513 */ 5514 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5515 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5516 } 5517 } 5518 5519 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5520 bdaddr_t *bdaddr, u8 bdaddr_type, 5521 bdaddr_t *local_rpa, u8 role, u16 handle, 5522 u16 interval, u16 latency, 5523 u16 supervision_timeout) 5524 { 5525 struct hci_conn_params *params; 5526 struct hci_conn *conn; 5527 struct smp_irk *irk; 5528 u8 addr_type; 5529 5530 if (handle > HCI_CONN_HANDLE_MAX) { 5531 bt_dev_err(hdev, "Ignoring HCI_LE_Connection_Complete for invalid handle"); 5532 return; 5533 } 5534 5535 hci_dev_lock(hdev); 5536 5537 /* All controllers implicitly stop advertising in the event of a 5538 * connection, so ensure that the state bit is cleared. 5539 */ 5540 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5541 5542 conn = hci_lookup_le_connect(hdev); 5543 if (!conn) { 5544 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5545 if (!conn) { 5546 bt_dev_err(hdev, "no memory for new connection"); 5547 goto unlock; 5548 } 5549 5550 conn->dst_type = bdaddr_type; 5551 5552 /* If we didn't have a hci_conn object previously 5553 * but we're in central role, this must be something 5554 * initiated using an accept list. Since accept list based 5555 * connections are not "first class citizens", we don't 5556 * have full tracking of them. Therefore, we go ahead 5557 * with a "best effort" approach of determining the 5558 * initiator address based on the HCI_PRIVACY flag.
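* * (Best effort here means: if HCI_PRIVACY is set, assume our own RPA was used as the initiator address; otherwise fall back to the identity address, as the assignments below do.)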
5559 */ 5560 if (conn->out) { 5561 conn->resp_addr_type = bdaddr_type; 5562 bacpy(&conn->resp_addr, bdaddr); 5563 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5564 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5565 bacpy(&conn->init_addr, &hdev->rpa); 5566 } else { 5567 hci_copy_identity_address(hdev, 5568 &conn->init_addr, 5569 &conn->init_addr_type); 5570 } 5571 } 5572 } else { 5573 cancel_delayed_work(&conn->le_conn_timeout); 5574 } 5575 5576 /* The HCI_LE_Connection_Complete event is only sent once per connection. 5577 * Processing it more than once per connection can corrupt kernel memory. 5578 * 5579 * As the connection handle is set here for the first time, it indicates 5580 * whether the connection is already set up. 5581 */ 5582 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 5583 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 5584 goto unlock; 5585 } 5586 5587 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5588 5589 /* Lookup the identity address from the stored connection 5590 * address and address type. 5591 * 5592 * When establishing connections to an identity address, the 5593 * connection procedure will store the resolvable random 5594 * address first. Now if it can be converted back into the 5595 * identity address, start using the identity address from 5596 * now on. 5597 */ 5598 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5599 if (irk) { 5600 bacpy(&conn->dst, &irk->bdaddr); 5601 conn->dst_type = irk->addr_type; 5602 } 5603 5604 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5605 5606 if (status) { 5607 hci_le_conn_failed(conn, status); 5608 goto unlock; 5609 } 5610 5611 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5612 addr_type = BDADDR_LE_PUBLIC; 5613 else 5614 addr_type = BDADDR_LE_RANDOM; 5615 5616 /* Drop the connection if the device is blocked */ 5617 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5618 hci_conn_drop(conn); 5619 goto unlock; 5620 } 5621 5622 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5623 mgmt_device_connected(hdev, conn, NULL, 0); 5624 5625 conn->sec_level = BT_SECURITY_LOW; 5626 conn->handle = handle; 5627 conn->state = BT_CONFIG; 5628 5629 /* Store current advertising instance as connection advertising instance 5630 * when software rotation is in use so it can be re-enabled when 5631 * disconnected. 5632 */ 5633 if (!ext_adv_capable(hdev)) 5634 conn->adv_instance = hdev->cur_adv_instance; 5635 5636 conn->le_conn_interval = interval; 5637 conn->le_conn_latency = latency; 5638 conn->le_supv_timeout = supervision_timeout; 5639 5640 hci_debugfs_create_conn(conn); 5641 hci_conn_add_sysfs(conn); 5642 5643 /* The remote features procedure is defined for the central 5644 * role only. So request the remote features only in case 5645 * of an initiated (outgoing) connection. 5646 * 5647 * If the local controller supports peripheral-initiated features 5648 * exchange, then requesting the remote features in peripheral 5649 * role is possible. Otherwise just transition into the 5650 * connected state without requesting the remote features.
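* * (HCI_LE_PERIPHERAL_FEATURES is the peripheral-initiated features exchange bit in the controller's LE feature mask; it is what the condition below tests.)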
5651 */ 5652 if (conn->out || 5653 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5654 struct hci_cp_le_read_remote_features cp; 5655 5656 cp.handle = __cpu_to_le16(conn->handle); 5657 5658 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5659 sizeof(cp), &cp); 5660 5661 hci_conn_hold(conn); 5662 } else { 5663 conn->state = BT_CONNECTED; 5664 hci_connect_cfm(conn, status); 5665 } 5666 5667 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5668 conn->dst_type); 5669 if (params) { 5670 list_del_init(&params->action); 5671 if (params->conn) { 5672 hci_conn_drop(params->conn); 5673 hci_conn_put(params->conn); 5674 params->conn = NULL; 5675 } 5676 } 5677 5678 unlock: 5679 hci_update_passive_scan(hdev); 5680 hci_dev_unlock(hdev); 5681 } 5682 5683 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, 5684 struct sk_buff *skb) 5685 { 5686 struct hci_ev_le_conn_complete *ev = data; 5687 5688 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5689 5690 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5691 NULL, ev->role, le16_to_cpu(ev->handle), 5692 le16_to_cpu(ev->interval), 5693 le16_to_cpu(ev->latency), 5694 le16_to_cpu(ev->supervision_timeout)); 5695 } 5696 5697 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, 5698 struct sk_buff *skb) 5699 { 5700 struct hci_ev_le_enh_conn_complete *ev = data; 5701 5702 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5703 5704 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5705 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5706 le16_to_cpu(ev->interval), 5707 le16_to_cpu(ev->latency), 5708 le16_to_cpu(ev->supervision_timeout)); 5709 } 5710 5711 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, 5712 struct sk_buff *skb) 5713 { 5714 struct hci_evt_le_ext_adv_set_term *ev = data; 5715 struct hci_conn *conn; 5716 struct adv_info *adv, *n; 5717 5718 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5719 5720 /* The Bluetooth Core 5.3 specification clearly states that this event 5721 * shall not be sent when the Host disables the advertising set. So in 5722 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 5723 * 5724 * When the Host disables an advertising set, all cleanup is done via 5725 * its command callback and does not need to be duplicated here. 5726 */ 5727 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 5728 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 5729 return; 5730 } 5731 5732 hci_dev_lock(hdev); 5733 5734 adv = hci_find_adv_instance(hdev, ev->handle); 5735 5736 if (ev->status) { 5737 if (!adv) 5738 goto unlock; 5739 5740 /* Remove advertising as it has been terminated */ 5741 hci_remove_adv_instance(hdev, ev->handle); 5742 mgmt_advertising_removed(NULL, hdev, ev->handle); 5743 5744 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 5745 if (adv->enabled) 5746 goto unlock; 5747 } 5748 5749 /* We are no longer advertising, clear HCI_LE_ADV */ 5750 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5751 goto unlock; 5752 } 5753 5754 if (adv) 5755 adv->enabled = false; 5756 5757 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 5758 if (conn) { 5759 /* Store the handle in the connection so the correct advertising 5760 * instance can be re-enabled when disconnected.
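* * (ev->handle identifies the advertising set; for set 0x00 the code below falls back to hdev->random_addr as the response address.)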
5761 */ 5762 conn->adv_instance = ev->handle; 5763 5764 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 5765 bacmp(&conn->resp_addr, BDADDR_ANY)) 5766 goto unlock; 5767 5768 if (!ev->handle) { 5769 bacpy(&conn->resp_addr, &hdev->random_addr); 5770 goto unlock; 5771 } 5772 5773 if (adv) 5774 bacpy(&conn->resp_addr, &adv->random_addr); 5775 } 5776 5777 unlock: 5778 hci_dev_unlock(hdev); 5779 } 5780 5781 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, 5782 struct sk_buff *skb) 5783 { 5784 struct hci_ev_le_conn_update_complete *ev = data; 5785 struct hci_conn *conn; 5786 5787 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5788 5789 if (ev->status) 5790 return; 5791 5792 hci_dev_lock(hdev); 5793 5794 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5795 if (conn) { 5796 conn->le_conn_interval = le16_to_cpu(ev->interval); 5797 conn->le_conn_latency = le16_to_cpu(ev->latency); 5798 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 5799 } 5800 5801 hci_dev_unlock(hdev); 5802 } 5803 5804 /* This function requires the caller holds hdev->lock */ 5805 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 5806 bdaddr_t *addr, 5807 u8 addr_type, bool addr_resolved, 5808 u8 adv_type) 5809 { 5810 struct hci_conn *conn; 5811 struct hci_conn_params *params; 5812 5813 /* If the event is not connectable, don't proceed further */ 5814 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 5815 return NULL; 5816 5817 /* Ignore if the device is blocked or hdev is suspended */ 5818 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 5819 hdev->suspended) 5820 return NULL; 5821 5822 /* Most controllers will fail if we try to create new connections 5823 * while we have an existing one in peripheral role. 5824 */ 5825 if (hdev->conn_hash.le_num_peripheral > 0 && 5826 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 5827 !(hdev->le_states[3] & 0x10))) 5828 return NULL; 5829 5830 /* If we're not connectable, only connect devices that we have in 5831 * our pend_le_conns list. 5832 */ 5833 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 5834 addr_type); 5835 if (!params) 5836 return NULL; 5837 5838 if (!params->explicit_connect) { 5839 switch (params->auto_connect) { 5840 case HCI_AUTO_CONN_DIRECT: 5841 /* Only devices advertising with ADV_DIRECT_IND 5842 * trigger a connection attempt. This allows 5843 * incoming connections from peripheral devices. 5844 */ 5845 if (adv_type != LE_ADV_DIRECT_IND) 5846 return NULL; 5847 break; 5848 case HCI_AUTO_CONN_ALWAYS: 5849 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 5850 * trigger a connection attempt. This means 5851 * that incoming connections from peripheral devices are 5852 * accepted and also outgoing connections to peripheral 5853 * devices are established when found. 5854 */ 5855 break; 5856 default: 5857 return NULL; 5858 } 5859 } 5860 5861 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 5862 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 5863 HCI_ROLE_MASTER); 5864 if (!IS_ERR(conn)) { 5865 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 5866 * by the higher layer that tried to connect; if not, then 5867 * store the pointer since we don't really have any 5868 * other owner of the object besides the params that 5869 * triggered it. This way we can abort the connection if 5870 * the parameters get removed and keep the reference 5871 * count consistent once the connection is established.
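* * (The reference taken via hci_conn_get() below is balanced by the hci_conn_drop()/hci_conn_put() pair in le_conn_complete_evt() once the pending params entry is processed.)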
5872 */ 5873 5874 if (!params->explicit_connect) 5875 params->conn = hci_conn_get(conn); 5876 5877 return conn; 5878 } 5879 5880 switch (PTR_ERR(conn)) { 5881 case -EBUSY: 5882 /* If hci_connect() returns -EBUSY, it means there is already 5883 * an LE connection attempt going on. Since controllers don't 5884 * support more than one connection attempt at a time, we 5885 * don't consider this an error case. 5886 */ 5887 break; 5888 default: 5889 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 5890 return NULL; 5891 } 5892 5893 return NULL; 5894 } 5895 5896 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 5897 u8 bdaddr_type, bdaddr_t *direct_addr, 5898 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 5899 bool ext_adv) 5900 { 5901 struct discovery_state *d = &hdev->discovery; 5902 struct smp_irk *irk; 5903 struct hci_conn *conn; 5904 bool match, bdaddr_resolved; 5905 u32 flags; 5906 u8 *ptr; 5907 5908 switch (type) { 5909 case LE_ADV_IND: 5910 case LE_ADV_DIRECT_IND: 5911 case LE_ADV_SCAN_IND: 5912 case LE_ADV_NONCONN_IND: 5913 case LE_ADV_SCAN_RSP: 5914 break; 5915 default: 5916 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 5917 "type: 0x%02x", type); 5918 return; 5919 } 5920 5921 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 5922 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 5923 return; 5924 } 5925 5926 /* Find the end of the data in case the report contains padded zero 5927 * bytes at the end causing an invalid length value. 5928 * 5929 * When data is NULL, len is 0 so there is no need for an extra ptr 5930 * check as 'ptr < data + 0' is already false in such a case. 5931 */ 5932 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 5933 if (ptr + 1 + *ptr > data + len) 5934 break; 5935 } 5936 5937 /* Adjust for actual length. This handles the case when the remote 5938 * device is advertising with an incorrect data length. 5939 */ 5940 len = ptr - data; 5941 5942 /* If the direct address is present, then this report is from 5943 * an LE Direct Advertising Report event. In that case it is 5944 * important to see if the address matches the local 5945 * controller address. 5946 */ 5947 if (direct_addr) { 5948 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 5949 &bdaddr_resolved); 5950 5951 /* Only resolvable random addresses are valid for this 5952 * kind of report and others can be ignored. 5953 */ 5954 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 5955 return; 5956 5957 /* If the controller is not using resolvable random 5958 * addresses, then this report can be ignored. 5959 */ 5960 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 5961 return; 5962 5963 /* If the local IRK of the controller does not match 5964 * with the resolvable random address provided, then 5965 * this report can be ignored. 5966 */ 5967 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 5968 return; 5969 } 5970 5971 /* Check if we need to convert to identity address */ 5972 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 5973 if (irk) { 5974 bdaddr = &irk->bdaddr; 5975 bdaddr_type = irk->addr_type; 5976 } 5977 5978 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 5979 5980 /* Check if we have been requested to connect to this device. 5981 * 5982 * direct_addr is set only for directed advertising reports (it is NULL 5983 * for advertising reports) and is already verified to be an RPA above.
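* * (In other words, for directed reports we only get this far when the report was addressed to our own resolvable private address; the three checks above enforce that.)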
5984 */ 5985 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 5986 type); 5987 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { 5988 /* Store report for later inclusion by 5989 * mgmt_device_connected 5990 */ 5991 memcpy(conn->le_adv_data, data, len); 5992 conn->le_adv_data_len = len; 5993 } 5994 5995 /* Passive scanning shouldn't trigger any device found events, 5996 * except for devices marked as CONN_REPORT for which we do send 5997 * device found events, or when advertisement monitoring was requested. 5998 */ 5999 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 6000 if (type == LE_ADV_DIRECT_IND) 6001 return; 6002 6003 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 6004 bdaddr, bdaddr_type) && 6005 idr_is_empty(&hdev->adv_monitors_idr)) 6006 return; 6007 6008 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 6009 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6010 else 6011 flags = 0; 6012 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6013 rssi, flags, data, len, NULL, 0); 6014 return; 6015 } 6016 6017 /* When receiving non-connectable or scannable undirected 6018 * advertising reports, the remote device is 6019 * not connectable, so clearly indicate this in the 6020 * device found event. 6021 * 6022 * When receiving a scan response, there is no way to 6023 * know if the remote device is connectable or not. However, 6024 * since scan responses are merged with a previously seen 6025 * advertising report, the flags field from that report 6026 * will be used. 6027 * 6028 * In the really unlikely case that a controller gets confused 6029 * and just sends a scan response event, then it is marked as 6030 * not connectable as well. 6031 */ 6032 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || 6033 type == LE_ADV_SCAN_RSP) 6034 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6035 else 6036 flags = 0; 6037 6038 /* If there's nothing pending, either store the data from this 6039 * event or send an immediate device found event if the data 6040 * should not be stored for later. 6041 */ 6042 if (!ext_adv && !has_pending_adv_report(hdev)) { 6043 /* If the report will trigger a SCAN_REQ, store it for 6044 * later merging. 6045 */ 6046 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 6047 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6048 rssi, flags, data, len); 6049 return; 6050 } 6051 6052 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6053 rssi, flags, data, len, NULL, 0); 6054 return; 6055 } 6056 6057 /* Check if the pending report is for the same device as the new one */ 6058 match = (!bacmp(bdaddr, &d->last_adv_addr) && 6059 bdaddr_type == d->last_adv_addr_type); 6060 6061 /* If the pending data doesn't match this report or this isn't a 6062 * scan response (e.g. we got a duplicate ADV_IND), then force 6063 * sending of the pending data. 6064 */ 6065 if (type != LE_ADV_SCAN_RSP || !match) { 6066 /* Send out whatever is in the cache, but skip duplicates */ 6067 if (!match) 6068 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6069 d->last_adv_addr_type, NULL, 6070 d->last_adv_rssi, d->last_adv_flags, 6071 d->last_adv_data, 6072 d->last_adv_data_len, NULL, 0); 6073 6074 /* If the new report will trigger a SCAN_REQ, store it for 6075 * later merging.
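* (Only ADV_IND and ADV_SCAN_IND are scannable PDU types, i.e. they may be followed by a SCAN_RSP that can then be merged with the stored report.)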
6076 */ 6077 if (!ext_adv && (type == LE_ADV_IND || 6078 type == LE_ADV_SCAN_IND)) { 6079 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6080 rssi, flags, data, len); 6081 return; 6082 } 6083 6084 /* The advertising reports cannot be merged, so clear 6085 * the pending report and send out a device found event. 6086 */ 6087 clear_pending_adv_report(hdev); 6088 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6089 rssi, flags, data, len, NULL, 0); 6090 return; 6091 } 6092 6093 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 6094 * the new event is a SCAN_RSP. We can therefore proceed with 6095 * sending a merged device found event. 6096 */ 6097 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6098 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 6099 d->last_adv_data, d->last_adv_data_len, data, len); 6100 clear_pending_adv_report(hdev); 6101 } 6102 6103 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, 6104 struct sk_buff *skb) 6105 { 6106 struct hci_ev_le_advertising_report *ev = data; 6107 6108 if (!ev->num) 6109 return; 6110 6111 hci_dev_lock(hdev); 6112 6113 while (ev->num--) { 6114 struct hci_ev_le_advertising_info *info; 6115 s8 rssi; 6116 6117 info = hci_le_ev_skb_pull(hdev, skb, 6118 HCI_EV_LE_ADVERTISING_REPORT, 6119 sizeof(*info)); 6120 if (!info) 6121 break; 6122 6123 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, 6124 info->length + 1)) 6125 break; 6126 6127 if (info->length <= HCI_MAX_AD_LENGTH) { 6128 rssi = info->data[info->length]; 6129 process_adv_report(hdev, info->type, &info->bdaddr, 6130 info->bdaddr_type, NULL, 0, rssi, 6131 info->data, info->length, false); 6132 } else { 6133 bt_dev_err(hdev, "Dropping invalid advertising data"); 6134 } 6135 } 6136 6137 hci_dev_unlock(hdev); 6138 } 6139 6140 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 6141 { 6142 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 6143 switch (evt_type) { 6144 case LE_LEGACY_ADV_IND: 6145 return LE_ADV_IND; 6146 case LE_LEGACY_ADV_DIRECT_IND: 6147 return LE_ADV_DIRECT_IND; 6148 case LE_LEGACY_ADV_SCAN_IND: 6149 return LE_ADV_SCAN_IND; 6150 case LE_LEGACY_NONCONN_IND: 6151 return LE_ADV_NONCONN_IND; 6152 case LE_LEGACY_SCAN_RSP_ADV: 6153 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 6154 return LE_ADV_SCAN_RSP; 6155 } 6156 6157 goto invalid; 6158 } 6159 6160 if (evt_type & LE_EXT_ADV_CONN_IND) { 6161 if (evt_type & LE_EXT_ADV_DIRECT_IND) 6162 return LE_ADV_DIRECT_IND; 6163 6164 return LE_ADV_IND; 6165 } 6166 6167 if (evt_type & LE_EXT_ADV_SCAN_RSP) 6168 return LE_ADV_SCAN_RSP; 6169 6170 if (evt_type & LE_EXT_ADV_SCAN_IND) 6171 return LE_ADV_SCAN_IND; 6172 6173 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 6174 evt_type & LE_EXT_ADV_DIRECT_IND) 6175 return LE_ADV_NONCONN_IND; 6176 6177 invalid: 6178 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 6179 evt_type); 6180 6181 return LE_ADV_INVALID; 6182 } 6183 6184 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, 6185 struct sk_buff *skb) 6186 { 6187 struct hci_ev_le_ext_adv_report *ev = data; 6188 6189 if (!ev->num) 6190 return; 6191 6192 hci_dev_lock(hdev); 6193 6194 while (ev->num--) { 6195 struct hci_ev_le_ext_adv_info *info; 6196 u8 legacy_evt_type; 6197 u16 evt_type; 6198 6199 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6200 sizeof(*info)); 6201 if (!info) 6202 break; 6203 6204 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6205 info->length)) 6206 break; 6207 6208 evt_type = 
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}

static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
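
/* Handling of the LE Long Term Key Request event below, in outline:
 *
 *	no connection or no stored LTK		-> negative reply
 *	SC key with non-zero EDiv or Rand	-> negative reply
 *	legacy key with EDiv/Rand mismatch	-> negative reply
 *	otherwise				-> reply with the key,
 *						   zero-padded to 16 bytes
 *
 * STKs are additionally removed from the key list once used, since
 * they are only valid for the pairing session that created them.
 */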
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
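
/* For the LE Remote Connection Parameter Request event handled below,
 * the host either accepts the remote's proposed interval/latency/
 * timeout values with a reply, or rejects them with a negative reply
 * built by send_conn_param_neg_reply(). The values are validated with
 * hci_check_conn_params() first, and when we are master the accepted
 * values are also passed to mgmt so userspace can persist them.
 */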
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_direct_adv_report *ev = data;
	int i;

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
				flex_array_size(ev, info, ev->num)))
		return;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct hci_ev_le_direct_adv_info *info = &ev->info[i];

		process_adv_report(hdev, info->type, &info->bdaddr,
				   info->bdaddr_type, &info->direct_addr,
				   info->direct_addr_type, info->rssi, NULL, 0,
				   false);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}
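
/* The macros below build hci_le_ev_table using designated
 * initializers. For example,
 *
 *	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
 *		  sizeof(struct hci_ev_le_ltk_req))
 *
 * expands to
 *
 *	[HCI_EV_LE_LTK_REQ] = {
 *		.func = hci_le_ltk_request_evt,
 *		.min_len = sizeof(struct hci_ev_le_ltk_req),
 *		.max_len = sizeof(struct hci_ev_le_ltk_req),
 *	}
 *
 * placing each handler at the array slot matching its subevent code.
 */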
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table shall have their position according to the subevent
 * opcode they handle. Using the macros above is recommended since they
 * initialize each entry at its proper index via designated initializers,
 * so events without a callback function can simply be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16 min_len;
	u16 max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
};

static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len size: it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);

	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
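
/* hci_get_cmd_complete() below checks that the cloned skb handed to a
 * req_complete_skb callback really contains the event the request was
 * waiting for: either the specific event it asked to be matched on, or
 * a Command Complete carrying the expected opcode. Returning false
 * makes the caller free the clone and pass a NULL skb instead.
 */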
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}
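
/* The HCI_EV*() macros below mirror the HCI_LE_EV*() family above,
 * with one addition: HCI_EV_REQ*() sets .req = true and fills the
 * func_req member of the union instead, marking handlers (Command
 * Complete, Command Status, LE Meta) that take part in request
 * completion and therefore need the opcode/status/req_complete
 * out-parameters.
 */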
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)

/* Entries in this table shall have their position according to the event
 * opcode they handle. Using the macros above is recommended since they
 * initialize each entry at its proper index via designated initializers,
 * so events without a callback function don't need an entry.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16 min_len;
	u16 max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};
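
/* hci_event_func() below is the common dispatcher for hci_ev_table:
 * it enforces min_len, warns (ratelimited) when max_len is exceeded,
 * pulls the fixed-size portion off the skb, and then invokes either
 * the plain handler or, for .req entries, the request-aware variant.
 */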
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len size: it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide if that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}
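
/* hci_event_packet() is the entry point for every event coming up from
 * the controller. In outline: validate the header, match the event
 * against any outstanding non-LE command, clone the skb in case a
 * req_complete_skb callback needs the untouched payload, record the
 * wake reason while suspended, dispatch through hci_event_func(), and
 * finally run whichever completion callback was resolved.
 */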
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}