/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

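	/* A controller reset aborts any ongoing discovery and invalidates
	 * everything cached from the previous controller state: TX power
	 * levels, advertising and scan response data, the scan type and
	 * the white and resolving lists are all cleared below.
	 */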
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

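	/* Page 0 of the LMP feature mask is copied below; its bits gate
	 * which ACL and (e)SCO packet types may be enabled by default.
	 */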
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by the device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv_instance;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 (Set adv and Directed advs) */
		bacpy(&hdev->random_addr, &cp->bdaddr);
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&adv_instance->random_addr, &cp->bdaddr);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request; mark
		 * discovery as stopped in that case. If this was not
		 * because of a connect request, advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}

static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
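	/* hci_sent_cmd_data() returns the parameters of the last sent
	 * command only if its opcode matches; a NULL here means the
	 * failing command was not ours and there is nothing to clean up.
	 */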
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending resolution, there is no need to continue resolving
	 * the next name, as that will be done upon receiving another
	 * Remote Name Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful, wait for the name req complete event before
	 * checking for the need to do authentication.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		u8 type = conn->type;

		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* If the disconnection failed for any reason, the upper layer
		 * does not retry to disconnect in current implementation.
		 * Hence, we need to do some basic cleanup here and re-enable
		 * advertising if necessary.
		 */
		hci_conn_del(conn);
		if (type == LE_LINK)
			hci_req_reenable_advertising(hdev);
	}

	hci_dev_unlock(hdev);
}

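/* Shared bookkeeping for the LE Create Connection and LE Extended Create
 * Connection command status handlers: record the initiator/responder
 * address pair for SMP and arm the connection timeout where needed.
 */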
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}

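/* Inquiry Complete drives the BR/EDR discovery state machine: if cache
 * entries still need their names resolved, move to DISCOVERY_RESOLVING;
 * otherwise (and once any simultaneous LE scan has finished) move to
 * DISCOVERY_STOPPED.
 */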
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}

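/* Connection Complete: the event carries the new handle, the link type
 * (ACL/SCO) and a status byte; on success the connection object moves to
 * BT_CONFIG (ACL) or BT_CONNECTED (SCO) and remote feature discovery is
 * kicked off.
 */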
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the
		 * inquiry cache to see if we've already discovered this
		 * bdaddr before. If found and link is an ACL type, create
		 * a connection class automatically.
		 */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie && ev->link_type == ACL_LINK) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}

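/* Incoming Connection Request: the request is rejected when the blacklist
 * matches, or when mgmt is in control and neither HCI_CONNECTABLE nor a
 * whitelist entry allows the peer; otherwise it is accepted directly or
 * deferred to the upper layers via HCI_PROTO_DEFER.
 */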
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}

static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}

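/* For example, a controller reporting HCI_ERROR_REMOTE_USER_TERM (0x13)
 * in a Disconnection Complete event is translated by hci_to_mgmt_reason()
 * into MGMT_DEV_DISCONN_REMOTE before user space is notified.
 */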
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED,
					    &conn->flags);

	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

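/* The key size read below is stored in conn->enc_key_size so that upper
 * layers (for instance L2CAP) can refuse traffic on links whose negotiated
 * encryption key is shorter than the required minimum.
 */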
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		u8 encrypt;

		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout once the link is
	 * established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3,
	 * the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be sent when
	 * the link is active and encryption is enabled; the connection type
	 * can be either LE or ACL, the controller must support LMP Ping,
	 * and the link must be encrypted with AES-CCM.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb, status);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_AUTH_PAYLOAD_TO:
		hci_cc_read_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
		hci_cc_write_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
		hci_cc_read_local_pairing_opts(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
		hci_cc_read_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
		hci_cc_write_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
		hci_cc_le_add_to_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
		hci_cc_le_del_from_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
		hci_cc_le_set_addr_resolution_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

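/* Command Status differs from Command Complete in that it only reports
 * whether the controller has started executing the command; the actual
 * result, if any, arrives in a dedicated event later. That is why the
 * hci_cs_*() handlers dispatched below mostly care about the failure case.
 */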
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status, we should try to flag the request as completed
	 * (since for this kind of command there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = (void *) skb->data;

	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}

static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

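/* Number of Completed Packets is the controller's TX flow control: each
 * entry returns credits for one handle. If, say, 3 ACL packets complete on
 * a handle, conn->sent drops by 3 and hdev->acl_cnt gains 3, clamped to
 * the acl_pkts budget learned from Read Buffer Size.
 */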
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		return hci_conn_hash_lookup_handle(hdev, handle);
	case HCI_AMP:
		chan = hci_chan_lookup_handle(hdev, handle);
		if (chan)
			return chan->conn;
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		break;
	}

	return NULL;
}

static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		return;
	case HCI_LK_COMBINATION:
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}

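/* Link Key Request: look up a stored key and hand it back to the
 * controller. Unauthenticated or too-short combination keys are withheld
 * when the pending security level demands more than they can provide,
 * forcing a fresh pairing instead.
 */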
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}

4263 * 4264 * To make these devices work, the internal SSP 4265 * enabled flag needs to be cleared if the remote host 4266 * features do not indicate SSP support */ 4267 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4268 } 4269 4270 if (ev->features[0] & LMP_HOST_SC) 4271 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4272 } 4273 4274 if (conn->state != BT_CONFIG) 4275 goto unlock; 4276 4277 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4278 struct hci_cp_remote_name_req cp; 4279 memset(&cp, 0, sizeof(cp)); 4280 bacpy(&cp.bdaddr, &conn->dst); 4281 cp.pscan_rep_mode = 0x02; 4282 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4283 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4284 mgmt_device_connected(hdev, conn, 0, NULL, 0); 4285 4286 if (!hci_outgoing_auth_needed(hdev, conn)) { 4287 conn->state = BT_CONNECTED; 4288 hci_connect_cfm(conn, ev->status); 4289 hci_conn_drop(conn); 4290 } 4291 4292 unlock: 4293 hci_dev_unlock(hdev); 4294 } 4295 4296 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, 4297 struct sk_buff *skb) 4298 { 4299 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 4300 struct hci_conn *conn; 4301 4302 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4303 4304 hci_dev_lock(hdev); 4305 4306 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4307 if (!conn) { 4308 if (ev->link_type == ESCO_LINK) 4309 goto unlock; 4310 4311 /* When the link type in the event indicates SCO connection 4312 * and lookup of the connection object fails, then check 4313 * if an eSCO connection object exists. 4314 * 4315 * The core limits the synchronous connections to either 4316 * SCO or eSCO. The eSCO connection is preferred and tried 4317 * to be setup first and until successfully established, 4318 * the link type will be hinted as eSCO. 
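/* A minimal sketch (not part of the original file) of how the page 0x01
 * "host features" bits consumed above map onto the connection flags,
 * assuming the usual LMP_HOST_SSP/LMP_HOST_SC bit definitions:
 *
 *	u8 host_feat = conn->features[1][0];
 *
 *	if (host_feat & LMP_HOST_SSP)
 *		set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
 *	if (host_feat & LMP_HOST_SC)
 *		set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
 *
 * Page 0x01 carries host-supported features (as opposed to controller
 * features on page 0x00), which is why only that page updates the SSP
 * and Secure Connections state here.
 */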
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be set up first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;
		conn->type = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_CVSD:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
		break;
	case SCO_AIRMODE_TRANSP:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}

static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
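/* Illustrative sketch (not from the original file): the EIR data parsed
 * by eir_get_length() above is a sequence of length-prefixed fields,
 * where each length byte covers the type byte plus the payload:
 *
 *	size_t parsed = 0;
 *
 *	while (parsed < eir_len) {
 *		u8 field_len = eir[parsed];
 *
 *		if (field_len == 0)
 *			break;			// zero padding reached
 *
 *		u8 type = eir[parsed + 1];	// e.g. EIR_NAME_COMPLETE
 *		// payload is eir[parsed + 2] .. eir[parsed + field_len]
 *		parsed += field_len + 1;
 *	}
 *
 * eir_get_data() walks the same structure to pull out a single field
 * such as the complete device name.
 */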
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}

static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connections Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check if the
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}

static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, we're the initiators of the
	 * pairing, or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay to
		 * DisplayYesNo, since KeyboardDisplay is not supported
		 * by the BT spec for this reply.
		 */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection, auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If a link key already exists on the local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_passkey_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}

static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
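/* A condensed sketch (not from the original file) of the auto-accept
 * decision taken in hci_user_confirm_request_evt() above, with the two
 * IO-capability checks spelled out as booleans:
 *
 *	bool loc_mitm = conn->auth_type & 0x01;
 *	bool rem_mitm = conn->remote_auth & 0x01;
 *	bool rem_no_io = conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT;
 *	bool loc_no_io = conn->io_capability == HCI_IO_NO_INPUT_OUTPUT;
 *
 *	bool can_auto_accept = (!loc_mitm || rem_no_io) &&
 *			       (!rem_mitm || loc_no_io);
 *
 * A side that cannot do any IO can never contribute MITM protection,
 * which is why its MITM request is effectively ignored here.
 */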
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event is always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event.
	 */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

#if IS_ENABLED(CONFIG_BT_HS)
static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_channel_selected *ev = (void *)skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);

	skb_pull(skb, sizeof(*ev));

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}

static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name,
	       ev->phy_handle, ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}

static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}

static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}
#endif
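/* Unit note (illustrative, not from the original file): the LE connection
 * parameters stored by le_conn_complete_evt() below come straight from
 * the event and use the Core specification units, so converting them for
 * display or policy checks looks like:
 *
 *	u32 interval_us = conn->le_conn_interval * 1250;   // 1.25 ms units
 *	u32 timeout_ms  = conn->le_supv_timeout * 10;      // 10 ms units
 *	u16 latency_evt = conn->le_conn_latency;           // in events
 *
 * conn->le_conn_interval and friends are filled in after the handle and
 * state are set up in the handler below.
 */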
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role,
				 u16 handle, u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role, this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}

static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}

static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}

static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	conn = hci_conn_hash_lookup_handle(hdev,
					   __le16_to_cpu(ev->conn_handle));
	if (conn) {
		struct adv_info *adv_instance;

		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
			return;

		if (!hdev->cur_adv_instance) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&conn->resp_addr, &adv_instance->random_addr);
	}
}

static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency = le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}
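/* Illustrative sketch (not from the original file): the IRK lookup used
 * in le_conn_complete_evt() above, and again in process_adv_report()
 * below, is what folds a resolvable private address (RPA) back into the
 * peer's identity address:
 *
 *	struct smp_irk *irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
 *
 *	if (irk) {
 *		bacpy(&conn->dst, &irk->bdaddr);
 *		conn->dst_type = irk->addr_type;
 *	}
 *
 * Everything downstream (mgmt events, key lookups, connection params)
 * then operates on the stable identity address instead of the RPA.
 */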
/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This allows
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave devices are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by a higher layer that tried to connect; if not, store
		 * the pointer since we don't really have any other owner
		 * of the object besides the params that triggered it. This
		 * way we can abort the connection if the parameters get
		 * removed and keep the reference count consistent once the
		 * connection is established.
		 */
		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
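/* Illustrative note (not from the original file): hci_connect_le() uses
 * the usual kernel ERR_PTR convention, which is why the code above
 * branches on IS_ERR()/PTR_ERR() rather than checking for NULL:
 *
 *	struct hci_conn *conn = hci_connect_le(hdev, addr, addr_type,
 *					       BT_SECURITY_LOW,
 *					       HCI_LE_AUTOCONN_TIMEOUT,
 *					       HCI_ROLE_MASTER, NULL);
 *
 *	if (IS_ERR(conn))
 *		BT_DBG("connect failed: %ld", PTR_ERR(conn));
 *
 * -EBUSY is treated as benign because only one LE connection attempt
 * can be outstanding per controller at a time.
 */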
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev,
				       "unknown advertising packet type: 0x%02x",
				       type);
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in that case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev,
				       "advertising data len corrected %u -> %u",
				       len, real_len);
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is
	 * NULL for advertising reports) and is already verified to be RPA
	 * above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
				     direct_addr);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable, so clearly indicate this in the device
	 * found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller gets confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

		if (ev->length <= HCI_MAX_AD_LENGTH) {
			rssi = ev->data[ev->length];
			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, rssi,
					   ev->data, ev->length);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}

		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}

static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}

static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;
		u8 legacy_evt_type;
		u16 evt_type;

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length);
		}

		ptr += sizeof(*ev) + ev->length;
	}

	hci_dev_unlock(hdev);
}
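/* Illustrative note (not from the original file): extended advertising
 * reports carry a bitfield event type rather than a single enum, which
 * is what ext_evt_type_to_legacy() above flattens. For example, a
 * connectable directed extended report decodes as:
 *
 *	u16 evt_type = LE_EXT_ADV_CONN_IND | LE_EXT_ADV_DIRECT_IND;
 *
 *	if (ext_evt_type_to_legacy(hdev, evt_type) == LE_ADV_DIRECT_IND)
 *		;	// handled exactly like a legacy ADV_DIRECT_IND
 *
 * Legacy PDUs set LE_EXT_ADV_LEGACY_PDU and are matched by value
 * instead of by individual bits.
 */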
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_direct_adv_info *ev = ptr;

		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0);

		ptr += sizeof(*ev);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}
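/* Layout note (illustrative, not from the original file): LE events all
 * arrive as HCI_EV_LE_META (0x3e) with the subevent code as the first
 * parameter byte, which is why hci_le_meta_evt() above pulls the meta
 * header before dispatching:
 *
 *	struct hci_ev_le_meta *le_ev = (void *) skb->data;
 *	u8 subevent = le_ev->subevent;
 *
 *	skb_pull(skb, sizeof(*le_ev));
 *	// skb->data now points at the subevent-specific parameters
 *
 * Each hci_le_*_evt() handler therefore casts skb->data directly to its
 * own event structure.
 */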
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
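/* Layout note (illustrative, not from the original file): every packet
 * handled by hci_event_packet() starts with the two-byte HCI event
 * header, one byte event code and one byte parameter length:
 *
 *	struct hci_event_hdr {
 *		__u8 evt;
 *		__u8 plen;
 *	} __packed;
 *
 *	u8 evt  = ((struct hci_event_hdr *) skb->data)->evt;
 *	u8 plen = ((struct hci_event_hdr *) skb->data)->plen;
 *
 * The dispatcher pulls this header off the skb first, so the individual
 * handlers only ever see the event parameters.
 */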