1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 4 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License version 2 as 9 published by the Free Software Foundation; 10 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 22 SOFTWARE IS DISCLAIMED. 23 */ 24 25 /* Bluetooth HCI event handling. 
*/ 26 27 #include <asm/unaligned.h> 28 29 #include <net/bluetooth/bluetooth.h> 30 #include <net/bluetooth/hci_core.h> 31 #include <net/bluetooth/mgmt.h> 32 33 #include "a2mp.h" 34 #include "amp.h" 35 #include "smp.h" 36 37 /* Handle HCI Event packets */ 38 39 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) 40 { 41 __u8 status = *((__u8 *) skb->data); 42 43 BT_DBG("%s status 0x%2.2x", hdev->name, status); 44 45 if (status) 46 return; 47 48 clear_bit(HCI_INQUIRY, &hdev->flags); 49 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 50 wake_up_bit(&hdev->flags, HCI_INQUIRY); 51 52 hci_dev_lock(hdev); 53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 54 hci_dev_unlock(hdev); 55 56 hci_conn_check_pending(hdev); 57 } 58 59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) 60 { 61 __u8 status = *((__u8 *) skb->data); 62 63 BT_DBG("%s status 0x%2.2x", hdev->name, status); 64 65 if (status) 66 return; 67 68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags); 69 } 70 71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) 72 { 73 __u8 status = *((__u8 *) skb->data); 74 75 BT_DBG("%s status 0x%2.2x", hdev->name, status); 76 77 if (status) 78 return; 79 80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags); 81 82 hci_conn_check_pending(hdev); 83 } 84 85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, 86 struct sk_buff *skb) 87 { 88 BT_DBG("%s", hdev->name); 89 } 90 91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb) 92 { 93 struct hci_rp_role_discovery *rp = (void *) skb->data; 94 struct hci_conn *conn; 95 96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 97 98 if (rp->status) 99 return; 100 101 hci_dev_lock(hdev); 102 103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 104 if (conn) 105 conn->role = rp->role; 106 107 hci_dev_unlock(hdev); 108 } 109 110 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct 
sk_buff *skb) 111 { 112 struct hci_rp_read_link_policy *rp = (void *) skb->data; 113 struct hci_conn *conn; 114 115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 116 117 if (rp->status) 118 return; 119 120 hci_dev_lock(hdev); 121 122 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 123 if (conn) 124 conn->link_policy = __le16_to_cpu(rp->policy); 125 126 hci_dev_unlock(hdev); 127 } 128 129 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 130 { 131 struct hci_rp_write_link_policy *rp = (void *) skb->data; 132 struct hci_conn *conn; 133 void *sent; 134 135 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 136 137 if (rp->status) 138 return; 139 140 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); 141 if (!sent) 142 return; 143 144 hci_dev_lock(hdev); 145 146 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 147 if (conn) 148 conn->link_policy = get_unaligned_le16(sent + 2); 149 150 hci_dev_unlock(hdev); 151 } 152 153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, 154 struct sk_buff *skb) 155 { 156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data; 157 158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 159 160 if (rp->status) 161 return; 162 163 hdev->link_policy = __le16_to_cpu(rp->policy); 164 } 165 166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, 167 struct sk_buff *skb) 168 { 169 __u8 status = *((__u8 *) skb->data); 170 void *sent; 171 172 BT_DBG("%s status 0x%2.2x", hdev->name, status); 173 174 if (status) 175 return; 176 177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 178 if (!sent) 179 return; 180 181 hdev->link_policy = get_unaligned_le16(sent); 182 } 183 184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) 185 { 186 __u8 status = *((__u8 *) skb->data); 187 188 BT_DBG("%s status 0x%2.2x", hdev->name, status); 189 190 clear_bit(HCI_RESET, &hdev->flags); 191 192 /* Reset all non-persistent 
flags */ 193 hdev->dev_flags &= ~HCI_PERSISTENT_MASK; 194 195 hdev->discovery.state = DISCOVERY_STOPPED; 196 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 197 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 198 199 memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); 200 hdev->adv_data_len = 0; 201 202 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); 203 hdev->scan_rsp_data_len = 0; 204 205 hdev->le_scan_type = LE_SCAN_PASSIVE; 206 207 hdev->ssp_debug_mode = 0; 208 } 209 210 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 211 { 212 __u8 status = *((__u8 *) skb->data); 213 void *sent; 214 215 BT_DBG("%s status 0x%2.2x", hdev->name, status); 216 217 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 218 if (!sent) 219 return; 220 221 hci_dev_lock(hdev); 222 223 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 224 mgmt_set_local_name_complete(hdev, sent, status); 225 else if (!status) 226 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 227 228 hci_dev_unlock(hdev); 229 } 230 231 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 232 { 233 struct hci_rp_read_local_name *rp = (void *) skb->data; 234 235 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 236 237 if (rp->status) 238 return; 239 240 if (test_bit(HCI_SETUP, &hdev->dev_flags)) 241 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 242 } 243 244 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) 245 { 246 __u8 status = *((__u8 *) skb->data); 247 void *sent; 248 249 BT_DBG("%s status 0x%2.2x", hdev->name, status); 250 251 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 252 if (!sent) 253 return; 254 255 if (!status) { 256 __u8 param = *((__u8 *) sent); 257 258 if (param == AUTH_ENABLED) 259 set_bit(HCI_AUTH, &hdev->flags); 260 else 261 clear_bit(HCI_AUTH, &hdev->flags); 262 } 263 264 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 265 mgmt_auth_enable_complete(hdev, status); 266 } 267 268 static void 
hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) 269 { 270 __u8 status = *((__u8 *) skb->data); 271 __u8 param; 272 void *sent; 273 274 BT_DBG("%s status 0x%2.2x", hdev->name, status); 275 276 if (status) 277 return; 278 279 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 280 if (!sent) 281 return; 282 283 param = *((__u8 *) sent); 284 285 if (param) 286 set_bit(HCI_ENCRYPT, &hdev->flags); 287 else 288 clear_bit(HCI_ENCRYPT, &hdev->flags); 289 } 290 291 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 292 { 293 __u8 status = *((__u8 *) skb->data); 294 __u8 param; 295 void *sent; 296 297 BT_DBG("%s status 0x%2.2x", hdev->name, status); 298 299 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 300 if (!sent) 301 return; 302 303 param = *((__u8 *) sent); 304 305 hci_dev_lock(hdev); 306 307 if (status) { 308 hdev->discov_timeout = 0; 309 goto done; 310 } 311 312 if (param & SCAN_INQUIRY) 313 set_bit(HCI_ISCAN, &hdev->flags); 314 else 315 clear_bit(HCI_ISCAN, &hdev->flags); 316 317 if (param & SCAN_PAGE) 318 set_bit(HCI_PSCAN, &hdev->flags); 319 else 320 clear_bit(HCI_PSCAN, &hdev->flags); 321 322 done: 323 hci_dev_unlock(hdev); 324 } 325 326 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 327 { 328 struct hci_rp_read_class_of_dev *rp = (void *) skb->data; 329 330 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 331 332 if (rp->status) 333 return; 334 335 memcpy(hdev->dev_class, rp->dev_class, 3); 336 337 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 338 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 339 } 340 341 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 342 { 343 __u8 status = *((__u8 *) skb->data); 344 void *sent; 345 346 BT_DBG("%s status 0x%2.2x", hdev->name, status); 347 348 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 349 if (!sent) 350 return; 351 352 hci_dev_lock(hdev); 353 354 if (status == 
0) 355 memcpy(hdev->dev_class, sent, 3); 356 357 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 358 mgmt_set_class_of_dev_complete(hdev, sent, status); 359 360 hci_dev_unlock(hdev); 361 } 362 363 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 364 { 365 struct hci_rp_read_voice_setting *rp = (void *) skb->data; 366 __u16 setting; 367 368 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 369 370 if (rp->status) 371 return; 372 373 setting = __le16_to_cpu(rp->voice_setting); 374 375 if (hdev->voice_setting == setting) 376 return; 377 378 hdev->voice_setting = setting; 379 380 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting); 381 382 if (hdev->notify) 383 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 384 } 385 386 static void hci_cc_write_voice_setting(struct hci_dev *hdev, 387 struct sk_buff *skb) 388 { 389 __u8 status = *((__u8 *) skb->data); 390 __u16 setting; 391 void *sent; 392 393 BT_DBG("%s status 0x%2.2x", hdev->name, status); 394 395 if (status) 396 return; 397 398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 399 if (!sent) 400 return; 401 402 setting = get_unaligned_le16(sent); 403 404 if (hdev->voice_setting == setting) 405 return; 406 407 hdev->voice_setting = setting; 408 409 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting); 410 411 if (hdev->notify) 412 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 413 } 414 415 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev, 416 struct sk_buff *skb) 417 { 418 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data; 419 420 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 421 422 if (rp->status) 423 return; 424 425 hdev->num_iac = rp->num_iac; 426 427 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac); 428 } 429 430 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 431 { 432 __u8 status = *((__u8 *) skb->data); 433 struct hci_cp_write_ssp_mode *sent; 434 435 BT_DBG("%s status 0x%2.2x", hdev->name, status); 436 
437 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 438 if (!sent) 439 return; 440 441 if (!status) { 442 if (sent->mode) 443 hdev->features[1][0] |= LMP_HOST_SSP; 444 else 445 hdev->features[1][0] &= ~LMP_HOST_SSP; 446 } 447 448 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 449 mgmt_ssp_enable_complete(hdev, sent->mode, status); 450 else if (!status) { 451 if (sent->mode) 452 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 453 else 454 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 455 } 456 } 457 458 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb) 459 { 460 u8 status = *((u8 *) skb->data); 461 struct hci_cp_write_sc_support *sent; 462 463 BT_DBG("%s status 0x%2.2x", hdev->name, status); 464 465 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); 466 if (!sent) 467 return; 468 469 if (!status) { 470 if (sent->support) 471 hdev->features[1][0] |= LMP_HOST_SC; 472 else 473 hdev->features[1][0] &= ~LMP_HOST_SC; 474 } 475 476 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 477 mgmt_sc_enable_complete(hdev, sent->support, status); 478 else if (!status) { 479 if (sent->support) 480 set_bit(HCI_SC_ENABLED, &hdev->dev_flags); 481 else 482 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags); 483 } 484 } 485 486 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 487 { 488 struct hci_rp_read_local_version *rp = (void *) skb->data; 489 490 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 491 492 if (rp->status) 493 return; 494 495 if (test_bit(HCI_SETUP, &hdev->dev_flags)) { 496 hdev->hci_ver = rp->hci_ver; 497 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 498 hdev->lmp_ver = rp->lmp_ver; 499 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 500 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 501 } 502 } 503 504 static void hci_cc_read_local_commands(struct hci_dev *hdev, 505 struct sk_buff *skb) 506 { 507 struct hci_rp_read_local_commands *rp = (void *) skb->data; 508 509 BT_DBG("%s status 0x%2.2x", hdev->name, 
rp->status); 510 511 if (rp->status) 512 return; 513 514 if (test_bit(HCI_SETUP, &hdev->dev_flags)) 515 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 516 } 517 518 static void hci_cc_read_local_features(struct hci_dev *hdev, 519 struct sk_buff *skb) 520 { 521 struct hci_rp_read_local_features *rp = (void *) skb->data; 522 523 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 524 525 if (rp->status) 526 return; 527 528 memcpy(hdev->features, rp->features, 8); 529 530 /* Adjust default settings according to features 531 * supported by device. */ 532 533 if (hdev->features[0][0] & LMP_3SLOT) 534 hdev->pkt_type |= (HCI_DM3 | HCI_DH3); 535 536 if (hdev->features[0][0] & LMP_5SLOT) 537 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 538 539 if (hdev->features[0][1] & LMP_HV2) { 540 hdev->pkt_type |= (HCI_HV2); 541 hdev->esco_type |= (ESCO_HV2); 542 } 543 544 if (hdev->features[0][1] & LMP_HV3) { 545 hdev->pkt_type |= (HCI_HV3); 546 hdev->esco_type |= (ESCO_HV3); 547 } 548 549 if (lmp_esco_capable(hdev)) 550 hdev->esco_type |= (ESCO_EV3); 551 552 if (hdev->features[0][4] & LMP_EV4) 553 hdev->esco_type |= (ESCO_EV4); 554 555 if (hdev->features[0][4] & LMP_EV5) 556 hdev->esco_type |= (ESCO_EV5); 557 558 if (hdev->features[0][5] & LMP_EDR_ESCO_2M) 559 hdev->esco_type |= (ESCO_2EV3); 560 561 if (hdev->features[0][5] & LMP_EDR_ESCO_3M) 562 hdev->esco_type |= (ESCO_3EV3); 563 564 if (hdev->features[0][5] & LMP_EDR_3S_ESCO) 565 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 566 } 567 568 static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 569 struct sk_buff *skb) 570 { 571 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 572 573 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 574 575 if (rp->status) 576 return; 577 578 if (hdev->max_page < rp->max_page) 579 hdev->max_page = rp->max_page; 580 581 if (rp->page < HCI_MAX_PAGES) 582 memcpy(hdev->features[rp->page], rp->features, 8); 583 } 584 585 static void 
hci_cc_read_flow_control_mode(struct hci_dev *hdev, 586 struct sk_buff *skb) 587 { 588 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; 589 590 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 591 592 if (rp->status) 593 return; 594 595 hdev->flow_ctl_mode = rp->mode; 596 } 597 598 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 599 { 600 struct hci_rp_read_buffer_size *rp = (void *) skb->data; 601 602 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 603 604 if (rp->status) 605 return; 606 607 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); 608 hdev->sco_mtu = rp->sco_mtu; 609 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); 610 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); 611 612 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 613 hdev->sco_mtu = 64; 614 hdev->sco_pkts = 8; 615 } 616 617 hdev->acl_cnt = hdev->acl_pkts; 618 hdev->sco_cnt = hdev->sco_pkts; 619 620 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, 621 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); 622 } 623 624 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 625 { 626 struct hci_rp_read_bd_addr *rp = (void *) skb->data; 627 628 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 629 630 if (rp->status) 631 return; 632 633 if (test_bit(HCI_INIT, &hdev->flags)) 634 bacpy(&hdev->bdaddr, &rp->bdaddr); 635 636 if (test_bit(HCI_SETUP, &hdev->dev_flags)) 637 bacpy(&hdev->setup_addr, &rp->bdaddr); 638 } 639 640 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev, 641 struct sk_buff *skb) 642 { 643 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data; 644 645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 646 647 if (rp->status) 648 return; 649 650 if (test_bit(HCI_INIT, &hdev->flags)) { 651 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 652 hdev->page_scan_window = __le16_to_cpu(rp->window); 653 } 654 } 655 656 static void hci_cc_write_page_scan_activity(struct 
hci_dev *hdev, 657 struct sk_buff *skb) 658 { 659 u8 status = *((u8 *) skb->data); 660 struct hci_cp_write_page_scan_activity *sent; 661 662 BT_DBG("%s status 0x%2.2x", hdev->name, status); 663 664 if (status) 665 return; 666 667 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); 668 if (!sent) 669 return; 670 671 hdev->page_scan_interval = __le16_to_cpu(sent->interval); 672 hdev->page_scan_window = __le16_to_cpu(sent->window); 673 } 674 675 static void hci_cc_read_page_scan_type(struct hci_dev *hdev, 676 struct sk_buff *skb) 677 { 678 struct hci_rp_read_page_scan_type *rp = (void *) skb->data; 679 680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 681 682 if (rp->status) 683 return; 684 685 if (test_bit(HCI_INIT, &hdev->flags)) 686 hdev->page_scan_type = rp->type; 687 } 688 689 static void hci_cc_write_page_scan_type(struct hci_dev *hdev, 690 struct sk_buff *skb) 691 { 692 u8 status = *((u8 *) skb->data); 693 u8 *type; 694 695 BT_DBG("%s status 0x%2.2x", hdev->name, status); 696 697 if (status) 698 return; 699 700 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); 701 if (type) 702 hdev->page_scan_type = *type; 703 } 704 705 static void hci_cc_read_data_block_size(struct hci_dev *hdev, 706 struct sk_buff *skb) 707 { 708 struct hci_rp_read_data_block_size *rp = (void *) skb->data; 709 710 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 711 712 if (rp->status) 713 return; 714 715 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); 716 hdev->block_len = __le16_to_cpu(rp->block_len); 717 hdev->num_blocks = __le16_to_cpu(rp->num_blocks); 718 719 hdev->block_cnt = hdev->num_blocks; 720 721 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 722 hdev->block_cnt, hdev->block_len); 723 } 724 725 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb) 726 { 727 struct hci_rp_read_clock *rp = (void *) skb->data; 728 struct hci_cp_read_clock *cp; 729 struct hci_conn *conn; 730 731 BT_DBG("%s", hdev->name); 732 
733 if (skb->len < sizeof(*rp)) 734 return; 735 736 if (rp->status) 737 return; 738 739 hci_dev_lock(hdev); 740 741 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); 742 if (!cp) 743 goto unlock; 744 745 if (cp->which == 0x00) { 746 hdev->clock = le32_to_cpu(rp->clock); 747 goto unlock; 748 } 749 750 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 751 if (conn) { 752 conn->clock = le32_to_cpu(rp->clock); 753 conn->clock_accuracy = le16_to_cpu(rp->accuracy); 754 } 755 756 unlock: 757 hci_dev_unlock(hdev); 758 } 759 760 static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 761 struct sk_buff *skb) 762 { 763 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 764 765 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 766 767 if (rp->status) 768 goto a2mp_rsp; 769 770 hdev->amp_status = rp->amp_status; 771 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); 772 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); 773 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); 774 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); 775 hdev->amp_type = rp->amp_type; 776 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); 777 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); 778 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 779 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 780 781 a2mp_rsp: 782 a2mp_send_getinfo_rsp(hdev); 783 } 784 785 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev, 786 struct sk_buff *skb) 787 { 788 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data; 789 struct amp_assoc *assoc = &hdev->loc_assoc; 790 size_t rem_len, frag_len; 791 792 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 793 794 if (rp->status) 795 goto a2mp_rsp; 796 797 frag_len = skb->len - sizeof(*rp); 798 rem_len = __le16_to_cpu(rp->rem_len); 799 800 if (rem_len > frag_len) { 801 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len); 802 803 memcpy(assoc->data + assoc->offset, rp->frag, frag_len); 804 assoc->offset 
+= frag_len; 805 806 /* Read other fragments */ 807 amp_read_loc_assoc_frag(hdev, rp->phy_handle); 808 809 return; 810 } 811 812 memcpy(assoc->data + assoc->offset, rp->frag, rem_len); 813 assoc->len = assoc->offset + rem_len; 814 assoc->offset = 0; 815 816 a2mp_rsp: 817 /* Send A2MP Rsp when all fragments are received */ 818 a2mp_send_getampassoc_rsp(hdev, rp->status); 819 a2mp_send_create_phy_link_req(hdev, rp->status); 820 } 821 822 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 823 struct sk_buff *skb) 824 { 825 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; 826 827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 828 829 if (rp->status) 830 return; 831 832 hdev->inq_tx_power = rp->tx_power; 833 } 834 835 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) 836 { 837 struct hci_rp_pin_code_reply *rp = (void *) skb->data; 838 struct hci_cp_pin_code_reply *cp; 839 struct hci_conn *conn; 840 841 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 842 843 hci_dev_lock(hdev); 844 845 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 846 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 847 848 if (rp->status) 849 goto unlock; 850 851 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 852 if (!cp) 853 goto unlock; 854 855 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 856 if (conn) 857 conn->pin_length = cp->pin_len; 858 859 unlock: 860 hci_dev_unlock(hdev); 861 } 862 863 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) 864 { 865 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data; 866 867 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 868 869 hci_dev_lock(hdev); 870 871 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 872 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 873 rp->status); 874 875 hci_dev_unlock(hdev); 876 } 877 878 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, 879 struct sk_buff *skb) 880 { 881 struct 
hci_rp_le_read_buffer_size *rp = (void *) skb->data; 882 883 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 884 885 if (rp->status) 886 return; 887 888 hdev->le_mtu = __le16_to_cpu(rp->le_mtu); 889 hdev->le_pkts = rp->le_max_pkt; 890 891 hdev->le_cnt = hdev->le_pkts; 892 893 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 894 } 895 896 static void hci_cc_le_read_local_features(struct hci_dev *hdev, 897 struct sk_buff *skb) 898 { 899 struct hci_rp_le_read_local_features *rp = (void *) skb->data; 900 901 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 902 903 if (rp->status) 904 return; 905 906 memcpy(hdev->le_features, rp->features, 8); 907 } 908 909 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 910 struct sk_buff *skb) 911 { 912 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data; 913 914 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 915 916 if (rp->status) 917 return; 918 919 hdev->adv_tx_power = rp->tx_power; 920 } 921 922 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 923 { 924 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 925 926 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 927 928 hci_dev_lock(hdev); 929 930 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 931 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, 932 rp->status); 933 934 hci_dev_unlock(hdev); 935 } 936 937 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 938 struct sk_buff *skb) 939 { 940 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 941 942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 943 944 hci_dev_lock(hdev); 945 946 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 947 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 948 ACL_LINK, 0, rp->status); 949 950 hci_dev_unlock(hdev); 951 } 952 953 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) 954 { 955 struct hci_rp_user_confirm_reply *rp = (void *) 
skb->data; 956 957 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 958 959 hci_dev_lock(hdev); 960 961 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 962 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 963 0, rp->status); 964 965 hci_dev_unlock(hdev); 966 } 967 968 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, 969 struct sk_buff *skb) 970 { 971 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 972 973 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 974 975 hci_dev_lock(hdev); 976 977 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 978 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 979 ACL_LINK, 0, rp->status); 980 981 hci_dev_unlock(hdev); 982 } 983 984 static void hci_cc_read_local_oob_data(struct hci_dev *hdev, 985 struct sk_buff *skb) 986 { 987 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 988 989 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 990 991 hci_dev_lock(hdev); 992 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer, 993 NULL, NULL, rp->status); 994 hci_dev_unlock(hdev); 995 } 996 997 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, 998 struct sk_buff *skb) 999 { 1000 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; 1001 1002 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1003 1004 hci_dev_lock(hdev); 1005 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192, 1006 rp->hash256, rp->randomizer256, 1007 rp->status); 1008 hci_dev_unlock(hdev); 1009 } 1010 1011 1012 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb) 1013 { 1014 __u8 status = *((__u8 *) skb->data); 1015 bdaddr_t *sent; 1016 1017 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1018 1019 if (status) 1020 return; 1021 1022 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 1023 if (!sent) 1024 return; 1025 1026 hci_dev_lock(hdev); 1027 1028 bacpy(&hdev->random_addr, sent); 1029 1030 hci_dev_unlock(hdev); 1031 } 1032 1033 
/* Command Complete: LE Set Advertise Enable. Mirrors the controller's
 * advertising state into HCI_LE_ADV and, when advertising was just
 * enabled while an LE connection attempt is pending, arms the
 * connection timeout work.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}

/* Command Complete: LE Set Scan Parameters. Remembers the scan type
 * (active/passive) that was sent, for later use by the scan-enable
 * handler.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

/* A non-zero last_adv_addr means an advertising report is buffered and
 * still waiting for its matching scan response.
 */
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

/* Drop any buffered advertising report. */
static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

/* Buffer one advertising report in the discovery state so it can be
 * merged with a subsequent scan response before being sent to mgmt.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

/* Command Complete: LE Set Scan Enable. Updates HCI_LE_SCAN, flushes
 * any pending advertising report on scan disable, and resolves the
 * discovery/advertising state transitions.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}

/* Command Complete: LE Read White List Size. Caches the controller's
 * white list capacity.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}

/* Command Complete: LE Clear White List. Empties the host-side mirror
 * of the controller's white list.
 */
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

/* Command Complete: LE Add Device To White List. Mirrors the addition
 * into the host-side list on success.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

/* Command Complete: LE Remove Device From White List. Mirrors the
 * removal into the host-side list on success.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

/* Command Complete: LE Read Supported States. Caches the 8-byte LE
 * states bitmask reported by the controller.
 */
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

/* Command Complete: Write LE Host Supported. Keeps the host feature
 * bits and the HCI_LE_ENABLED/HCI_ADVERTISING flags in sync with what
 * was written to the controller.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent) 
		return;

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}

/* Command Complete: LE Set Advertising Parameters. Records the own
 * address type used for advertising.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

/* Command Complete: Write Remote AMP ASSOC. Continues the fragmented
 * AMP assoc write for the given physical link on success.
 */
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}

/* Command Complete: Read RSSI. Stores the reported RSSI on the
 * matching connection.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

/* Command Complete: Read Transmit Power Level. The sent type selects
 * whether the current (0x00) or maximum (0x01) power was requested;
 * store it in the corresponding connection field.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Command Status: Inquiry. Sets the in-progress flag, or re-checks
 * pending connections if the command failed.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

/* Command Status: Create Connection. On failure, either retries
 * (status 0x0c, up to 2 attempts) via BT_CONNECT2 or tears the
 * connection down; on success, makes sure a hci_conn object exists.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Add SCO Connection. Only a failure status needs
 * handling: close and delete the pending SCO link hanging off the ACL.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Authentication Requested. On failure during
 * connection setup, report the failure upward and drop the reference.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Set Connection Encryption. Same failure handling as
 * hci_cs_auth_requested.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

/* Decide whether an outgoing connection in BT_CONFIG still needs to
 * request authentication before proceeding. Returns 1 if so, else 0.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

/* Send a Remote Name Request for an inquiry cache entry, reusing the
 * page scan parameters learned during inquiry. Returns the result of
 * hci_send_cmd (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

/* Kick off name resolution for the next unresolved entry in the
 * discovery resolve list. Returns true if a request was sent.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

/* Handle a resolved (or failed) remote name during discovery: notify
 * mgmt, update the inquiry cache entry's name state, and either
 * continue resolving the next name or finish discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving the next
	 * name as it will be done upon receiving another Remote Name
	 * Request Complete Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

/* Command Status: Remote Name Request. Failure path only: clean up any
 * pending name resolution and, if needed, fall back to requesting
 * authentication on the connection.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Command Status: Read Remote Supported Features. On failure during
 * connection setup, report upward and drop the reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Read Remote Extended Features. Same failure handling
 * as hci_cs_read_remote_features.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Setup Synchronous Connection. Failure path only:
 * close and delete the pending SCO/eSCO link hanging off the ACL.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Sniff Mode. On failure, clear the mode-change-pending
 * flag and finish any SCO setup that was waiting on the mode change.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Exit Sniff Mode. Same failure handling as
 * hci_cs_sniff_mode.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Disconnect. Only a failed disconnect needs work:
 * report it to mgmt.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}

/* Command Status: Create Physical Link (AMP). Delete the connection on
 * failure, or continue with the remote AMP assoc write on success.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}

/* Command Status: Accept Physical Link (AMP). On success, continue
 * with the remote AMP assoc write.
 */
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	amp_write_remote_assoc(hdev, cp->phy_handle);
}

/* Command Status: LE Create Connection. On success, record the
 * initiator/responder addresses needed by SMP and, for directed
 * (non-white-list) connects, arm the connection timeout.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}

/* Command Status: LE Start Encryption. On failure for an established
 * connection, disconnect with an authentication-failure reason.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

/* Event: Inquiry Complete. Clears HCI_INQUIRY, wakes any waiters, and
 * transitions discovery to name resolution or to stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Event: Inquiry Result. Updates the inquiry cache and reports each
 * found device to mgmt.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= 0x00;
		data.ssp_mode		= 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}

/* Event: Connection Complete. Finalizes connection setup on success
 * (handle, timeouts, remote features, packet type) or reports and
 * cleans up on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO request may have been tracked as eSCO; retry the
		 * lookup and fix up the link type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

/* Send a Reject Connection Request with reason "bad address". */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}

/* Event: Connection Request. Applies accept/reject policy (link mode,
 * blacklist, connectable/whitelist), then accepts the ACL or sync
 * connection — or defers to the protocol layer when requested.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}

/* Map an HCI disconnect reason to the corresponding mgmt reason code. */
static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}

/* Event: Disconnection Complete. Notifies mgmt, removes the link key
 * if flagged, requeues auto-connect parameters, deletes the
 * connection, and re-enables LE advertising when appropriate.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK &&
	    test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}

/* Event: Authentication Complete. Updates the connection's auth state,
 * drives encryption setup during BT_CONFIG, and reports failures to
 * mgmt.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Remote Name Request Complete event: deliver the (possibly failed)
 * name lookup to mgmt and, if this side still needs to authenticate an
 * outgoing connection, issue the authentication request.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* We initiated pairing, which matters for key acceptance */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Encryption Change event: track the link's encryption/authentication
 * flags (including AES-CCM and FIPS status), enforce Secure Connections
 * Only policy, and complete or tear down the connection accordingly.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}

/* Change Connection Link Key Complete event: mark the link secure on
 * success and notify the upper layers of the key change.
 */
static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

/* Read Remote Supported Features Complete event: record feature page 0
 * and continue connection setup (extended features, remote name, or
 * connect confirmation). Body continues past this hunk boundary.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		/* Both sides support SSP: fetch extended feature page 1 */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Resolve the remote name before telling mgmt */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Command Complete event: dispatch the completion to the matching
 * hci_cc_* handler by opcode, complete any pending HCI request and
 * restart the command queue when the controller can take more commands.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed; stop the command timeout timer */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept another command; do not
	 * restart the queue while a reset is in flight.
	 */
	if (ev->ncmd &&
	    !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}

/* Command Status event: dispatch to the matching hci_cs_* handler and
 * restart the command queue. Body continues past this hunk boundary.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged; stop the command timeout timer */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request on failure, or on success when no further
	 * event is expected for this command.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}

/* Role Change event: record the new master/slave role and confirm any
 * pending role switch to the upper layers.
 */
static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

/* Number Of Completed Packets event (packet-based flow control):
 * return transmit credits per connection type and kick the TX worker.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the event length before trusting num_hndl */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the right pool, clamped to the controller limit */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Resolve a handle to a connection: directly for BR/EDR controllers,
 * via the logical-channel table for AMP controllers.
 */
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		return hci_conn_hash_lookup_handle(hdev, handle);
	case HCI_AMP:
		chan = hci_chan_lookup_handle(hdev, handle);
		if (chan)
			return chan->conn;
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		break;
	}

	return NULL;
}

/* Number Of Completed Data Blocks event (block-based flow control):
 * return block credits and kick the TX worker.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the event length before trusting num_hndl */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Mode Change event: track active/sniff mode, maintain the power-save
 * flag and finish any SCO setup deferred until the link was active.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

/* PIN Code Request event: extend the disconnect timeout for pairing,
 * reject when not bondable, otherwise forward to mgmt. Body continues
 * past this hunk boundary.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Give the pairing procedure a longer idle timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags)
 &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		/* Not bondable and we did not initiate: reject the PIN */
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Link Key Request event: look up a stored key for the peer, reject
 * keys too weak for the pending security level, and reply with the key
 * or a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Unauthenticated keys are unusable when MITM protection
		 * is required (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Short-PIN combination keys do not satisfy high/FIPS
		 * security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

/* Link Key Notification event: store the new key, tell mgmt about it
 * and decide whether the key should be flushed on disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Clock Offset event: cache the offset in the inquiry cache so later
 * connection attempts can page faster.
 */
static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

/* Connection Packet Type Changed event: track the negotiated packet
 * types on the connection object.
 */
static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

/* Page Scan Repetition Mode Change event: refresh the cached page scan
 * mode in the inquiry cache.
 */
static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

/* Inquiry Result with RSSI event: body continues past this hunk
 * boundary.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Two on-air formats exist; distinguish them by per-response size */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}

/* Read Remote Extended Features Complete event: record the feature
 * page, derive the remote host's SSP/SC support from page 1 and
 * continue connection setup.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Resolve the remote name before telling mgmt */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Synchronous Connection Complete event: finish SCO/eSCO setup, with a
 * fallback from eSCO to plain SCO packet types for controllers/peers
 * that reject the first parameter set.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO request may have been downgraded to SCO by the
		 * controller; look the connection up under the other type.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with the fallback packet-type mask */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

/* Return the length of the used (non-padding) portion of an EIR
 * buffer: data up to the first zero-length field, or eir_len if no
 * terminating field is found.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}

/* Extended Inquiry Result event: body continues past this hunk
 * boundary.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp =
*((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* Extended Inquiry Results imply SSP support on the remote */
		data.ssp_mode = 0x01;

		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}

/* HCI Encryption Key Refresh Complete event */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on a live link is treated as an
	 * authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Derive the combined authentication requirement from the remote's
 * stated requirement and our own auth_type (bit 0 is the MITM bit).
 */
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}

/* HCI IO Capability Request event */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we actually have it for
		 * this peer and this direction of pairing.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* HCI IO Capability Response event: record the remote's IO
 * capability, authentication requirement and OOB data flag.
 */
static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;
	if (ev->oob_data)
		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}

/* HCI User Confirmation Request event */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 *
request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optional delayed auto-accept via the workqueue */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

/* HCI User Passkey Request event: forward to userspace via mgmt */
static void hci_user_passkey_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_MGMT,
&hdev->dev_flags))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}

/* HCI User Passkey Notification event: cache the passkey on the
 * connection and notify userspace.
 */
static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

/* HCI Keypress Notification event: track how many passkey digits the
 * remote has entered and relay progress to userspace.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

/* HCI Simple Pairing Complete event */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;
	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

/* HCI Remote Host Supported Features Notification event: cache the
 * remote host feature page and mirror its SSP bit into the inquiry
 * cache entry.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

/* HCI Remote OOB Data Request event: answer with stored OOB data
 * (extended variant when Secure Connections is enabled) or send a
 * negative reply when we have none.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256,
			       sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* AMP Physical Link Complete event */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The AMP link inherits its peer address from the underlying
	 * BR/EDR connection tracked by the AMP manager.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}

/* AMP Logical Link Complete event */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}

/* AMP Disconnection Logical Link Complete event */
static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}

/* AMP Disconnection Physical Link Complete event */
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}

/* LE Connection Complete event */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct
hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection attempt succeeded before the timeout fired */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The auto-connect params no longer own the connection; drop
	 * their reference so the refcount stays balanced.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}

/* LE Connection Update Complete event: record the negotiated
 * connection parameters.
 */
static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency =
le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}

/* This function requires the caller holds hdev->lock */
static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connections from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = conn;
		return;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}
}

/* Handle one advertising report: resolve the address through the IRK
 * store, trigger pending auto-connections and emit/merge device found
 * events towards mgmt.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller gets confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
4453 */ 4454 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 4455 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 4456 d->last_adv_data, d->last_adv_data_len, data, len); 4457 clear_pending_adv_report(hdev); 4458 } 4459 4460 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 4461 { 4462 u8 num_reports = skb->data[0]; 4463 void *ptr = &skb->data[1]; 4464 4465 hci_dev_lock(hdev); 4466 4467 while (num_reports--) { 4468 struct hci_ev_le_advertising_info *ev = ptr; 4469 s8 rssi; 4470 4471 rssi = ev->data[ev->length]; 4472 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 4473 ev->bdaddr_type, rssi, ev->data, ev->length); 4474 4475 ptr += sizeof(*ev) + ev->length + 1; 4476 } 4477 4478 hci_dev_unlock(hdev); 4479 } 4480 4481 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 4482 { 4483 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 4484 struct hci_cp_le_ltk_reply cp; 4485 struct hci_cp_le_ltk_neg_reply neg; 4486 struct hci_conn *conn; 4487 struct smp_ltk *ltk; 4488 4489 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle)); 4490 4491 hci_dev_lock(hdev); 4492 4493 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4494 if (conn == NULL) 4495 goto not_found; 4496 4497 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role); 4498 if (ltk == NULL) 4499 goto not_found; 4500 4501 memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); 4502 cp.handle = cpu_to_le16(conn->handle); 4503 4504 if (ltk->authenticated) 4505 conn->pending_sec_level = BT_SECURITY_HIGH; 4506 else 4507 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4508 4509 conn->enc_key_size = ltk->enc_size; 4510 4511 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 4512 4513 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 4514 * temporary key used to encrypt a connection following 4515 * pairing. It is used during the Encrypted Session Setup to 4516 * distribute the keys. 
Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* STKs are single-use: forget the key once consumed */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

/* Reject an LE connection parameter request with the given reason */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

/* LE Remote Connection Parameter Request event: validate the proposed
 * parameters, remember them for known devices (master role) and reply
 * to the controller.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

/* LE Meta event: dispatch on the subevent code */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	default:
		break;
	}
}

/* AMP Channel Selected event */
static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_channel_selected *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);

	skb_pull(skb, sizeof(*ev));

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}

/* Main HCI event dispatcher */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received
	events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Strip the event header before handing off to the handlers */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the command that is currently outstanding was waiting for
	 * exactly this event, complete its request now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the per-event handler */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown event: just log it at debug level */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}