/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* Get HCI device by index.
 * Device is held on return.
*/ 67 struct hci_dev *hci_dev_get(int index) 68 { 69 struct hci_dev *hdev = NULL, *d; 70 71 BT_DBG("%d", index); 72 73 if (index < 0) 74 return NULL; 75 76 read_lock(&hci_dev_list_lock); 77 list_for_each_entry(d, &hci_dev_list, list) { 78 if (d->id == index) { 79 hdev = hci_dev_hold(d); 80 break; 81 } 82 } 83 read_unlock(&hci_dev_list_lock); 84 return hdev; 85 } 86 87 /* ---- Inquiry support ---- */ 88 89 bool hci_discovery_active(struct hci_dev *hdev) 90 { 91 struct discovery_state *discov = &hdev->discovery; 92 93 switch (discov->state) { 94 case DISCOVERY_FINDING: 95 case DISCOVERY_RESOLVING: 96 return true; 97 98 default: 99 return false; 100 } 101 } 102 103 void hci_discovery_set_state(struct hci_dev *hdev, int state) 104 { 105 int old_state = hdev->discovery.state; 106 107 if (old_state == state) 108 return; 109 110 hdev->discovery.state = state; 111 112 switch (state) { 113 case DISCOVERY_STOPPED: 114 hci_update_passive_scan(hdev); 115 116 if (old_state != DISCOVERY_STARTING) 117 mgmt_discovering(hdev, 0); 118 break; 119 case DISCOVERY_STARTING: 120 break; 121 case DISCOVERY_FINDING: 122 mgmt_discovering(hdev, 1); 123 break; 124 case DISCOVERY_RESOLVING: 125 break; 126 case DISCOVERY_STOPPING: 127 break; 128 } 129 130 bt_dev_dbg(hdev, "state %u -> %u", old_state, state); 131 } 132 133 void hci_inquiry_cache_flush(struct hci_dev *hdev) 134 { 135 struct discovery_state *cache = &hdev->discovery; 136 struct inquiry_entry *p, *n; 137 138 list_for_each_entry_safe(p, n, &cache->all, all) { 139 list_del(&p->all); 140 kfree(p); 141 } 142 143 INIT_LIST_HEAD(&cache->unknown); 144 INIT_LIST_HEAD(&cache->resolve); 145 } 146 147 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, 148 bdaddr_t *bdaddr) 149 { 150 struct discovery_state *cache = &hdev->discovery; 151 struct inquiry_entry *e; 152 153 BT_DBG("cache %p, %pMR", cache, bdaddr); 154 155 list_for_each_entry(e, &cache->all, all) { 156 if (!bacmp(&e->data.bdaddr, bdaddr)) 157 return e; 158 } 159 160 return NULL; 161 } 162 163 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, 164 bdaddr_t *bdaddr) 165 { 166 struct discovery_state *cache = &hdev->discovery; 167 struct inquiry_entry *e; 168 169 BT_DBG("cache %p, %pMR", cache, bdaddr); 170 171 list_for_each_entry(e, &cache->unknown, list) { 172 if (!bacmp(&e->data.bdaddr, bdaddr)) 173 return e; 174 } 175 176 return NULL; 177 } 178 179 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, 180 bdaddr_t *bdaddr, 181 int state) 182 { 183 struct discovery_state *cache = &hdev->discovery; 184 struct inquiry_entry *e; 185 186 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); 187 188 list_for_each_entry(e, &cache->resolve, list) { 189 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) 190 return e; 191 if (!bacmp(&e->data.bdaddr, bdaddr)) 192 return e; 193 } 194 195 return NULL; 196 } 197 198 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, 199 struct inquiry_entry *ie) 200 { 201 struct discovery_state *cache = &hdev->discovery; 202 struct list_head *pos = &cache->resolve; 203 struct inquiry_entry *p; 204 205 list_del(&ie->list); 206 207 list_for_each_entry(p, &cache->resolve, list) { 208 if (p->name_state != NAME_PENDING && 209 abs(p->data.rssi) >= abs(ie->data.rssi)) 210 break; 211 pos = &p->list; 212 } 213 214 list_add(&ie->list, pos); 215 } 216 217 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, 218 bool name_known) 219 { 220 struct discovery_state *cache = 
&hdev->discovery; 221 struct inquiry_entry *ie; 222 u32 flags = 0; 223 224 BT_DBG("cache %p, %pMR", cache, &data->bdaddr); 225 226 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); 227 228 if (!data->ssp_mode) 229 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; 230 231 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 232 if (ie) { 233 if (!ie->data.ssp_mode) 234 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; 235 236 if (ie->name_state == NAME_NEEDED && 237 data->rssi != ie->data.rssi) { 238 ie->data.rssi = data->rssi; 239 hci_inquiry_cache_update_resolve(hdev, ie); 240 } 241 242 goto update; 243 } 244 245 /* Entry not in the cache. Add new one. */ 246 ie = kzalloc(sizeof(*ie), GFP_KERNEL); 247 if (!ie) { 248 flags |= MGMT_DEV_FOUND_CONFIRM_NAME; 249 goto done; 250 } 251 252 list_add(&ie->all, &cache->all); 253 254 if (name_known) { 255 ie->name_state = NAME_KNOWN; 256 } else { 257 ie->name_state = NAME_NOT_KNOWN; 258 list_add(&ie->list, &cache->unknown); 259 } 260 261 update: 262 if (name_known && ie->name_state != NAME_KNOWN && 263 ie->name_state != NAME_PENDING) { 264 ie->name_state = NAME_KNOWN; 265 list_del(&ie->list); 266 } 267 268 memcpy(&ie->data, data, sizeof(*data)); 269 ie->timestamp = jiffies; 270 cache->timestamp = jiffies; 271 272 if (ie->name_state == NAME_NOT_KNOWN) 273 flags |= MGMT_DEV_FOUND_CONFIRM_NAME; 274 275 done: 276 return flags; 277 } 278 279 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 280 { 281 struct discovery_state *cache = &hdev->discovery; 282 struct inquiry_info *info = (struct inquiry_info *) buf; 283 struct inquiry_entry *e; 284 int copied = 0; 285 286 list_for_each_entry(e, &cache->all, all) { 287 struct inquiry_data *data = &e->data; 288 289 if (copied >= num) 290 break; 291 292 bacpy(&info->bdaddr, &data->bdaddr); 293 info->pscan_rep_mode = data->pscan_rep_mode; 294 info->pscan_period_mode = data->pscan_period_mode; 295 info->pscan_mode = data->pscan_mode; 296 memcpy(info->dev_class, data->dev_class, 3); 297 info->clock_offset = data->clock_offset; 298 299 info++; 300 copied++; 301 } 302 303 BT_DBG("cache %p, copied %d", cache, copied); 304 return copied; 305 } 306 307 int hci_inquiry(void __user *arg) 308 { 309 __u8 __user *ptr = arg; 310 struct hci_inquiry_req ir; 311 struct hci_dev *hdev; 312 int err = 0, do_inquiry = 0, max_rsp; 313 __u8 *buf; 314 315 if (copy_from_user(&ir, ptr, sizeof(ir))) 316 return -EFAULT; 317 318 hdev = hci_dev_get(ir.dev_id); 319 if (!hdev) 320 return -ENODEV; 321 322 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 323 err = -EBUSY; 324 goto done; 325 } 326 327 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 328 err = -EOPNOTSUPP; 329 goto done; 330 } 331 332 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 333 err = -EOPNOTSUPP; 334 goto done; 335 } 336 337 /* Restrict maximum inquiry length to 60 seconds */ 338 if (ir.length > 60) { 339 err = -EINVAL; 340 goto done; 341 } 342 343 hci_dev_lock(hdev); 344 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 345 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { 346 hci_inquiry_cache_flush(hdev); 347 do_inquiry = 1; 348 } 349 hci_dev_unlock(hdev); 350 351 if (do_inquiry) { 352 hci_req_sync_lock(hdev); 353 err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp); 354 hci_req_sync_unlock(hdev); 355 356 if (err < 0) 357 goto done; 358 359 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is 360 * cleared). If it is interrupted by a signal, return -EINTR. 
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
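/* Illustrative sketch: hci_dev_open() and hci_dev_close() are the legacy
 * ioctl helpers and are expected to be driven from the HCI socket ioctl
 * path roughly as below. The dev_id argument and the surrounding switch
 * are placeholders, assuming the usual CAP_NET_ADMIN check done by
 * hci_sock.c:
 *
 *	case HCIDEVUP:
 *		if (!capable(CAP_NET_ADMIN))
 *			return -EPERM;
 *		return hci_dev_open(dev_id);
 *
 *	case HCIDEVDOWN:
 *		if (!capable(CAP_NET_ADMIN))
 *			return -EPERM;
 *		return hci_dev_close(dev_id);
 */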
int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
543 */ 544 drain_workqueue(hdev->workqueue); 545 546 hci_dev_lock(hdev); 547 hci_inquiry_cache_flush(hdev); 548 hci_conn_hash_flush(hdev); 549 hci_dev_unlock(hdev); 550 551 if (hdev->flush) 552 hdev->flush(hdev); 553 554 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); 555 556 atomic_set(&hdev->cmd_cnt, 1); 557 hdev->acl_cnt = 0; 558 hdev->sco_cnt = 0; 559 hdev->le_cnt = 0; 560 hdev->iso_cnt = 0; 561 562 ret = hci_reset_sync(hdev); 563 564 hci_req_sync_unlock(hdev); 565 return ret; 566 } 567 568 int hci_dev_reset(__u16 dev) 569 { 570 struct hci_dev *hdev; 571 int err; 572 573 hdev = hci_dev_get(dev); 574 if (!hdev) 575 return -ENODEV; 576 577 if (!test_bit(HCI_UP, &hdev->flags)) { 578 err = -ENETDOWN; 579 goto done; 580 } 581 582 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 583 err = -EBUSY; 584 goto done; 585 } 586 587 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 588 err = -EOPNOTSUPP; 589 goto done; 590 } 591 592 err = hci_dev_do_reset(hdev); 593 594 done: 595 hci_dev_put(hdev); 596 return err; 597 } 598 599 int hci_dev_reset_stat(__u16 dev) 600 { 601 struct hci_dev *hdev; 602 int ret = 0; 603 604 hdev = hci_dev_get(dev); 605 if (!hdev) 606 return -ENODEV; 607 608 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 609 ret = -EBUSY; 610 goto done; 611 } 612 613 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 614 ret = -EOPNOTSUPP; 615 goto done; 616 } 617 618 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 619 620 done: 621 hci_dev_put(hdev); 622 return ret; 623 } 624 625 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) 626 { 627 bool conn_changed, discov_changed; 628 629 BT_DBG("%s scan 0x%02x", hdev->name, scan); 630 631 if ((scan & SCAN_PAGE)) 632 conn_changed = !hci_dev_test_and_set_flag(hdev, 633 HCI_CONNECTABLE); 634 else 635 conn_changed = hci_dev_test_and_clear_flag(hdev, 636 HCI_CONNECTABLE); 637 638 if ((scan & SCAN_INQUIRY)) { 639 discov_changed = !hci_dev_test_and_set_flag(hdev, 640 HCI_DISCOVERABLE); 641 } else { 642 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); 643 discov_changed = hci_dev_test_and_clear_flag(hdev, 644 HCI_DISCOVERABLE); 645 } 646 647 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 648 return; 649 650 if (conn_changed || discov_changed) { 651 /* In case this was disabled through mgmt */ 652 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 653 654 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 655 hci_update_adv_data(hdev, hdev->cur_adv_instance); 656 657 mgmt_new_settings(hdev); 658 } 659 } 660 661 int hci_dev_cmd(unsigned int cmd, void __user *arg) 662 { 663 struct hci_dev *hdev; 664 struct hci_dev_req dr; 665 __le16 policy; 666 int err = 0; 667 668 if (copy_from_user(&dr, arg, sizeof(dr))) 669 return -EFAULT; 670 671 hdev = hci_dev_get(dr.dev_id); 672 if (!hdev) 673 return -ENODEV; 674 675 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 676 err = -EBUSY; 677 goto done; 678 } 679 680 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 681 err = -EOPNOTSUPP; 682 goto done; 683 } 684 685 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 686 err = -EOPNOTSUPP; 687 goto done; 688 } 689 690 switch (cmd) { 691 case HCISETAUTH: 692 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, 693 1, &dr.dev_opt, HCI_CMD_TIMEOUT); 694 break; 695 696 case HCISETENCRYPT: 697 if (!lmp_encrypt_capable(hdev)) { 698 err = -EOPNOTSUPP; 699 break; 700 } 701 702 if (!test_bit(HCI_AUTH, &hdev->flags)) { 703 /* Auth must be enabled first */ 704 err = hci_cmd_sync_status(hdev, 705 HCI_OP_WRITE_AUTH_ENABLE, 706 1, &dr.dev_opt, 707 HCI_CMD_TIMEOUT); 708 
if (err) 709 break; 710 } 711 712 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 713 1, &dr.dev_opt, HCI_CMD_TIMEOUT); 714 break; 715 716 case HCISETSCAN: 717 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, 718 1, &dr.dev_opt, HCI_CMD_TIMEOUT); 719 720 /* Ensure that the connectable and discoverable states 721 * get correctly modified as this was a non-mgmt change. 722 */ 723 if (!err) 724 hci_update_passive_scan_state(hdev, dr.dev_opt); 725 break; 726 727 case HCISETLINKPOL: 728 policy = cpu_to_le16(dr.dev_opt); 729 730 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 731 2, &policy, HCI_CMD_TIMEOUT); 732 break; 733 734 case HCISETLINKMODE: 735 hdev->link_mode = ((__u16) dr.dev_opt) & 736 (HCI_LM_MASTER | HCI_LM_ACCEPT); 737 break; 738 739 case HCISETPTYPE: 740 if (hdev->pkt_type == (__u16) dr.dev_opt) 741 break; 742 743 hdev->pkt_type = (__u16) dr.dev_opt; 744 mgmt_phy_configuration_changed(hdev, NULL); 745 break; 746 747 case HCISETACLMTU: 748 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); 749 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); 750 break; 751 752 case HCISETSCOMTU: 753 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); 754 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); 755 break; 756 757 default: 758 err = -EINVAL; 759 break; 760 } 761 762 done: 763 hci_dev_put(hdev); 764 return err; 765 } 766 767 int hci_get_dev_list(void __user *arg) 768 { 769 struct hci_dev *hdev; 770 struct hci_dev_list_req *dl; 771 struct hci_dev_req *dr; 772 int n = 0, err; 773 __u16 dev_num; 774 775 if (get_user(dev_num, (__u16 __user *) arg)) 776 return -EFAULT; 777 778 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 779 return -EINVAL; 780 781 dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL); 782 if (!dl) 783 return -ENOMEM; 784 785 dl->dev_num = dev_num; 786 dr = dl->dev_req; 787 788 read_lock(&hci_dev_list_lock); 789 list_for_each_entry(hdev, &hci_dev_list, list) { 790 unsigned long flags = hdev->flags; 791 792 /* When the auto-off is configured it means the transport 793 * is running, but in that case still indicate that the 794 * device is actually down. 795 */ 796 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 797 flags &= ~BIT(HCI_UP); 798 799 dr[n].dev_id = hdev->id; 800 dr[n].dev_opt = flags; 801 802 if (++n >= dev_num) 803 break; 804 } 805 read_unlock(&hci_dev_list_lock); 806 807 dl->dev_num = n; 808 err = copy_to_user(arg, dl, struct_size(dl, dev_req, n)); 809 kfree(dl); 810 811 return err ? -EFAULT : 0; 812 } 813 814 int hci_get_dev_info(void __user *arg) 815 { 816 struct hci_dev *hdev; 817 struct hci_dev_info di; 818 unsigned long flags; 819 int err = 0; 820 821 if (copy_from_user(&di, arg, sizeof(di))) 822 return -EFAULT; 823 824 hdev = hci_dev_get(di.dev_id); 825 if (!hdev) 826 return -ENODEV; 827 828 /* When the auto-off is configured it means the transport 829 * is running, but in that case still indicate that the 830 * device is actually down. 
831 */ 832 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 833 flags = hdev->flags & ~BIT(HCI_UP); 834 else 835 flags = hdev->flags; 836 837 strscpy(di.name, hdev->name, sizeof(di.name)); 838 di.bdaddr = hdev->bdaddr; 839 di.type = (hdev->bus & 0x0f); 840 di.flags = flags; 841 di.pkt_type = hdev->pkt_type; 842 if (lmp_bredr_capable(hdev)) { 843 di.acl_mtu = hdev->acl_mtu; 844 di.acl_pkts = hdev->acl_pkts; 845 di.sco_mtu = hdev->sco_mtu; 846 di.sco_pkts = hdev->sco_pkts; 847 } else { 848 di.acl_mtu = hdev->le_mtu; 849 di.acl_pkts = hdev->le_pkts; 850 di.sco_mtu = 0; 851 di.sco_pkts = 0; 852 } 853 di.link_policy = hdev->link_policy; 854 di.link_mode = hdev->link_mode; 855 856 memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); 857 memcpy(&di.features, &hdev->features, sizeof(di.features)); 858 859 if (copy_to_user(arg, &di, sizeof(di))) 860 err = -EFAULT; 861 862 hci_dev_put(hdev); 863 864 return err; 865 } 866 867 /* ---- Interface to HCI drivers ---- */ 868 869 static int hci_dev_do_poweroff(struct hci_dev *hdev) 870 { 871 int err; 872 873 BT_DBG("%s %p", hdev->name, hdev); 874 875 hci_req_sync_lock(hdev); 876 877 err = hci_set_powered_sync(hdev, false); 878 879 hci_req_sync_unlock(hdev); 880 881 return err; 882 } 883 884 static int hci_rfkill_set_block(void *data, bool blocked) 885 { 886 struct hci_dev *hdev = data; 887 int err; 888 889 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 890 891 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 892 return -EBUSY; 893 894 if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED)) 895 return 0; 896 897 if (blocked) { 898 hci_dev_set_flag(hdev, HCI_RFKILLED); 899 900 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 901 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 902 err = hci_dev_do_poweroff(hdev); 903 if (err) { 904 bt_dev_err(hdev, "Error when powering off device on rfkill (%d)", 905 err); 906 907 /* Make sure the device is still closed even if 908 * anything during power off sequence (eg. 909 * disconnecting devices) failed. 910 */ 911 hci_dev_do_close(hdev); 912 } 913 } 914 } else { 915 hci_dev_clear_flag(hdev, HCI_RFKILLED); 916 } 917 918 return 0; 919 } 920 921 static const struct rfkill_ops hci_rfkill_ops = { 922 .set_block = hci_rfkill_set_block, 923 }; 924 925 static void hci_power_on(struct work_struct *work) 926 { 927 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 928 int err; 929 930 BT_DBG("%s", hdev->name); 931 932 if (test_bit(HCI_UP, &hdev->flags) && 933 hci_dev_test_flag(hdev, HCI_MGMT) && 934 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { 935 cancel_delayed_work(&hdev->power_off); 936 err = hci_powered_update_sync(hdev); 937 mgmt_power_on(hdev, err); 938 return; 939 } 940 941 err = hci_dev_do_open(hdev); 942 if (err < 0) { 943 hci_dev_lock(hdev); 944 mgmt_set_powered_failed(hdev, err); 945 hci_dev_unlock(hdev); 946 return; 947 } 948 949 /* During the HCI setup phase, a few error conditions are 950 * ignored and they need to be checked now. If they are still 951 * valid, it is important to turn the device back off. 
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* Now that the controller is configured, it is
		 * important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	hci_dev_hold(hdev);
	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);

	hci_dev_put(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if
(b->type == type && !memcmp(b->val, val, sizeof(b->val))) { 1080 blocked = true; 1081 break; 1082 } 1083 } 1084 1085 rcu_read_unlock(); 1086 return blocked; 1087 } 1088 1089 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1090 { 1091 struct link_key *k; 1092 1093 rcu_read_lock(); 1094 list_for_each_entry_rcu(k, &hdev->link_keys, list) { 1095 if (bacmp(bdaddr, &k->bdaddr) == 0) { 1096 rcu_read_unlock(); 1097 1098 if (hci_is_blocked_key(hdev, 1099 HCI_BLOCKED_KEY_TYPE_LINKKEY, 1100 k->val)) { 1101 bt_dev_warn_ratelimited(hdev, 1102 "Link key blocked for %pMR", 1103 &k->bdaddr); 1104 return NULL; 1105 } 1106 1107 return k; 1108 } 1109 } 1110 rcu_read_unlock(); 1111 1112 return NULL; 1113 } 1114 1115 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1116 u8 key_type, u8 old_key_type) 1117 { 1118 /* Legacy key */ 1119 if (key_type < 0x03) 1120 return true; 1121 1122 /* Debug keys are insecure so don't store them persistently */ 1123 if (key_type == HCI_LK_DEBUG_COMBINATION) 1124 return false; 1125 1126 /* Changed combination key and there's no previous one */ 1127 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 1128 return false; 1129 1130 /* Security mode 3 case */ 1131 if (!conn) 1132 return true; 1133 1134 /* BR/EDR key derived using SC from an LE link */ 1135 if (conn->type == LE_LINK) 1136 return true; 1137 1138 /* Neither local nor remote side had no-bonding as requirement */ 1139 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) 1140 return true; 1141 1142 /* Local side had dedicated bonding as requirement */ 1143 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 1144 return true; 1145 1146 /* Remote side had dedicated bonding as requirement */ 1147 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 1148 return true; 1149 1150 /* If none of the above criteria match, then don't store the key 1151 * persistently */ 1152 return false; 1153 } 1154 1155 static u8 ltk_role(u8 type) 1156 { 1157 if (type == SMP_LTK) 1158 return HCI_ROLE_MASTER; 1159 1160 return HCI_ROLE_SLAVE; 1161 } 1162 1163 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1164 u8 addr_type, u8 role) 1165 { 1166 struct smp_ltk *k; 1167 1168 rcu_read_lock(); 1169 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 1170 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) 1171 continue; 1172 1173 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { 1174 rcu_read_unlock(); 1175 1176 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, 1177 k->val)) { 1178 bt_dev_warn_ratelimited(hdev, 1179 "LTK blocked for %pMR", 1180 &k->bdaddr); 1181 return NULL; 1182 } 1183 1184 return k; 1185 } 1186 } 1187 rcu_read_unlock(); 1188 1189 return NULL; 1190 } 1191 1192 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) 1193 { 1194 struct smp_irk *irk_to_return = NULL; 1195 struct smp_irk *irk; 1196 1197 rcu_read_lock(); 1198 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 1199 if (!bacmp(&irk->rpa, rpa)) { 1200 irk_to_return = irk; 1201 goto done; 1202 } 1203 } 1204 1205 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 1206 if (smp_irk_matches(hdev, irk->val, rpa)) { 1207 bacpy(&irk->rpa, rpa); 1208 irk_to_return = irk; 1209 goto done; 1210 } 1211 } 1212 1213 done: 1214 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, 1215 irk_to_return->val)) { 1216 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", 1217 
&irk_to_return->bdaddr); 1218 irk_to_return = NULL; 1219 } 1220 1221 rcu_read_unlock(); 1222 1223 return irk_to_return; 1224 } 1225 1226 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1227 u8 addr_type) 1228 { 1229 struct smp_irk *irk_to_return = NULL; 1230 struct smp_irk *irk; 1231 1232 /* Identity Address must be public or static random */ 1233 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) 1234 return NULL; 1235 1236 rcu_read_lock(); 1237 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 1238 if (addr_type == irk->addr_type && 1239 bacmp(bdaddr, &irk->bdaddr) == 0) { 1240 irk_to_return = irk; 1241 goto done; 1242 } 1243 } 1244 1245 done: 1246 1247 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, 1248 irk_to_return->val)) { 1249 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", 1250 &irk_to_return->bdaddr); 1251 irk_to_return = NULL; 1252 } 1253 1254 rcu_read_unlock(); 1255 1256 return irk_to_return; 1257 } 1258 1259 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, 1260 bdaddr_t *bdaddr, u8 *val, u8 type, 1261 u8 pin_len, bool *persistent) 1262 { 1263 struct link_key *key, *old_key; 1264 u8 old_key_type; 1265 1266 old_key = hci_find_link_key(hdev, bdaddr); 1267 if (old_key) { 1268 old_key_type = old_key->type; 1269 key = old_key; 1270 } else { 1271 old_key_type = conn ? conn->key_type : 0xff; 1272 key = kzalloc(sizeof(*key), GFP_KERNEL); 1273 if (!key) 1274 return NULL; 1275 list_add_rcu(&key->list, &hdev->link_keys); 1276 } 1277 1278 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); 1279 1280 /* Some buggy controller combinations generate a changed 1281 * combination key for legacy pairing even when there's no 1282 * previous key */ 1283 if (type == HCI_LK_CHANGED_COMBINATION && 1284 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { 1285 type = HCI_LK_COMBINATION; 1286 if (conn) 1287 conn->key_type = type; 1288 } 1289 1290 bacpy(&key->bdaddr, bdaddr); 1291 memcpy(key->val, val, HCI_LINK_KEY_SIZE); 1292 key->pin_len = pin_len; 1293 1294 if (type == HCI_LK_CHANGED_COMBINATION) 1295 key->type = old_key_type; 1296 else 1297 key->type = type; 1298 1299 if (persistent) 1300 *persistent = hci_persistent_key(hdev, conn, type, 1301 old_key_type); 1302 1303 return key; 1304 } 1305 1306 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1307 u8 addr_type, u8 type, u8 authenticated, 1308 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) 1309 { 1310 struct smp_ltk *key, *old_key; 1311 u8 role = ltk_role(type); 1312 1313 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); 1314 if (old_key) 1315 key = old_key; 1316 else { 1317 key = kzalloc(sizeof(*key), GFP_KERNEL); 1318 if (!key) 1319 return NULL; 1320 list_add_rcu(&key->list, &hdev->long_term_keys); 1321 } 1322 1323 bacpy(&key->bdaddr, bdaddr); 1324 key->bdaddr_type = addr_type; 1325 memcpy(key->val, tk, sizeof(key->val)); 1326 key->authenticated = authenticated; 1327 key->ediv = ediv; 1328 key->rand = rand; 1329 key->enc_size = enc_size; 1330 key->type = type; 1331 1332 return key; 1333 } 1334 1335 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1336 u8 addr_type, u8 val[16], bdaddr_t *rpa) 1337 { 1338 struct smp_irk *irk; 1339 1340 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); 1341 if (!irk) { 1342 irk = kzalloc(sizeof(*irk), GFP_KERNEL); 1343 if (!irk) 1344 return NULL; 1345 1346 bacpy(&irk->bdaddr, bdaddr); 1347 irk->addr_type = 
addr_type; 1348 1349 list_add_rcu(&irk->list, &hdev->identity_resolving_keys); 1350 } 1351 1352 memcpy(irk->val, val, 16); 1353 bacpy(&irk->rpa, rpa); 1354 1355 return irk; 1356 } 1357 1358 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1359 { 1360 struct link_key *key; 1361 1362 key = hci_find_link_key(hdev, bdaddr); 1363 if (!key) 1364 return -ENOENT; 1365 1366 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 1367 1368 list_del_rcu(&key->list); 1369 kfree_rcu(key, rcu); 1370 1371 return 0; 1372 } 1373 1374 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) 1375 { 1376 struct smp_ltk *k, *tmp; 1377 int removed = 0; 1378 1379 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { 1380 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) 1381 continue; 1382 1383 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 1384 1385 list_del_rcu(&k->list); 1386 kfree_rcu(k, rcu); 1387 removed++; 1388 } 1389 1390 return removed ? 0 : -ENOENT; 1391 } 1392 1393 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) 1394 { 1395 struct smp_irk *k, *tmp; 1396 1397 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { 1398 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) 1399 continue; 1400 1401 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 1402 1403 list_del_rcu(&k->list); 1404 kfree_rcu(k, rcu); 1405 } 1406 } 1407 1408 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 1409 { 1410 struct smp_ltk *k; 1411 struct smp_irk *irk; 1412 u8 addr_type; 1413 1414 if (type == BDADDR_BREDR) { 1415 if (hci_find_link_key(hdev, bdaddr)) 1416 return true; 1417 return false; 1418 } 1419 1420 /* Convert to HCI addr type which struct smp_ltk uses */ 1421 if (type == BDADDR_LE_PUBLIC) 1422 addr_type = ADDR_LE_DEV_PUBLIC; 1423 else 1424 addr_type = ADDR_LE_DEV_RANDOM; 1425 1426 irk = hci_get_irk(hdev, bdaddr, addr_type); 1427 if (irk) { 1428 bdaddr = &irk->bdaddr; 1429 addr_type = irk->addr_type; 1430 } 1431 1432 rcu_read_lock(); 1433 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 1434 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { 1435 rcu_read_unlock(); 1436 return true; 1437 } 1438 } 1439 rcu_read_unlock(); 1440 1441 return false; 1442 } 1443 1444 /* HCI command timer function */ 1445 static void hci_cmd_timeout(struct work_struct *work) 1446 { 1447 struct hci_dev *hdev = container_of(work, struct hci_dev, 1448 cmd_timer.work); 1449 1450 if (hdev->req_skb) { 1451 u16 opcode = hci_skb_opcode(hdev->req_skb); 1452 1453 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); 1454 1455 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT); 1456 } else { 1457 bt_dev_err(hdev, "command tx timeout"); 1458 } 1459 1460 if (hdev->cmd_timeout) 1461 hdev->cmd_timeout(hdev); 1462 1463 atomic_set(&hdev->cmd_cnt, 1); 1464 queue_work(hdev->workqueue, &hdev->cmd_work); 1465 } 1466 1467 /* HCI ncmd timer function */ 1468 static void hci_ncmd_timeout(struct work_struct *work) 1469 { 1470 struct hci_dev *hdev = container_of(work, struct hci_dev, 1471 ncmd_timer.work); 1472 1473 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); 1474 1475 /* During HCI_INIT phase no events can be injected if the ncmd timer 1476 * triggers since the procedure has its own timeout handling. 
1477 */ 1478 if (test_bit(HCI_INIT, &hdev->flags)) 1479 return; 1480 1481 /* This is an irrecoverable state, inject hardware error event */ 1482 hci_reset_dev(hdev); 1483 } 1484 1485 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 1486 bdaddr_t *bdaddr, u8 bdaddr_type) 1487 { 1488 struct oob_data *data; 1489 1490 list_for_each_entry(data, &hdev->remote_oob_data, list) { 1491 if (bacmp(bdaddr, &data->bdaddr) != 0) 1492 continue; 1493 if (data->bdaddr_type != bdaddr_type) 1494 continue; 1495 return data; 1496 } 1497 1498 return NULL; 1499 } 1500 1501 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 1502 u8 bdaddr_type) 1503 { 1504 struct oob_data *data; 1505 1506 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 1507 if (!data) 1508 return -ENOENT; 1509 1510 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); 1511 1512 list_del(&data->list); 1513 kfree(data); 1514 1515 return 0; 1516 } 1517 1518 void hci_remote_oob_data_clear(struct hci_dev *hdev) 1519 { 1520 struct oob_data *data, *n; 1521 1522 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { 1523 list_del(&data->list); 1524 kfree(data); 1525 } 1526 } 1527 1528 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 1529 u8 bdaddr_type, u8 *hash192, u8 *rand192, 1530 u8 *hash256, u8 *rand256) 1531 { 1532 struct oob_data *data; 1533 1534 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 1535 if (!data) { 1536 data = kmalloc(sizeof(*data), GFP_KERNEL); 1537 if (!data) 1538 return -ENOMEM; 1539 1540 bacpy(&data->bdaddr, bdaddr); 1541 data->bdaddr_type = bdaddr_type; 1542 list_add(&data->list, &hdev->remote_oob_data); 1543 } 1544 1545 if (hash192 && rand192) { 1546 memcpy(data->hash192, hash192, sizeof(data->hash192)); 1547 memcpy(data->rand192, rand192, sizeof(data->rand192)); 1548 if (hash256 && rand256) 1549 data->present = 0x03; 1550 } else { 1551 memset(data->hash192, 0, sizeof(data->hash192)); 1552 memset(data->rand192, 0, sizeof(data->rand192)); 1553 if (hash256 && rand256) 1554 data->present = 0x02; 1555 else 1556 data->present = 0x00; 1557 } 1558 1559 if (hash256 && rand256) { 1560 memcpy(data->hash256, hash256, sizeof(data->hash256)); 1561 memcpy(data->rand256, rand256, sizeof(data->rand256)); 1562 } else { 1563 memset(data->hash256, 0, sizeof(data->hash256)); 1564 memset(data->rand256, 0, sizeof(data->rand256)); 1565 if (hash192 && rand192) 1566 data->present = 0x01; 1567 } 1568 1569 BT_DBG("%s for %pMR", hdev->name, bdaddr); 1570 1571 return 0; 1572 } 1573 1574 /* This function requires the caller holds hdev->lock */ 1575 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) 1576 { 1577 struct adv_info *adv_instance; 1578 1579 list_for_each_entry(adv_instance, &hdev->adv_instances, list) { 1580 if (adv_instance->instance == instance) 1581 return adv_instance; 1582 } 1583 1584 return NULL; 1585 } 1586 1587 /* This function requires the caller holds hdev->lock */ 1588 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) 1589 { 1590 struct adv_info *cur_instance; 1591 1592 cur_instance = hci_find_adv_instance(hdev, instance); 1593 if (!cur_instance) 1594 return NULL; 1595 1596 if (cur_instance == list_last_entry(&hdev->adv_instances, 1597 struct adv_info, list)) 1598 return list_first_entry(&hdev->adv_instances, 1599 struct adv_info, list); 1600 else 1601 return list_next_entry(cur_instance, list); 1602 } 1603 1604 /* This function requires the caller holds hdev->lock */ 1605 int 
hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;

		/* If the controller supports only one set and the instance
		 * is set to 1 then there is no option other than using
		 * handle 0x00.
		 */
		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
			adv->handle = 0x00;
		else
			adv->handle = instance;

		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return adv;
}
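/* Illustrative sketch: a caller registering a new advertising instance is
 * expected to invoke hci_add_adv_instance() with hdev->lock held, roughly as
 * below. The data buffers, lengths, timeout and duration shown here are
 * placeholders, not values taken from this file:
 *
 *	adv = hci_add_adv_instance(hdev, 1, MGMT_ADV_FLAG_CONNECTABLE,
 *				   adv_data_len, adv_data,
 *				   scan_rsp_len, scan_rsp_data,
 *				   timeout, duration,
 *				   HCI_ADV_TX_POWER_NO_PREFERENCE,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval, 0);
 *	if (IS_ERR(adv))
 *		return PTR_ERR(adv);
 */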
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier.
*/ 1824 if (!adv) 1825 return 0; 1826 1827 return adv->flags; 1828 } 1829 1830 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) 1831 { 1832 struct adv_info *adv; 1833 1834 /* Instance 0x00 always set local name */ 1835 if (instance == 0x00) 1836 return true; 1837 1838 adv = hci_find_adv_instance(hdev, instance); 1839 if (!adv) 1840 return false; 1841 1842 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || 1843 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) 1844 return true; 1845 1846 return adv->scan_rsp_len ? true : false; 1847 } 1848 1849 /* This function requires the caller holds hdev->lock */ 1850 void hci_adv_monitors_clear(struct hci_dev *hdev) 1851 { 1852 struct adv_monitor *monitor; 1853 int handle; 1854 1855 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) 1856 hci_free_adv_monitor(hdev, monitor); 1857 1858 idr_destroy(&hdev->adv_monitors_idr); 1859 } 1860 1861 /* Frees the monitor structure and do some bookkeepings. 1862 * This function requires the caller holds hdev->lock. 1863 */ 1864 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) 1865 { 1866 struct adv_pattern *pattern; 1867 struct adv_pattern *tmp; 1868 1869 if (!monitor) 1870 return; 1871 1872 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { 1873 list_del(&pattern->list); 1874 kfree(pattern); 1875 } 1876 1877 if (monitor->handle) 1878 idr_remove(&hdev->adv_monitors_idr, monitor->handle); 1879 1880 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { 1881 hdev->adv_monitors_cnt--; 1882 mgmt_adv_monitor_removed(hdev, monitor->handle); 1883 } 1884 1885 kfree(monitor); 1886 } 1887 1888 /* Assigns handle to a monitor, and if offloading is supported and power is on, 1889 * also attempts to forward the request to the controller. 1890 * This function requires the caller holds hci_req_sync_lock. 1891 */ 1892 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) 1893 { 1894 int min, max, handle; 1895 int status = 0; 1896 1897 if (!monitor) 1898 return -EINVAL; 1899 1900 hci_dev_lock(hdev); 1901 1902 min = HCI_MIN_ADV_MONITOR_HANDLE; 1903 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; 1904 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, 1905 GFP_KERNEL); 1906 1907 hci_dev_unlock(hdev); 1908 1909 if (handle < 0) 1910 return handle; 1911 1912 monitor->handle = handle; 1913 1914 if (!hdev_is_powered(hdev)) 1915 return status; 1916 1917 switch (hci_get_adv_monitor_offload_ext(hdev)) { 1918 case HCI_ADV_MONITOR_EXT_NONE: 1919 bt_dev_dbg(hdev, "add monitor %d status %d", 1920 monitor->handle, status); 1921 /* Message was not forwarded to controller - not an error */ 1922 break; 1923 1924 case HCI_ADV_MONITOR_EXT_MSFT: 1925 status = msft_add_monitor_pattern(hdev, monitor); 1926 bt_dev_dbg(hdev, "add monitor %d msft status %d", 1927 handle, status); 1928 break; 1929 } 1930 1931 return status; 1932 } 1933 1934 /* Attempts to tell the controller and free the monitor. If somehow the 1935 * controller doesn't have a corresponding handle, remove anyway. 1936 * This function requires the caller holds hci_req_sync_lock. 
1937 */ 1938 static int hci_remove_adv_monitor(struct hci_dev *hdev, 1939 struct adv_monitor *monitor) 1940 { 1941 int status = 0; 1942 int handle; 1943 1944 switch (hci_get_adv_monitor_offload_ext(hdev)) { 1945 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ 1946 bt_dev_dbg(hdev, "remove monitor %d status %d", 1947 monitor->handle, status); 1948 goto free_monitor; 1949 1950 case HCI_ADV_MONITOR_EXT_MSFT: 1951 handle = monitor->handle; 1952 status = msft_remove_monitor(hdev, monitor); 1953 bt_dev_dbg(hdev, "remove monitor %d msft status %d", 1954 handle, status); 1955 break; 1956 } 1957 1958 /* In case no matching handle registered, just free the monitor */ 1959 if (status == -ENOENT) 1960 goto free_monitor; 1961 1962 return status; 1963 1964 free_monitor: 1965 if (status == -ENOENT) 1966 bt_dev_warn(hdev, "Removing monitor with no matching handle %d", 1967 monitor->handle); 1968 hci_free_adv_monitor(hdev, monitor); 1969 1970 return status; 1971 } 1972 1973 /* This function requires the caller holds hci_req_sync_lock */ 1974 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) 1975 { 1976 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); 1977 1978 if (!monitor) 1979 return -EINVAL; 1980 1981 return hci_remove_adv_monitor(hdev, monitor); 1982 } 1983 1984 /* This function requires the caller holds hci_req_sync_lock */ 1985 int hci_remove_all_adv_monitor(struct hci_dev *hdev) 1986 { 1987 struct adv_monitor *monitor; 1988 int idr_next_id = 0; 1989 int status = 0; 1990 1991 while (1) { 1992 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); 1993 if (!monitor) 1994 break; 1995 1996 status = hci_remove_adv_monitor(hdev, monitor); 1997 if (status) 1998 return status; 1999 2000 idr_next_id++; 2001 } 2002 2003 return status; 2004 } 2005 2006 /* This function requires the caller holds hdev->lock */ 2007 bool hci_is_adv_monitoring(struct hci_dev *hdev) 2008 { 2009 return !idr_is_empty(&hdev->adv_monitors_idr); 2010 } 2011 2012 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) 2013 { 2014 if (msft_monitor_supported(hdev)) 2015 return HCI_ADV_MONITOR_EXT_MSFT; 2016 2017 return HCI_ADV_MONITOR_EXT_NONE; 2018 } 2019 2020 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, 2021 bdaddr_t *bdaddr, u8 type) 2022 { 2023 struct bdaddr_list *b; 2024 2025 list_for_each_entry(b, bdaddr_list, list) { 2026 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 2027 return b; 2028 } 2029 2030 return NULL; 2031 } 2032 2033 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( 2034 struct list_head *bdaddr_list, bdaddr_t *bdaddr, 2035 u8 type) 2036 { 2037 struct bdaddr_list_with_irk *b; 2038 2039 list_for_each_entry(b, bdaddr_list, list) { 2040 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 2041 return b; 2042 } 2043 2044 return NULL; 2045 } 2046 2047 struct bdaddr_list_with_flags * 2048 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, 2049 bdaddr_t *bdaddr, u8 type) 2050 { 2051 struct bdaddr_list_with_flags *b; 2052 2053 list_for_each_entry(b, bdaddr_list, list) { 2054 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 2055 return b; 2056 } 2057 2058 return NULL; 2059 } 2060 2061 void hci_bdaddr_list_clear(struct list_head *bdaddr_list) 2062 { 2063 struct bdaddr_list *b, *n; 2064 2065 list_for_each_entry_safe(b, n, bdaddr_list, list) { 2066 list_del(&b->list); 2067 kfree(b); 2068 } 2069 } 2070 2071 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) 
2072 { 2073 struct bdaddr_list *entry; 2074 2075 if (!bacmp(bdaddr, BDADDR_ANY)) 2076 return -EBADF; 2077 2078 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 2079 return -EEXIST; 2080 2081 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 2082 if (!entry) 2083 return -ENOMEM; 2084 2085 bacpy(&entry->bdaddr, bdaddr); 2086 entry->bdaddr_type = type; 2087 2088 list_add(&entry->list, list); 2089 2090 return 0; 2091 } 2092 2093 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, 2094 u8 type, u8 *peer_irk, u8 *local_irk) 2095 { 2096 struct bdaddr_list_with_irk *entry; 2097 2098 if (!bacmp(bdaddr, BDADDR_ANY)) 2099 return -EBADF; 2100 2101 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 2102 return -EEXIST; 2103 2104 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 2105 if (!entry) 2106 return -ENOMEM; 2107 2108 bacpy(&entry->bdaddr, bdaddr); 2109 entry->bdaddr_type = type; 2110 2111 if (peer_irk) 2112 memcpy(entry->peer_irk, peer_irk, 16); 2113 2114 if (local_irk) 2115 memcpy(entry->local_irk, local_irk, 16); 2116 2117 list_add(&entry->list, list); 2118 2119 return 0; 2120 } 2121 2122 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, 2123 u8 type, u32 flags) 2124 { 2125 struct bdaddr_list_with_flags *entry; 2126 2127 if (!bacmp(bdaddr, BDADDR_ANY)) 2128 return -EBADF; 2129 2130 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 2131 return -EEXIST; 2132 2133 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 2134 if (!entry) 2135 return -ENOMEM; 2136 2137 bacpy(&entry->bdaddr, bdaddr); 2138 entry->bdaddr_type = type; 2139 entry->flags = flags; 2140 2141 list_add(&entry->list, list); 2142 2143 return 0; 2144 } 2145 2146 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) 2147 { 2148 struct bdaddr_list *entry; 2149 2150 if (!bacmp(bdaddr, BDADDR_ANY)) { 2151 hci_bdaddr_list_clear(list); 2152 return 0; 2153 } 2154 2155 entry = hci_bdaddr_list_lookup(list, bdaddr, type); 2156 if (!entry) 2157 return -ENOENT; 2158 2159 list_del(&entry->list); 2160 kfree(entry); 2161 2162 return 0; 2163 } 2164 2165 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, 2166 u8 type) 2167 { 2168 struct bdaddr_list_with_irk *entry; 2169 2170 if (!bacmp(bdaddr, BDADDR_ANY)) { 2171 hci_bdaddr_list_clear(list); 2172 return 0; 2173 } 2174 2175 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); 2176 if (!entry) 2177 return -ENOENT; 2178 2179 list_del(&entry->list); 2180 kfree(entry); 2181 2182 return 0; 2183 } 2184 2185 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, 2186 u8 type) 2187 { 2188 struct bdaddr_list_with_flags *entry; 2189 2190 if (!bacmp(bdaddr, BDADDR_ANY)) { 2191 hci_bdaddr_list_clear(list); 2192 return 0; 2193 } 2194 2195 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); 2196 if (!entry) 2197 return -ENOENT; 2198 2199 list_del(&entry->list); 2200 kfree(entry); 2201 2202 return 0; 2203 } 2204 2205 /* This function requires the caller holds hdev->lock */ 2206 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, 2207 bdaddr_t *addr, u8 addr_type) 2208 { 2209 struct hci_conn_params *params; 2210 2211 list_for_each_entry(params, &hdev->le_conn_params, list) { 2212 if (bacmp(¶ms->addr, addr) == 0 && 2213 params->addr_type == addr_type) { 2214 return params; 2215 } 2216 } 2217 2218 return NULL; 2219 } 2220 2221 /* This function requires the caller holds hdev->lock or rcu_read_lock */ 2222 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, 2223 
bdaddr_t *addr, u8 addr_type) 2224 { 2225 struct hci_conn_params *param; 2226 2227 rcu_read_lock(); 2228 2229 list_for_each_entry_rcu(param, list, action) { 2230 if (bacmp(¶m->addr, addr) == 0 && 2231 param->addr_type == addr_type) { 2232 rcu_read_unlock(); 2233 return param; 2234 } 2235 } 2236 2237 rcu_read_unlock(); 2238 2239 return NULL; 2240 } 2241 2242 /* This function requires the caller holds hdev->lock */ 2243 void hci_pend_le_list_del_init(struct hci_conn_params *param) 2244 { 2245 if (list_empty(¶m->action)) 2246 return; 2247 2248 list_del_rcu(¶m->action); 2249 synchronize_rcu(); 2250 INIT_LIST_HEAD(¶m->action); 2251 } 2252 2253 /* This function requires the caller holds hdev->lock */ 2254 void hci_pend_le_list_add(struct hci_conn_params *param, 2255 struct list_head *list) 2256 { 2257 list_add_rcu(¶m->action, list); 2258 } 2259 2260 /* This function requires the caller holds hdev->lock */ 2261 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, 2262 bdaddr_t *addr, u8 addr_type) 2263 { 2264 struct hci_conn_params *params; 2265 2266 params = hci_conn_params_lookup(hdev, addr, addr_type); 2267 if (params) 2268 return params; 2269 2270 params = kzalloc(sizeof(*params), GFP_KERNEL); 2271 if (!params) { 2272 bt_dev_err(hdev, "out of memory"); 2273 return NULL; 2274 } 2275 2276 bacpy(¶ms->addr, addr); 2277 params->addr_type = addr_type; 2278 2279 list_add(¶ms->list, &hdev->le_conn_params); 2280 INIT_LIST_HEAD(¶ms->action); 2281 2282 params->conn_min_interval = hdev->le_conn_min_interval; 2283 params->conn_max_interval = hdev->le_conn_max_interval; 2284 params->conn_latency = hdev->le_conn_latency; 2285 params->supervision_timeout = hdev->le_supv_timeout; 2286 params->auto_connect = HCI_AUTO_CONN_DISABLED; 2287 2288 BT_DBG("addr %pMR (type %u)", addr, addr_type); 2289 2290 return params; 2291 } 2292 2293 void hci_conn_params_free(struct hci_conn_params *params) 2294 { 2295 hci_pend_le_list_del_init(params); 2296 2297 if (params->conn) { 2298 hci_conn_drop(params->conn); 2299 hci_conn_put(params->conn); 2300 } 2301 2302 list_del(¶ms->list); 2303 kfree(params); 2304 } 2305 2306 /* This function requires the caller holds hdev->lock */ 2307 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) 2308 { 2309 struct hci_conn_params *params; 2310 2311 params = hci_conn_params_lookup(hdev, addr, addr_type); 2312 if (!params) 2313 return; 2314 2315 hci_conn_params_free(params); 2316 2317 hci_update_passive_scan(hdev); 2318 2319 BT_DBG("addr %pMR (type %u)", addr, addr_type); 2320 } 2321 2322 /* This function requires the caller holds hdev->lock */ 2323 void hci_conn_params_clear_disabled(struct hci_dev *hdev) 2324 { 2325 struct hci_conn_params *params, *tmp; 2326 2327 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { 2328 if (params->auto_connect != HCI_AUTO_CONN_DISABLED) 2329 continue; 2330 2331 /* If trying to establish one time connection to disabled 2332 * device, leave the params, but mark them as just once. 
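 * (Setting HCI_AUTO_CONN_EXPLICIT below keeps the entry around only for
 * that single, explicitly requested connection attempt.)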
2333 */ 2334 if (params->explicit_connect) { 2335 params->auto_connect = HCI_AUTO_CONN_EXPLICIT; 2336 continue; 2337 } 2338 2339 hci_conn_params_free(params); 2340 } 2341 2342 BT_DBG("All LE disabled connection parameters were removed"); 2343 } 2344 2345 /* This function requires the caller holds hdev->lock */ 2346 static void hci_conn_params_clear_all(struct hci_dev *hdev) 2347 { 2348 struct hci_conn_params *params, *tmp; 2349 2350 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) 2351 hci_conn_params_free(params); 2352 2353 BT_DBG("All LE connection parameters were removed"); 2354 } 2355 2356 /* Copy the Identity Address of the controller. 2357 * 2358 * If the controller has a public BD_ADDR, then by default use that one. 2359 * If this is a LE only controller without a public address, default to 2360 * the static random address. 2361 * 2362 * For debugging purposes it is possible to force controllers with a 2363 * public address to use the static random address instead. 2364 * 2365 * In case BR/EDR has been disabled on a dual-mode controller and 2366 * userspace has configured a static address, then that address 2367 * becomes the identity address instead of the public BR/EDR address. 2368 */ 2369 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 2370 u8 *bdaddr_type) 2371 { 2372 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 2373 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 2374 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 2375 bacmp(&hdev->static_addr, BDADDR_ANY))) { 2376 bacpy(bdaddr, &hdev->static_addr); 2377 *bdaddr_type = ADDR_LE_DEV_RANDOM; 2378 } else { 2379 bacpy(bdaddr, &hdev->bdaddr); 2380 *bdaddr_type = ADDR_LE_DEV_PUBLIC; 2381 } 2382 } 2383 2384 static void hci_clear_wake_reason(struct hci_dev *hdev) 2385 { 2386 hci_dev_lock(hdev); 2387 2388 hdev->wake_reason = 0; 2389 bacpy(&hdev->wake_addr, BDADDR_ANY); 2390 hdev->wake_addr_type = 0; 2391 2392 hci_dev_unlock(hdev); 2393 } 2394 2395 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, 2396 void *data) 2397 { 2398 struct hci_dev *hdev = 2399 container_of(nb, struct hci_dev, suspend_notifier); 2400 int ret = 0; 2401 2402 /* Userspace has full control of this device. Do nothing. */ 2403 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 2404 return NOTIFY_DONE; 2405 2406 /* To avoid a potential race with hci_unregister_dev. */ 2407 hci_dev_hold(hdev); 2408 2409 if (action == PM_SUSPEND_PREPARE) 2410 ret = hci_suspend_dev(hdev); 2411 else if (action == PM_POST_SUSPEND) 2412 ret = hci_resume_dev(hdev); 2413 2414 if (ret) 2415 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", 2416 action, ret); 2417 2418 hci_dev_put(hdev); 2419 return NOTIFY_DONE; 2420 } 2421 2422 /* Alloc HCI device */ 2423 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) 2424 { 2425 struct hci_dev *hdev; 2426 unsigned int alloc_size; 2427 2428 alloc_size = sizeof(*hdev); 2429 if (sizeof_priv) { 2430 /* Fixme: May need ALIGN-ment? 
*/ 2431 alloc_size += sizeof_priv; 2432 } 2433 2434 hdev = kzalloc(alloc_size, GFP_KERNEL); 2435 if (!hdev) 2436 return NULL; 2437 2438 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 2439 hdev->esco_type = (ESCO_HV1); 2440 hdev->link_mode = (HCI_LM_ACCEPT); 2441 hdev->num_iac = 0x01; /* One IAC support is mandatory */ 2442 hdev->io_capability = 0x03; /* No Input No Output */ 2443 hdev->manufacturer = 0xffff; /* Default to internal use */ 2444 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 2445 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 2446 hdev->adv_instance_cnt = 0; 2447 hdev->cur_adv_instance = 0x00; 2448 hdev->adv_instance_timeout = 0; 2449 2450 hdev->advmon_allowlist_duration = 300; 2451 hdev->advmon_no_filter_duration = 500; 2452 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ 2453 2454 hdev->sniff_max_interval = 800; 2455 hdev->sniff_min_interval = 80; 2456 2457 hdev->le_adv_channel_map = 0x07; 2458 hdev->le_adv_min_interval = 0x0800; 2459 hdev->le_adv_max_interval = 0x0800; 2460 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST; 2461 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST; 2462 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1; 2463 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1; 2464 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; 2465 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; 2466 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST; 2467 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST; 2468 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN; 2469 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN; 2470 hdev->le_conn_min_interval = 0x0018; 2471 hdev->le_conn_max_interval = 0x0028; 2472 hdev->le_conn_latency = 0x0000; 2473 hdev->le_supv_timeout = 0x002a; 2474 hdev->le_def_tx_len = 0x001b; 2475 hdev->le_def_tx_time = 0x0148; 2476 hdev->le_max_tx_len = 0x001b; 2477 hdev->le_max_tx_time = 0x0148; 2478 hdev->le_max_rx_len = 0x001b; 2479 hdev->le_max_rx_time = 0x0148; 2480 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; 2481 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; 2482 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; 2483 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; 2484 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; 2485 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; 2486 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT; 2487 hdev->min_le_tx_power = HCI_TX_POWER_INVALID; 2488 hdev->max_le_tx_power = HCI_TX_POWER_INVALID; 2489 2490 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 2491 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 2492 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 2493 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 2494 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; 2495 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; 2496 2497 /* default 1.28 sec page scan */ 2498 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; 2499 hdev->def_page_scan_int = 0x0800; 2500 hdev->def_page_scan_window = 0x0012; 2501 2502 mutex_init(&hdev->lock); 2503 mutex_init(&hdev->req_lock); 2504 2505 ida_init(&hdev->unset_handle_ida); 2506 2507 INIT_LIST_HEAD(&hdev->mesh_pending); 2508 INIT_LIST_HEAD(&hdev->mgmt_pending); 2509 INIT_LIST_HEAD(&hdev->reject_list); 2510 INIT_LIST_HEAD(&hdev->accept_list); 2511 INIT_LIST_HEAD(&hdev->uuids); 2512 INIT_LIST_HEAD(&hdev->link_keys); 2513 INIT_LIST_HEAD(&hdev->long_term_keys); 2514 INIT_LIST_HEAD(&hdev->identity_resolving_keys); 2515 INIT_LIST_HEAD(&hdev->remote_oob_data); 2516 INIT_LIST_HEAD(&hdev->le_accept_list); 2517 
INIT_LIST_HEAD(&hdev->le_resolv_list); 2518 INIT_LIST_HEAD(&hdev->le_conn_params); 2519 INIT_LIST_HEAD(&hdev->pend_le_conns); 2520 INIT_LIST_HEAD(&hdev->pend_le_reports); 2521 INIT_LIST_HEAD(&hdev->conn_hash.list); 2522 INIT_LIST_HEAD(&hdev->adv_instances); 2523 INIT_LIST_HEAD(&hdev->blocked_keys); 2524 INIT_LIST_HEAD(&hdev->monitored_devices); 2525 2526 INIT_LIST_HEAD(&hdev->local_codecs); 2527 INIT_WORK(&hdev->rx_work, hci_rx_work); 2528 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 2529 INIT_WORK(&hdev->tx_work, hci_tx_work); 2530 INIT_WORK(&hdev->power_on, hci_power_on); 2531 INIT_WORK(&hdev->error_reset, hci_error_reset); 2532 2533 hci_cmd_sync_init(hdev); 2534 2535 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 2536 2537 skb_queue_head_init(&hdev->rx_q); 2538 skb_queue_head_init(&hdev->cmd_q); 2539 skb_queue_head_init(&hdev->raw_q); 2540 2541 init_waitqueue_head(&hdev->req_wait_q); 2542 2543 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); 2544 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); 2545 2546 hci_devcd_setup(hdev); 2547 2548 hci_init_sysfs(hdev); 2549 discovery_init(hdev); 2550 2551 return hdev; 2552 } 2553 EXPORT_SYMBOL(hci_alloc_dev_priv); 2554 2555 /* Free HCI device */ 2556 void hci_free_dev(struct hci_dev *hdev) 2557 { 2558 /* will free via device release */ 2559 put_device(&hdev->dev); 2560 } 2561 EXPORT_SYMBOL(hci_free_dev); 2562 2563 /* Register HCI device */ 2564 int hci_register_dev(struct hci_dev *hdev) 2565 { 2566 int id, error; 2567 2568 if (!hdev->open || !hdev->close || !hdev->send) 2569 return -EINVAL; 2570 2571 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL); 2572 if (id < 0) 2573 return id; 2574 2575 error = dev_set_name(&hdev->dev, "hci%u", id); 2576 if (error) 2577 return error; 2578 2579 hdev->name = dev_name(&hdev->dev); 2580 hdev->id = id; 2581 2582 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 2583 2584 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); 2585 if (!hdev->workqueue) { 2586 error = -ENOMEM; 2587 goto err; 2588 } 2589 2590 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, 2591 hdev->name); 2592 if (!hdev->req_workqueue) { 2593 destroy_workqueue(hdev->workqueue); 2594 error = -ENOMEM; 2595 goto err; 2596 } 2597 2598 if (!IS_ERR_OR_NULL(bt_debugfs)) 2599 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); 2600 2601 error = device_add(&hdev->dev); 2602 if (error < 0) 2603 goto err_wqueue; 2604 2605 hci_leds_init(hdev); 2606 2607 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 2608 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, 2609 hdev); 2610 if (hdev->rfkill) { 2611 if (rfkill_register(hdev->rfkill) < 0) { 2612 rfkill_destroy(hdev->rfkill); 2613 hdev->rfkill = NULL; 2614 } 2615 } 2616 2617 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) 2618 hci_dev_set_flag(hdev, HCI_RFKILLED); 2619 2620 hci_dev_set_flag(hdev, HCI_SETUP); 2621 hci_dev_set_flag(hdev, HCI_AUTO_OFF); 2622 2623 /* Assume BR/EDR support until proven otherwise (such as 2624 * through reading supported features during init. 2625 */ 2626 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 2627 2628 write_lock(&hci_dev_list_lock); 2629 list_add(&hdev->list, &hci_dev_list); 2630 write_unlock(&hci_dev_list_lock); 2631 2632 /* Devices that are marked for raw-only usage are unconfigured 2633 * and should not be included in normal operation. 
2634 */ 2635 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 2636 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 2637 2638 /* Mark Remote Wakeup connection flag as supported if driver has wakeup 2639 * callback. 2640 */ 2641 if (hdev->wakeup) 2642 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; 2643 2644 hci_sock_dev_event(hdev, HCI_DEV_REG); 2645 hci_dev_hold(hdev); 2646 2647 error = hci_register_suspend_notifier(hdev); 2648 if (error) 2649 BT_WARN("register suspend notifier failed error:%d\n", error); 2650 2651 queue_work(hdev->req_workqueue, &hdev->power_on); 2652 2653 idr_init(&hdev->adv_monitors_idr); 2654 msft_register(hdev); 2655 2656 return id; 2657 2658 err_wqueue: 2659 debugfs_remove_recursive(hdev->debugfs); 2660 destroy_workqueue(hdev->workqueue); 2661 destroy_workqueue(hdev->req_workqueue); 2662 err: 2663 ida_free(&hci_index_ida, hdev->id); 2664 2665 return error; 2666 } 2667 EXPORT_SYMBOL(hci_register_dev); 2668 2669 /* Unregister HCI device */ 2670 void hci_unregister_dev(struct hci_dev *hdev) 2671 { 2672 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 2673 2674 mutex_lock(&hdev->unregister_lock); 2675 hci_dev_set_flag(hdev, HCI_UNREGISTER); 2676 mutex_unlock(&hdev->unregister_lock); 2677 2678 write_lock(&hci_dev_list_lock); 2679 list_del(&hdev->list); 2680 write_unlock(&hci_dev_list_lock); 2681 2682 cancel_work_sync(&hdev->rx_work); 2683 cancel_work_sync(&hdev->cmd_work); 2684 cancel_work_sync(&hdev->tx_work); 2685 cancel_work_sync(&hdev->power_on); 2686 cancel_work_sync(&hdev->error_reset); 2687 2688 hci_cmd_sync_clear(hdev); 2689 2690 hci_unregister_suspend_notifier(hdev); 2691 2692 hci_dev_do_close(hdev); 2693 2694 if (!test_bit(HCI_INIT, &hdev->flags) && 2695 !hci_dev_test_flag(hdev, HCI_SETUP) && 2696 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 2697 hci_dev_lock(hdev); 2698 mgmt_index_removed(hdev); 2699 hci_dev_unlock(hdev); 2700 } 2701 2702 /* mgmt_index_removed should take care of emptying the 2703 * pending list */ 2704 BUG_ON(!list_empty(&hdev->mgmt_pending)); 2705 2706 hci_sock_dev_event(hdev, HCI_DEV_UNREG); 2707 2708 if (hdev->rfkill) { 2709 rfkill_unregister(hdev->rfkill); 2710 rfkill_destroy(hdev->rfkill); 2711 } 2712 2713 device_del(&hdev->dev); 2714 /* Actual cleanup is deferred until hci_release_dev(). 
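 * The hci_dev_put() below drops the reference taken in hci_register_dev();
 * the actual freeing happens from hci_release_dev() once the last
 * reference to the device is gone (compare hci_free_dev() above, which
 * likewise only drops a device reference).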
*/ 2715 hci_dev_put(hdev); 2716 } 2717 EXPORT_SYMBOL(hci_unregister_dev); 2718 2719 /* Release HCI device */ 2720 void hci_release_dev(struct hci_dev *hdev) 2721 { 2722 debugfs_remove_recursive(hdev->debugfs); 2723 kfree_const(hdev->hw_info); 2724 kfree_const(hdev->fw_info); 2725 2726 destroy_workqueue(hdev->workqueue); 2727 destroy_workqueue(hdev->req_workqueue); 2728 2729 hci_dev_lock(hdev); 2730 hci_bdaddr_list_clear(&hdev->reject_list); 2731 hci_bdaddr_list_clear(&hdev->accept_list); 2732 hci_uuids_clear(hdev); 2733 hci_link_keys_clear(hdev); 2734 hci_smp_ltks_clear(hdev); 2735 hci_smp_irks_clear(hdev); 2736 hci_remote_oob_data_clear(hdev); 2737 hci_adv_instances_clear(hdev); 2738 hci_adv_monitors_clear(hdev); 2739 hci_bdaddr_list_clear(&hdev->le_accept_list); 2740 hci_bdaddr_list_clear(&hdev->le_resolv_list); 2741 hci_conn_params_clear_all(hdev); 2742 hci_discovery_filter_clear(hdev); 2743 hci_blocked_keys_clear(hdev); 2744 hci_codec_list_clear(&hdev->local_codecs); 2745 msft_release(hdev); 2746 hci_dev_unlock(hdev); 2747 2748 ida_destroy(&hdev->unset_handle_ida); 2749 ida_free(&hci_index_ida, hdev->id); 2750 kfree_skb(hdev->sent_cmd); 2751 kfree_skb(hdev->req_skb); 2752 kfree_skb(hdev->recv_event); 2753 kfree(hdev); 2754 } 2755 EXPORT_SYMBOL(hci_release_dev); 2756 2757 int hci_register_suspend_notifier(struct hci_dev *hdev) 2758 { 2759 int ret = 0; 2760 2761 if (!hdev->suspend_notifier.notifier_call && 2762 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { 2763 hdev->suspend_notifier.notifier_call = hci_suspend_notifier; 2764 ret = register_pm_notifier(&hdev->suspend_notifier); 2765 } 2766 2767 return ret; 2768 } 2769 2770 int hci_unregister_suspend_notifier(struct hci_dev *hdev) 2771 { 2772 int ret = 0; 2773 2774 if (hdev->suspend_notifier.notifier_call) { 2775 ret = unregister_pm_notifier(&hdev->suspend_notifier); 2776 if (!ret) 2777 hdev->suspend_notifier.notifier_call = NULL; 2778 } 2779 2780 return ret; 2781 } 2782 2783 /* Cancel ongoing command synchronously: 2784 * 2785 * - Cancel command timer 2786 * - Reset command counter 2787 * - Cancel command request 2788 */ 2789 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err) 2790 { 2791 bt_dev_dbg(hdev, "err 0x%2.2x", err); 2792 2793 cancel_delayed_work_sync(&hdev->cmd_timer); 2794 cancel_delayed_work_sync(&hdev->ncmd_timer); 2795 atomic_set(&hdev->cmd_cnt, 1); 2796 2797 hci_cmd_sync_cancel_sync(hdev, err); 2798 } 2799 2800 /* Suspend HCI device */ 2801 int hci_suspend_dev(struct hci_dev *hdev) 2802 { 2803 int ret; 2804 2805 bt_dev_dbg(hdev, ""); 2806 2807 /* Suspend should only act on when powered. */ 2808 if (!hdev_is_powered(hdev) || 2809 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 2810 return 0; 2811 2812 /* If powering down don't attempt to suspend */ 2813 if (mgmt_powering_down(hdev)) 2814 return 0; 2815 2816 /* Cancel potentially blocking sync operation before suspend */ 2817 hci_cancel_cmd_sync(hdev, EHOSTDOWN); 2818 2819 hci_req_sync_lock(hdev); 2820 ret = hci_suspend_sync(hdev); 2821 hci_req_sync_unlock(hdev); 2822 2823 hci_clear_wake_reason(hdev); 2824 mgmt_suspending(hdev, hdev->suspend_state); 2825 2826 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); 2827 return ret; 2828 } 2829 EXPORT_SYMBOL(hci_suspend_dev); 2830 2831 /* Resume HCI device */ 2832 int hci_resume_dev(struct hci_dev *hdev) 2833 { 2834 int ret; 2835 2836 bt_dev_dbg(hdev, ""); 2837 2838 /* Resume should only act on when powered. 
*/ 2839 if (!hdev_is_powered(hdev) || 2840 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 2841 return 0; 2842 2843 /* If powering down don't attempt to resume */ 2844 if (mgmt_powering_down(hdev)) 2845 return 0; 2846 2847 hci_req_sync_lock(hdev); 2848 ret = hci_resume_sync(hdev); 2849 hci_req_sync_unlock(hdev); 2850 2851 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, 2852 hdev->wake_addr_type); 2853 2854 hci_sock_dev_event(hdev, HCI_DEV_RESUME); 2855 return ret; 2856 } 2857 EXPORT_SYMBOL(hci_resume_dev); 2858 2859 /* Reset HCI device */ 2860 int hci_reset_dev(struct hci_dev *hdev) 2861 { 2862 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; 2863 struct sk_buff *skb; 2864 2865 skb = bt_skb_alloc(3, GFP_ATOMIC); 2866 if (!skb) 2867 return -ENOMEM; 2868 2869 hci_skb_pkt_type(skb) = HCI_EVENT_PKT; 2870 skb_put_data(skb, hw_err, 3); 2871 2872 bt_dev_err(hdev, "Injecting HCI hardware error event"); 2873 2874 /* Send Hardware Error to upper stack */ 2875 return hci_recv_frame(hdev, skb); 2876 } 2877 EXPORT_SYMBOL(hci_reset_dev); 2878 2879 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) 2880 { 2881 if (hdev->classify_pkt_type) 2882 return hdev->classify_pkt_type(hdev, skb); 2883 2884 return hci_skb_pkt_type(skb); 2885 } 2886 2887 /* Receive frame from HCI drivers */ 2888 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) 2889 { 2890 u8 dev_pkt_type; 2891 2892 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 2893 && !test_bit(HCI_INIT, &hdev->flags))) { 2894 kfree_skb(skb); 2895 return -ENXIO; 2896 } 2897 2898 /* Check if the driver agree with packet type classification */ 2899 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb); 2900 if (hci_skb_pkt_type(skb) != dev_pkt_type) { 2901 hci_skb_pkt_type(skb) = dev_pkt_type; 2902 } 2903 2904 switch (hci_skb_pkt_type(skb)) { 2905 case HCI_EVENT_PKT: 2906 break; 2907 case HCI_ACLDATA_PKT: 2908 /* Detect if ISO packet has been sent as ACL */ 2909 if (hci_conn_num(hdev, ISO_LINK)) { 2910 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); 2911 __u8 type; 2912 2913 type = hci_conn_lookup_type(hdev, hci_handle(handle)); 2914 if (type == ISO_LINK) 2915 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 2916 } 2917 break; 2918 case HCI_SCODATA_PKT: 2919 break; 2920 case HCI_ISODATA_PKT: 2921 break; 2922 default: 2923 kfree_skb(skb); 2924 return -EINVAL; 2925 } 2926 2927 /* Incoming skb */ 2928 bt_cb(skb)->incoming = 1; 2929 2930 /* Time stamp */ 2931 __net_timestamp(skb); 2932 2933 skb_queue_tail(&hdev->rx_q, skb); 2934 queue_work(hdev->workqueue, &hdev->rx_work); 2935 2936 return 0; 2937 } 2938 EXPORT_SYMBOL(hci_recv_frame); 2939 2940 /* Receive diagnostic message from HCI drivers */ 2941 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) 2942 { 2943 /* Mark as diagnostic packet */ 2944 hci_skb_pkt_type(skb) = HCI_DIAG_PKT; 2945 2946 /* Time stamp */ 2947 __net_timestamp(skb); 2948 2949 skb_queue_tail(&hdev->rx_q, skb); 2950 queue_work(hdev->workqueue, &hdev->rx_work); 2951 2952 return 0; 2953 } 2954 EXPORT_SYMBOL(hci_recv_diag); 2955 2956 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) 2957 { 2958 va_list vargs; 2959 2960 va_start(vargs, fmt); 2961 kfree_const(hdev->hw_info); 2962 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 2963 va_end(vargs); 2964 } 2965 EXPORT_SYMBOL(hci_set_hw_info); 2966 2967 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 
2968 { 2969 va_list vargs; 2970 2971 va_start(vargs, fmt); 2972 kfree_const(hdev->fw_info); 2973 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 2974 va_end(vargs); 2975 } 2976 EXPORT_SYMBOL(hci_set_fw_info); 2977 2978 /* ---- Interface to upper protocols ---- */ 2979 2980 int hci_register_cb(struct hci_cb *cb) 2981 { 2982 BT_DBG("%p name %s", cb, cb->name); 2983 2984 mutex_lock(&hci_cb_list_lock); 2985 list_add_tail(&cb->list, &hci_cb_list); 2986 mutex_unlock(&hci_cb_list_lock); 2987 2988 return 0; 2989 } 2990 EXPORT_SYMBOL(hci_register_cb); 2991 2992 int hci_unregister_cb(struct hci_cb *cb) 2993 { 2994 BT_DBG("%p name %s", cb, cb->name); 2995 2996 mutex_lock(&hci_cb_list_lock); 2997 list_del(&cb->list); 2998 mutex_unlock(&hci_cb_list_lock); 2999 3000 return 0; 3001 } 3002 EXPORT_SYMBOL(hci_unregister_cb); 3003 3004 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 3005 { 3006 int err; 3007 3008 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), 3009 skb->len); 3010 3011 /* Time stamp */ 3012 __net_timestamp(skb); 3013 3014 /* Send copy to monitor */ 3015 hci_send_to_monitor(hdev, skb); 3016 3017 if (atomic_read(&hdev->promisc)) { 3018 /* Send copy to the sockets */ 3019 hci_send_to_sock(hdev, skb); 3020 } 3021 3022 /* Get rid of skb owner, prior to sending to the driver. */ 3023 skb_orphan(skb); 3024 3025 if (!test_bit(HCI_RUNNING, &hdev->flags)) { 3026 kfree_skb(skb); 3027 return -EINVAL; 3028 } 3029 3030 err = hdev->send(hdev, skb); 3031 if (err < 0) { 3032 bt_dev_err(hdev, "sending frame failed (%d)", err); 3033 kfree_skb(skb); 3034 return err; 3035 } 3036 3037 return 0; 3038 } 3039 3040 /* Send HCI command */ 3041 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, 3042 const void *param) 3043 { 3044 struct sk_buff *skb; 3045 3046 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); 3047 3048 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); 3049 if (!skb) { 3050 bt_dev_err(hdev, "no memory for command"); 3051 return -ENOMEM; 3052 } 3053 3054 /* Stand-alone HCI commands must be flagged as 3055 * single-command requests. 3056 */ 3057 bt_cb(skb)->hci.req_flags |= HCI_REQ_START; 3058 3059 skb_queue_tail(&hdev->cmd_q, skb); 3060 queue_work(hdev->workqueue, &hdev->cmd_work); 3061 3062 return 0; 3063 } 3064 3065 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, 3066 const void *param) 3067 { 3068 struct sk_buff *skb; 3069 3070 if (hci_opcode_ogf(opcode) != 0x3f) { 3071 /* A controller receiving a command shall respond with either 3072 * a Command Status Event or a Command Complete Event. 3073 * Therefore, all standard HCI commands must be sent via the 3074 * standard API, using hci_send_cmd or hci_cmd_sync helpers. 3075 * Some vendors do not comply with this rule for vendor-specific 3076 * commands and do not return any event. We want to support 3077 * unresponded commands for such cases only. 
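	 * (OGF 0x3f is the vendor-specific opcode group; any other opcode
	 * is rejected here and must go through hci_send_cmd() or the
	 * hci_cmd_sync helpers so that the response can be tracked.)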
3078 */ 3079 bt_dev_err(hdev, "unresponded command not supported"); 3080 return -EINVAL; 3081 } 3082 3083 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); 3084 if (!skb) { 3085 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", 3086 opcode); 3087 return -ENOMEM; 3088 } 3089 3090 hci_send_frame(hdev, skb); 3091 3092 return 0; 3093 } 3094 EXPORT_SYMBOL(__hci_cmd_send); 3095 3096 /* Get data from the previously sent command */ 3097 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode) 3098 { 3099 struct hci_command_hdr *hdr; 3100 3101 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE) 3102 return NULL; 3103 3104 hdr = (void *)skb->data; 3105 3106 if (hdr->opcode != cpu_to_le16(opcode)) 3107 return NULL; 3108 3109 return skb->data + HCI_COMMAND_HDR_SIZE; 3110 } 3111 3112 /* Get data from the previously sent command */ 3113 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 3114 { 3115 void *data; 3116 3117 /* Check if opcode matches last sent command */ 3118 data = hci_cmd_data(hdev->sent_cmd, opcode); 3119 if (!data) 3120 /* Check if opcode matches last request */ 3121 data = hci_cmd_data(hdev->req_skb, opcode); 3122 3123 return data; 3124 } 3125 3126 /* Get data from last received event */ 3127 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) 3128 { 3129 struct hci_event_hdr *hdr; 3130 int offset; 3131 3132 if (!hdev->recv_event) 3133 return NULL; 3134 3135 hdr = (void *)hdev->recv_event->data; 3136 offset = sizeof(*hdr); 3137 3138 if (hdr->evt != event) { 3139 /* In case of LE metaevent check the subevent match */ 3140 if (hdr->evt == HCI_EV_LE_META) { 3141 struct hci_ev_le_meta *ev; 3142 3143 ev = (void *)hdev->recv_event->data + offset; 3144 offset += sizeof(*ev); 3145 if (ev->subevent == event) 3146 goto found; 3147 } 3148 return NULL; 3149 } 3150 3151 found: 3152 bt_dev_dbg(hdev, "event 0x%2.2x", event); 3153 3154 return hdev->recv_event->data + offset; 3155 } 3156 3157 /* Send ACL data */ 3158 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) 3159 { 3160 struct hci_acl_hdr *hdr; 3161 int len = skb->len; 3162 3163 skb_push(skb, HCI_ACL_HDR_SIZE); 3164 skb_reset_transport_header(skb); 3165 hdr = (struct hci_acl_hdr *)skb_transport_header(skb); 3166 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 3167 hdr->dlen = cpu_to_le16(len); 3168 } 3169 3170 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, 3171 struct sk_buff *skb, __u16 flags) 3172 { 3173 struct hci_conn *conn = chan->conn; 3174 struct hci_dev *hdev = conn->hdev; 3175 struct sk_buff *list; 3176 3177 skb->len = skb_headlen(skb); 3178 skb->data_len = 0; 3179 3180 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 3181 3182 hci_add_acl_hdr(skb, conn->handle, flags); 3183 3184 list = skb_shinfo(skb)->frag_list; 3185 if (!list) { 3186 /* Non fragmented */ 3187 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 3188 3189 skb_queue_tail(queue, skb); 3190 } else { 3191 /* Fragmented */ 3192 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3193 3194 skb_shinfo(skb)->frag_list = NULL; 3195 3196 /* Queue all fragments atomically. We need to use spin_lock_bh 3197 * here because of 6LoWPAN links, as there this function is 3198 * called from softirq and using normal spin lock could cause 3199 * deadlocks. 
3200 */ 3201 spin_lock_bh(&queue->lock); 3202 3203 __skb_queue_tail(queue, skb); 3204 3205 flags &= ~ACL_START; 3206 flags |= ACL_CONT; 3207 do { 3208 skb = list; list = list->next; 3209 3210 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 3211 hci_add_acl_hdr(skb, conn->handle, flags); 3212 3213 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3214 3215 __skb_queue_tail(queue, skb); 3216 } while (list); 3217 3218 spin_unlock_bh(&queue->lock); 3219 } 3220 } 3221 3222 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 3223 { 3224 struct hci_dev *hdev = chan->conn->hdev; 3225 3226 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); 3227 3228 hci_queue_acl(chan, &chan->data_q, skb, flags); 3229 3230 queue_work(hdev->workqueue, &hdev->tx_work); 3231 } 3232 3233 /* Send SCO data */ 3234 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 3235 { 3236 struct hci_dev *hdev = conn->hdev; 3237 struct hci_sco_hdr hdr; 3238 3239 BT_DBG("%s len %d", hdev->name, skb->len); 3240 3241 hdr.handle = cpu_to_le16(conn->handle); 3242 hdr.dlen = skb->len; 3243 3244 skb_push(skb, HCI_SCO_HDR_SIZE); 3245 skb_reset_transport_header(skb); 3246 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); 3247 3248 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; 3249 3250 skb_queue_tail(&conn->data_q, skb); 3251 queue_work(hdev->workqueue, &hdev->tx_work); 3252 } 3253 3254 /* Send ISO data */ 3255 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) 3256 { 3257 struct hci_iso_hdr *hdr; 3258 int len = skb->len; 3259 3260 skb_push(skb, HCI_ISO_HDR_SIZE); 3261 skb_reset_transport_header(skb); 3262 hdr = (struct hci_iso_hdr *)skb_transport_header(skb); 3263 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 3264 hdr->dlen = cpu_to_le16(len); 3265 } 3266 3267 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, 3268 struct sk_buff *skb) 3269 { 3270 struct hci_dev *hdev = conn->hdev; 3271 struct sk_buff *list; 3272 __u16 flags; 3273 3274 skb->len = skb_headlen(skb); 3275 skb->data_len = 0; 3276 3277 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 3278 3279 list = skb_shinfo(skb)->frag_list; 3280 3281 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); 3282 hci_add_iso_hdr(skb, conn->handle, flags); 3283 3284 if (!list) { 3285 /* Non fragmented */ 3286 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 3287 3288 skb_queue_tail(queue, skb); 3289 } else { 3290 /* Fragmented */ 3291 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3292 3293 skb_shinfo(skb)->frag_list = NULL; 3294 3295 __skb_queue_tail(queue, skb); 3296 3297 do { 3298 skb = list; list = list->next; 3299 3300 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 3301 flags = hci_iso_flags_pack(list ? 
ISO_CONT : ISO_END, 3302 0x00); 3303 hci_add_iso_hdr(skb, conn->handle, flags); 3304 3305 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3306 3307 __skb_queue_tail(queue, skb); 3308 } while (list); 3309 } 3310 } 3311 3312 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) 3313 { 3314 struct hci_dev *hdev = conn->hdev; 3315 3316 BT_DBG("%s len %d", hdev->name, skb->len); 3317 3318 hci_queue_iso(conn, &conn->data_q, skb); 3319 3320 queue_work(hdev->workqueue, &hdev->tx_work); 3321 } 3322 3323 /* ---- HCI TX task (outgoing data) ---- */ 3324 3325 /* HCI Connection scheduler */ 3326 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) 3327 { 3328 struct hci_dev *hdev; 3329 int cnt, q; 3330 3331 if (!conn) { 3332 *quote = 0; 3333 return; 3334 } 3335 3336 hdev = conn->hdev; 3337 3338 switch (conn->type) { 3339 case ACL_LINK: 3340 cnt = hdev->acl_cnt; 3341 break; 3342 case SCO_LINK: 3343 case ESCO_LINK: 3344 cnt = hdev->sco_cnt; 3345 break; 3346 case LE_LINK: 3347 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 3348 break; 3349 case ISO_LINK: 3350 cnt = hdev->iso_mtu ? hdev->iso_cnt : 3351 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 3352 break; 3353 default: 3354 cnt = 0; 3355 bt_dev_err(hdev, "unknown link type %d", conn->type); 3356 } 3357 3358 q = cnt / num; 3359 *quote = q ? q : 1; 3360 } 3361 3362 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, 3363 int *quote) 3364 { 3365 struct hci_conn_hash *h = &hdev->conn_hash; 3366 struct hci_conn *conn = NULL, *c; 3367 unsigned int num = 0, min = ~0; 3368 3369 /* We don't have to lock device here. Connections are always 3370 * added and removed with TX task disabled. */ 3371 3372 rcu_read_lock(); 3373 3374 list_for_each_entry_rcu(c, &h->list, list) { 3375 if (c->type != type || skb_queue_empty(&c->data_q)) 3376 continue; 3377 3378 if (c->state != BT_CONNECTED && c->state != BT_CONFIG) 3379 continue; 3380 3381 num++; 3382 3383 if (c->sent < min) { 3384 min = c->sent; 3385 conn = c; 3386 } 3387 3388 if (hci_conn_num(hdev, type) == num) 3389 break; 3390 } 3391 3392 rcu_read_unlock(); 3393 3394 hci_quote_sent(conn, num, quote); 3395 3396 BT_DBG("conn %p quote %d", conn, *quote); 3397 return conn; 3398 } 3399 3400 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 3401 { 3402 struct hci_conn_hash *h = &hdev->conn_hash; 3403 struct hci_conn *c; 3404 3405 bt_dev_err(hdev, "link tx timeout"); 3406 3407 rcu_read_lock(); 3408 3409 /* Kill stalled connections */ 3410 list_for_each_entry_rcu(c, &h->list, list) { 3411 if (c->type == type && c->sent) { 3412 bt_dev_err(hdev, "killing stalled connection %pMR", 3413 &c->dst); 3414 /* hci_disconnect might sleep, so, we have to release 3415 * the RCU read lock before calling it. 
3416 */ 3417 rcu_read_unlock(); 3418 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); 3419 rcu_read_lock(); 3420 } 3421 } 3422 3423 rcu_read_unlock(); 3424 } 3425 3426 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 3427 int *quote) 3428 { 3429 struct hci_conn_hash *h = &hdev->conn_hash; 3430 struct hci_chan *chan = NULL; 3431 unsigned int num = 0, min = ~0, cur_prio = 0; 3432 struct hci_conn *conn; 3433 int conn_num = 0; 3434 3435 BT_DBG("%s", hdev->name); 3436 3437 rcu_read_lock(); 3438 3439 list_for_each_entry_rcu(conn, &h->list, list) { 3440 struct hci_chan *tmp; 3441 3442 if (conn->type != type) 3443 continue; 3444 3445 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 3446 continue; 3447 3448 conn_num++; 3449 3450 list_for_each_entry_rcu(tmp, &conn->chan_list, list) { 3451 struct sk_buff *skb; 3452 3453 if (skb_queue_empty(&tmp->data_q)) 3454 continue; 3455 3456 skb = skb_peek(&tmp->data_q); 3457 if (skb->priority < cur_prio) 3458 continue; 3459 3460 if (skb->priority > cur_prio) { 3461 num = 0; 3462 min = ~0; 3463 cur_prio = skb->priority; 3464 } 3465 3466 num++; 3467 3468 if (conn->sent < min) { 3469 min = conn->sent; 3470 chan = tmp; 3471 } 3472 } 3473 3474 if (hci_conn_num(hdev, type) == conn_num) 3475 break; 3476 } 3477 3478 rcu_read_unlock(); 3479 3480 if (!chan) 3481 return NULL; 3482 3483 hci_quote_sent(chan->conn, num, quote); 3484 3485 BT_DBG("chan %p quote %d", chan, *quote); 3486 return chan; 3487 } 3488 3489 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) 3490 { 3491 struct hci_conn_hash *h = &hdev->conn_hash; 3492 struct hci_conn *conn; 3493 int num = 0; 3494 3495 BT_DBG("%s", hdev->name); 3496 3497 rcu_read_lock(); 3498 3499 list_for_each_entry_rcu(conn, &h->list, list) { 3500 struct hci_chan *chan; 3501 3502 if (conn->type != type) 3503 continue; 3504 3505 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 3506 continue; 3507 3508 num++; 3509 3510 list_for_each_entry_rcu(chan, &conn->chan_list, list) { 3511 struct sk_buff *skb; 3512 3513 if (chan->sent) { 3514 chan->sent = 0; 3515 continue; 3516 } 3517 3518 if (skb_queue_empty(&chan->data_q)) 3519 continue; 3520 3521 skb = skb_peek(&chan->data_q); 3522 if (skb->priority >= HCI_PRIO_MAX - 1) 3523 continue; 3524 3525 skb->priority = HCI_PRIO_MAX - 1; 3526 3527 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 3528 skb->priority); 3529 } 3530 3531 if (hci_conn_num(hdev, type) == num) 3532 break; 3533 } 3534 3535 rcu_read_unlock(); 3536 3537 } 3538 3539 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) 3540 { 3541 unsigned long last_tx; 3542 3543 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 3544 return; 3545 3546 switch (type) { 3547 case LE_LINK: 3548 last_tx = hdev->le_last_tx; 3549 break; 3550 default: 3551 last_tx = hdev->acl_last_tx; 3552 break; 3553 } 3554 3555 /* tx timeout must be longer than maximum link supervision timeout 3556 * (40.9 seconds) 3557 */ 3558 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) 3559 hci_link_tx_to(hdev, type); 3560 } 3561 3562 /* Schedule SCO */ 3563 static void hci_sched_sco(struct hci_dev *hdev) 3564 { 3565 struct hci_conn *conn; 3566 struct sk_buff *skb; 3567 int quote; 3568 3569 BT_DBG("%s", hdev->name); 3570 3571 if (!hci_conn_num(hdev, SCO_LINK)) 3572 return; 3573 3574 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) { 3575 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3576 BT_DBG("skb %p len %d", skb, skb->len); 3577 hci_send_frame(hdev, skb); 3578 
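			/* Account for another frame handed to the driver on this
			 * connection, guarding against counter wrap-around below.
			 */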
3579 conn->sent++; 3580 if (conn->sent == ~0) 3581 conn->sent = 0; 3582 } 3583 } 3584 } 3585 3586 static void hci_sched_esco(struct hci_dev *hdev) 3587 { 3588 struct hci_conn *conn; 3589 struct sk_buff *skb; 3590 int quote; 3591 3592 BT_DBG("%s", hdev->name); 3593 3594 if (!hci_conn_num(hdev, ESCO_LINK)) 3595 return; 3596 3597 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, 3598 "e))) { 3599 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3600 BT_DBG("skb %p len %d", skb, skb->len); 3601 hci_send_frame(hdev, skb); 3602 3603 conn->sent++; 3604 if (conn->sent == ~0) 3605 conn->sent = 0; 3606 } 3607 } 3608 } 3609 3610 static void hci_sched_acl_pkt(struct hci_dev *hdev) 3611 { 3612 unsigned int cnt = hdev->acl_cnt; 3613 struct hci_chan *chan; 3614 struct sk_buff *skb; 3615 int quote; 3616 3617 __check_timeout(hdev, cnt, ACL_LINK); 3618 3619 while (hdev->acl_cnt && 3620 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { 3621 u32 priority = (skb_peek(&chan->data_q))->priority; 3622 while (quote-- && (skb = skb_peek(&chan->data_q))) { 3623 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 3624 skb->len, skb->priority); 3625 3626 /* Stop if priority has changed */ 3627 if (skb->priority < priority) 3628 break; 3629 3630 skb = skb_dequeue(&chan->data_q); 3631 3632 hci_conn_enter_active_mode(chan->conn, 3633 bt_cb(skb)->force_active); 3634 3635 hci_send_frame(hdev, skb); 3636 hdev->acl_last_tx = jiffies; 3637 3638 hdev->acl_cnt--; 3639 chan->sent++; 3640 chan->conn->sent++; 3641 3642 /* Send pending SCO packets right away */ 3643 hci_sched_sco(hdev); 3644 hci_sched_esco(hdev); 3645 } 3646 } 3647 3648 if (cnt != hdev->acl_cnt) 3649 hci_prio_recalculate(hdev, ACL_LINK); 3650 } 3651 3652 static void hci_sched_acl(struct hci_dev *hdev) 3653 { 3654 BT_DBG("%s", hdev->name); 3655 3656 /* No ACL link over BR/EDR controller */ 3657 if (!hci_conn_num(hdev, ACL_LINK)) 3658 return; 3659 3660 hci_sched_acl_pkt(hdev); 3661 } 3662 3663 static void hci_sched_le(struct hci_dev *hdev) 3664 { 3665 struct hci_chan *chan; 3666 struct sk_buff *skb; 3667 int quote, cnt, tmp; 3668 3669 BT_DBG("%s", hdev->name); 3670 3671 if (!hci_conn_num(hdev, LE_LINK)) 3672 return; 3673 3674 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; 3675 3676 __check_timeout(hdev, cnt, LE_LINK); 3677 3678 tmp = cnt; 3679 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { 3680 u32 priority = (skb_peek(&chan->data_q))->priority; 3681 while (quote-- && (skb = skb_peek(&chan->data_q))) { 3682 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 3683 skb->len, skb->priority); 3684 3685 /* Stop if priority has changed */ 3686 if (skb->priority < priority) 3687 break; 3688 3689 skb = skb_dequeue(&chan->data_q); 3690 3691 hci_send_frame(hdev, skb); 3692 hdev->le_last_tx = jiffies; 3693 3694 cnt--; 3695 chan->sent++; 3696 chan->conn->sent++; 3697 3698 /* Send pending SCO packets right away */ 3699 hci_sched_sco(hdev); 3700 hci_sched_esco(hdev); 3701 } 3702 } 3703 3704 if (hdev->le_pkts) 3705 hdev->le_cnt = cnt; 3706 else 3707 hdev->acl_cnt = cnt; 3708 3709 if (cnt != tmp) 3710 hci_prio_recalculate(hdev, LE_LINK); 3711 } 3712 3713 /* Schedule CIS */ 3714 static void hci_sched_iso(struct hci_dev *hdev) 3715 { 3716 struct hci_conn *conn; 3717 struct sk_buff *skb; 3718 int quote, *cnt; 3719 3720 BT_DBG("%s", hdev->name); 3721 3722 if (!hci_conn_num(hdev, ISO_LINK)) 3723 return; 3724 3725 cnt = hdev->iso_pkts ? &hdev->iso_cnt : 3726 hdev->le_pkts ? 
&hdev->le_cnt : &hdev->acl_cnt; 3727 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) { 3728 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3729 BT_DBG("skb %p len %d", skb, skb->len); 3730 hci_send_frame(hdev, skb); 3731 3732 conn->sent++; 3733 if (conn->sent == ~0) 3734 conn->sent = 0; 3735 (*cnt)--; 3736 } 3737 } 3738 } 3739 3740 static void hci_tx_work(struct work_struct *work) 3741 { 3742 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); 3743 struct sk_buff *skb; 3744 3745 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, 3746 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); 3747 3748 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 3749 /* Schedule queues and send stuff to HCI driver */ 3750 hci_sched_sco(hdev); 3751 hci_sched_esco(hdev); 3752 hci_sched_iso(hdev); 3753 hci_sched_acl(hdev); 3754 hci_sched_le(hdev); 3755 } 3756 3757 /* Send next queued raw (unknown type) packet */ 3758 while ((skb = skb_dequeue(&hdev->raw_q))) 3759 hci_send_frame(hdev, skb); 3760 } 3761 3762 /* ----- HCI RX task (incoming data processing) ----- */ 3763 3764 /* ACL data packet */ 3765 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3766 { 3767 struct hci_acl_hdr *hdr = (void *) skb->data; 3768 struct hci_conn *conn; 3769 __u16 handle, flags; 3770 3771 skb_pull(skb, HCI_ACL_HDR_SIZE); 3772 3773 handle = __le16_to_cpu(hdr->handle); 3774 flags = hci_flags(handle); 3775 handle = hci_handle(handle); 3776 3777 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, 3778 handle, flags); 3779 3780 hdev->stat.acl_rx++; 3781 3782 hci_dev_lock(hdev); 3783 conn = hci_conn_hash_lookup_handle(hdev, handle); 3784 hci_dev_unlock(hdev); 3785 3786 if (conn) { 3787 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 3788 3789 /* Send to upper protocol */ 3790 l2cap_recv_acldata(conn, skb, flags); 3791 return; 3792 } else { 3793 bt_dev_err(hdev, "ACL packet for unknown connection handle %d", 3794 handle); 3795 } 3796 3797 kfree_skb(skb); 3798 } 3799 3800 /* SCO data packet */ 3801 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3802 { 3803 struct hci_sco_hdr *hdr = (void *) skb->data; 3804 struct hci_conn *conn; 3805 __u16 handle, flags; 3806 3807 skb_pull(skb, HCI_SCO_HDR_SIZE); 3808 3809 handle = __le16_to_cpu(hdr->handle); 3810 flags = hci_flags(handle); 3811 handle = hci_handle(handle); 3812 3813 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, 3814 handle, flags); 3815 3816 hdev->stat.sco_rx++; 3817 3818 hci_dev_lock(hdev); 3819 conn = hci_conn_hash_lookup_handle(hdev, handle); 3820 hci_dev_unlock(hdev); 3821 3822 if (conn) { 3823 /* Send to upper protocol */ 3824 hci_skb_pkt_status(skb) = flags & 0x03; 3825 sco_recv_scodata(conn, skb); 3826 return; 3827 } else { 3828 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d", 3829 handle); 3830 } 3831 3832 kfree_skb(skb); 3833 } 3834 3835 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3836 { 3837 struct hci_iso_hdr *hdr; 3838 struct hci_conn *conn; 3839 __u16 handle, flags; 3840 3841 hdr = skb_pull_data(skb, sizeof(*hdr)); 3842 if (!hdr) { 3843 bt_dev_err(hdev, "ISO packet too small"); 3844 goto drop; 3845 } 3846 3847 handle = __le16_to_cpu(hdr->handle); 3848 flags = hci_flags(handle); 3849 handle = hci_handle(handle); 3850 3851 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, 3852 handle, flags); 3853 3854 hci_dev_lock(hdev); 3855 conn = 
hci_conn_hash_lookup_handle(hdev, handle); 3856 hci_dev_unlock(hdev); 3857 3858 if (!conn) { 3859 bt_dev_err(hdev, "ISO packet for unknown connection handle %d", 3860 handle); 3861 goto drop; 3862 } 3863 3864 /* Send to upper protocol */ 3865 iso_recv(conn, skb, flags); 3866 return; 3867 3868 drop: 3869 kfree_skb(skb); 3870 } 3871 3872 static bool hci_req_is_complete(struct hci_dev *hdev) 3873 { 3874 struct sk_buff *skb; 3875 3876 skb = skb_peek(&hdev->cmd_q); 3877 if (!skb) 3878 return true; 3879 3880 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START); 3881 } 3882 3883 static void hci_resend_last(struct hci_dev *hdev) 3884 { 3885 struct hci_command_hdr *sent; 3886 struct sk_buff *skb; 3887 u16 opcode; 3888 3889 if (!hdev->sent_cmd) 3890 return; 3891 3892 sent = (void *) hdev->sent_cmd->data; 3893 opcode = __le16_to_cpu(sent->opcode); 3894 if (opcode == HCI_OP_RESET) 3895 return; 3896 3897 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); 3898 if (!skb) 3899 return; 3900 3901 skb_queue_head(&hdev->cmd_q, skb); 3902 queue_work(hdev->workqueue, &hdev->cmd_work); 3903 } 3904 3905 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, 3906 hci_req_complete_t *req_complete, 3907 hci_req_complete_skb_t *req_complete_skb) 3908 { 3909 struct sk_buff *skb; 3910 unsigned long flags; 3911 3912 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); 3913 3914 /* If the completed command doesn't match the last one that was 3915 * sent we need to do special handling of it. 3916 */ 3917 if (!hci_sent_cmd_data(hdev, opcode)) { 3918 /* Some CSR based controllers generate a spontaneous 3919 * reset complete event during init and any pending 3920 * command will never be completed. In such a case we 3921 * need to resend whatever was the last sent 3922 * command. 3923 */ 3924 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) 3925 hci_resend_last(hdev); 3926 3927 return; 3928 } 3929 3930 /* If we reach this point this event matches the last command sent */ 3931 hci_dev_clear_flag(hdev, HCI_CMD_PENDING); 3932 3933 /* If the command succeeded and there's still more commands in 3934 * this request the request is not yet complete. 3935 */ 3936 if (!status && !hci_req_is_complete(hdev)) 3937 return; 3938 3939 skb = hdev->req_skb; 3940 3941 /* If this was the last command in a request the complete 3942 * callback would be found in hdev->req_skb instead of the 3943 * command queue (hdev->cmd_q). 
3944 */ 3945 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) { 3946 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; 3947 return; 3948 } 3949 3950 if (skb && bt_cb(skb)->hci.req_complete) { 3951 *req_complete = bt_cb(skb)->hci.req_complete; 3952 return; 3953 } 3954 3955 /* Remove all pending commands belonging to this request */ 3956 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 3957 while ((skb = __skb_dequeue(&hdev->cmd_q))) { 3958 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) { 3959 __skb_queue_head(&hdev->cmd_q, skb); 3960 break; 3961 } 3962 3963 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) 3964 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; 3965 else 3966 *req_complete = bt_cb(skb)->hci.req_complete; 3967 dev_kfree_skb_irq(skb); 3968 } 3969 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 3970 } 3971 3972 static void hci_rx_work(struct work_struct *work) 3973 { 3974 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); 3975 struct sk_buff *skb; 3976 3977 BT_DBG("%s", hdev->name); 3978 3979 /* The kcov_remote functions are used to collect packet parsing 3980 * coverage information from this background thread and to associate 3981 * the coverage with the syscall's thread which originally injected 3982 * the packet. This helps with fuzzing the kernel. 3983 */ 3984 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { 3985 kcov_remote_start_common(skb_get_kcov_handle(skb)); 3986 3987 /* Send copy to monitor */ 3988 hci_send_to_monitor(hdev, skb); 3989 3990 if (atomic_read(&hdev->promisc)) { 3991 /* Send copy to the sockets */ 3992 hci_send_to_sock(hdev, skb); 3993 } 3994 3995 /* If the device has been opened in HCI_USER_CHANNEL, 3996 * userspace has exclusive access to the device. 3997 * When the device is in HCI_INIT, we still need to process 3998 * the data packets coming from the driver in order 3999 * to complete its setup(). 4000 */ 4001 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 4002 !test_bit(HCI_INIT, &hdev->flags)) { 4003 kfree_skb(skb); 4004 continue; 4005 } 4006 4007 if (test_bit(HCI_INIT, &hdev->flags)) { 4008 /* Don't process data packets in these states.
*/ 4009 switch (hci_skb_pkt_type(skb)) { 4010 case HCI_ACLDATA_PKT: 4011 case HCI_SCODATA_PKT: 4012 case HCI_ISODATA_PKT: 4013 kfree_skb(skb); 4014 continue; 4015 } 4016 } 4017 4018 /* Process frame */ 4019 switch (hci_skb_pkt_type(skb)) { 4020 case HCI_EVENT_PKT: 4021 BT_DBG("%s Event packet", hdev->name); 4022 hci_event_packet(hdev, skb); 4023 break; 4024 4025 case HCI_ACLDATA_PKT: 4026 BT_DBG("%s ACL data packet", hdev->name); 4027 hci_acldata_packet(hdev, skb); 4028 break; 4029 4030 case HCI_SCODATA_PKT: 4031 BT_DBG("%s SCO data packet", hdev->name); 4032 hci_scodata_packet(hdev, skb); 4033 break; 4034 4035 case HCI_ISODATA_PKT: 4036 BT_DBG("%s ISO data packet", hdev->name); 4037 hci_isodata_packet(hdev, skb); 4038 break; 4039 4040 default: 4041 kfree_skb(skb); 4042 break; 4043 } 4044 } 4045 } 4046 4047 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) 4048 { 4049 int err; 4050 4051 bt_dev_dbg(hdev, "skb %p", skb); 4052 4053 kfree_skb(hdev->sent_cmd); 4054 4055 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); 4056 if (!hdev->sent_cmd) { 4057 skb_queue_head(&hdev->cmd_q, skb); 4058 queue_work(hdev->workqueue, &hdev->cmd_work); 4059 return; 4060 } 4061 4062 err = hci_send_frame(hdev, skb); 4063 if (err < 0) { 4064 hci_cmd_sync_cancel_sync(hdev, -err); 4065 return; 4066 } 4067 4068 if (hdev->req_status == HCI_REQ_PEND && 4069 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) { 4070 kfree_skb(hdev->req_skb); 4071 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); 4072 } 4073 4074 atomic_dec(&hdev->cmd_cnt); 4075 } 4076 4077 static void hci_cmd_work(struct work_struct *work) 4078 { 4079 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 4080 struct sk_buff *skb; 4081 4082 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, 4083 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); 4084 4085 /* Send queued commands */ 4086 if (atomic_read(&hdev->cmd_cnt)) { 4087 skb = skb_dequeue(&hdev->cmd_q); 4088 if (!skb) 4089 return; 4090 4091 hci_send_cmd_sync(hdev, skb); 4092 4093 rcu_read_lock(); 4094 if (test_bit(HCI_RESET, &hdev->flags) || 4095 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 4096 cancel_delayed_work(&hdev->cmd_timer); 4097 else 4098 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, 4099 HCI_CMD_TIMEOUT); 4100 rcu_read_unlock(); 4101 } 4102 } 4103
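/* Illustrative sketch (not part of the stack itself): how a transport
 * driver would typically use the registration API above. The foo_* names
 * are hypothetical; only the hci_* calls and the mandatory open/close/send
 * callbacks are taken from the code in this file.
 *
 *	static int foo_probe(struct foo_device *fdev)
 *	{
 *		struct hci_dev *hdev;
 *		int err;
 *
 *		hdev = hci_alloc_dev_priv(sizeof(struct foo_data));
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->open  = foo_open;
 *		hdev->close = foo_close;
 *		hdev->send  = foo_send;
 *
 *		err = hci_register_dev(hdev);
 *		if (err < 0) {
 *			hci_free_dev(hdev);
 *			return err;
 *		}
 *
 *		return 0;
 *	}
 *
 * Frames coming back from the controller are injected with
 * hci_recv_frame(hdev, skb) once hci_skb_pkt_type(skb) has been set, and
 * teardown is hci_unregister_dev(hdev) followed by hci_free_dev(hdev).
 */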