/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Keep the resolve list sorted by ascending RSSI magnitude so
	 * that the strongest (closest) devices get their names resolved
	 * first, skipping entries whose resolution is already pending.
	 */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
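	/* Returns a bitmask of MGMT_DEV_FOUND_* flags telling the caller
	 * how to report this result, e.g. whether userspace still needs
	 * to confirm the remote name before pairing.
	 */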
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28 seconds; allow roughly 2 seconds
	 * per unit for the procedure to complete.
	 */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt packs the packet count in its first __u16 and
		 * the MTU in its second.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val,
					       sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random (a static
	 * random address has its two most significant bits set).
	 */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list,
			     &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During the HCI_INIT phase no events can be injected if the ncmd
	 * timer triggers, since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, so inject a hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* data->present is a bitmask: bit 0 is set when the P-192 values
	 * are available and bit 1 when the P-256 values are available.
	 */
	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration, s8 tx_power,
			 u32 min_interval, u32 max_interval)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;
	adv_instance->min_interval = min_interval;
	adv_instance->max_interval = max_interval;
	adv_instance->tx_power = tx_power;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv_instance->duration = duration;

	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
			  adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return 0;
}

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* If the advertisement doesn't exist, we can't modify its data */
	if (!adv_instance)
		return -ENOENT;

	if (adv_data_len) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
		adv_instance->adv_data_len = adv_data_len;
	}

	if (scan_rsp_len) {
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);
		adv_instance->scan_rsp_len = scan_rsp_len;
	}

	return 0;
}

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields.
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
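 * Monitors that were never registered with the controller (state
 * ADV_MONITOR_STATE_NOT_REGISTERED) are freed without notifying userspace.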
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}

int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
{
	return mgmt_add_adv_patterns_monitor_complete(hdev, status);
}

int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
{
	return mgmt_remove_adv_monitor_complete(hdev, status);
}

/* Assigns a handle to a monitor, and if offloading is supported and power
 * is on, also attempts to forward the request to the controller.
 * Returns true if the request is forwarded (result is pending), false
 * otherwise.
 * This function requires the caller holds hdev->lock.
 */
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
			 int *err)
{
	int min, max, handle;

	*err = 0;

	if (!monitor) {
		*err = -EINVAL;
		return false;
	}

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);
	if (handle < 0) {
		*err = handle;
		return false;
	}

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return false;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		hci_update_passive_scan(hdev);
		bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
		/* Message was not forwarded to the controller - not an error */
		return false;
	case HCI_ADV_MONITOR_EXT_MSFT:
		*err = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
			   *err);
		break;
	}

	return (*err == 0);
}

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * Returns true if the request is forwarded (result is pending), false
 * otherwise.
 * This function requires the caller holds hdev->lock.
 */
static bool hci_remove_adv_monitor(struct hci_dev *hdev,
				   struct adv_monitor *monitor,
				   u16 handle, int *err)
{
	*err = 0;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		goto free_monitor;
	case HCI_ADV_MONITOR_EXT_MSFT:
		*err = msft_remove_monitor(hdev, monitor, handle);
		break;
	}

	/* In case no matching handle is registered, just free the monitor */
	if (*err == -ENOENT)
		goto free_monitor;

	return (*err == 0);

free_monitor:
	if (*err == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	*err = 0;
	return false;
}

/* Returns true if the request is forwarded (result is pending), false
 * otherwise.
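 * On a false return, *err holds the final status (0 means the monitor
 * was removed synchronously).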
 * This function requires the caller holds hdev->lock.
 */
bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
	bool pending;

	if (!monitor) {
		*err = -EINVAL;
		return false;
	}

	pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
	if (!*err && !pending)
		hci_update_passive_scan(hdev);

	bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
		   hdev->name, handle, *err, pending ? "" : "not ");

	return pending;
}

/* Returns true if the request is forwarded (result is pending), false
 * otherwise.
 * This function requires the caller holds hdev->lock.
 */
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	bool pending = false;
	bool update = false;

	*err = 0;

	while (!*err && !pending) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		pending = hci_remove_adv_monitor(hdev, monitor, 0, err);

		if (!*err && !pending)
			update = true;
	}

	if (update)
		hci_update_passive_scan(hdev);

	bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
		   hdev->name, *err, pending ? "" : "not ");

	return pending;
}

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->current_flags = flags;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a
		 * disabled device, leave the params but mark them as
		 * explicit-only.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
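
/* Illustrative sketch (hypothetical caller, hdev->lock held): a mgmt
 * "add device" style operation maps onto the helpers above roughly as
 * follows; the defaults copied by hci_conn_params_add() come from the
 * per-controller le_conn_* tunables initialized in hci_alloc_dev_priv()
 * below:
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_update_passive_scan(hdev);
 */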
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}

static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	return NOTIFY_DONE;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* FIXME: May need alignment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->local_codecs);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
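
/* Illustrative sketch of the expected driver-side pairing of the
 * helpers above (hypothetical driver; my_open/my_close/my_send are
 * placeholder callbacks, not existing symbols):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() below rejects an hdev without open/close/send
 * callbacks, so they must be filled in before registration.
 */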
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);
	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		error = register_pm_notifier(&hdev->suspend_notifier);
		if (error)
			goto err_wqueue;
	}

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
		unregister_pm_notifier(&hdev->suspend_notifier);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
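
/* Note on teardown ordering: hci_unregister_dev() above only detaches
 * the device and drops references; the freeing in hci_release_dev()
 * runs from the device core once the last reference is gone, so a
 * hypothetical driver remove path is simply:
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */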
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act on a powered device. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down, don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act on a powered device. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down, don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	bt_dev_err(hdev, "Injecting HCI hardware error event");

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
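
/* Illustrative sketch (hypothetical values): drivers usually record the
 * hardware string once during setup, e.g.
 *
 *	hci_set_hw_info(hdev, "rev %u build %u", rev, build);
 *
 * Because kvasprintf_const() falls back to kstrdup_const() for a format
 * string without '%', passing a plain string literal is cheap.
 */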
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
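
/* Illustrative sketch (hypothetical caller): a parameterless command
 * such as Inquiry Cancel is queued with
 *
 *	hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
 *
 * and leaves through hci_cmd_work() once the controller has a free
 * command credit.
 */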
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *)hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
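
/* Worked example for the header packing above: hci_handle_pack() keeps
 * the 12-bit connection handle in bits 0-11 and puts the packet
 * boundary/broadcast flags on top, so for handle 0x002a with ACL_START
 * (0x02):
 *
 *	hci_handle_pack(0x002a, 0x02) == (0x02 << 12) | 0x002a == 0x202a
 *
 * hci_handle()/hci_flags() on the receive side (see hci_acldata_packet()
 * below) undo exactly this split.
 */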
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use
		 * spin_lock_bh here because of 6LoWPAN links: there this
		 * function is called from softirq context, and using a
		 * normal spin lock could cause deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
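
/* Note on the fragmentation rule in hci_queue_acl() above: only the
 * head skb keeps the caller's boundary flag, every entry on frag_list
 * is re-flagged as a continuation, so a three-fragment L2CAP PDU is
 * queued as:
 *
 *	frag 0: ACL_START (or ACL_START_NO_FLUSH)
 *	frag 1: ACL_CONT
 *	frag 2: ACL_CONT
 */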
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
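
/* Worked example for the quota computed in hci_low_sent() above: with
 * hdev->acl_cnt == 5 free slots shared by num == 3 eligible ACL
 * connections, the least-serviced connection is granted 5 / 3 = 1
 * packet this round; the "q ? q : 1" fallback guarantees forward
 * progress even when cnt < num would truncate the quota to zero.
 */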
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
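
/* Worked example for __get_blocks() above: with a controller block
 * length of 64 octets, a 500-octet frame (496 octets of payload after
 * the 4-octet ACL header) costs DIV_ROUND_UP(496, 64) = 8 data blocks;
 * hci_sched_acl_blk() below charges the quota in these units instead
 * of whole packets.
 */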
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
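
/* Note on the two modes dispatched above: packet-based flow control
 * counts quota and hdev->acl_cnt in packets, while block-based flow
 * control charges a per-frame block count from __get_blocks(), which is
 * why hci_sched_acl_blk() can stop mid-quote when a frame needs more
 * blocks than the controller has left.
 */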
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *)skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	}

	bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
		   handle);

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *)skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	}

	bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
		   handle);

	kfree_skb(skb);
}
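
/* Note: for synchronous packets the controller reuses the two low flag
 * bits as the Packet_Status_Flag of erroneous data reporting, which is
 * what hci_scodata_packet() above stores as sco.pkt_status
 * ("flags & 0x03"): 0x00 correctly received, 0x01 possibly invalid,
 * 0x02 no data, 0x03 partially lost.
 */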
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *)hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
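
/* Sketch of the request boundary convention used above: commands queued
 * by one request carry HCI_REQ_START only on their first entry, so a
 * three-command request sits in hdev->cmd_q as
 *
 *	[HCI_REQ_START][-][-][HCI_REQ_START]...
 *
 * and hci_req_is_complete() reports completion exactly when the next
 * queued command starts a new request (or the queue is empty).
 */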
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * When the device is in HCI_INIT, we still need to pass
		 * the data packets to the driver in order to complete
		 * its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
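
/* Note on command flow control: hdev->cmd_cnt mirrors the controller's
 * Num_HCI_Command_Packets credit, so hci_cmd_work() above sends at most
 * one command and then waits for the event path to restore the credit;
 * cmd_timer firing instead means the controller never answered within
 * HCI_CMD_TIMEOUT.
 */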