// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 * Copyright 2023 NXP
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	/* Free the request command so it is not used as response */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;

	if (skb) {
		struct sock *sk = hci_skb_sk(skb);

		/* Drop sk reference if set */
		if (sk)
			sock_put(sk);

		hdev->req_rsp = skb_get(skb);
	}

	wake_up_interruptible(&hdev->req_wait_q);
}

static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
					  u32 plen, const void *param,
					  struct sock *sk)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	/* Grab a reference if command needs to be associated with a sock (e.g.
	 * likely mgmt socket that initiated the command).
	 */
	if (sk) {
		hci_skb_sk(skb) = sk;
		sock_hold(sk);
	}

	return skb;
}

static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
			     const void *param, u8 event, struct sock *sk)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

static int hci_cmd_sync_run(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout,
				  struct sock *sk)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);

	hci_req_init(&req, hdev);

	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_cmd_sync_run(&req);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = 0;
	hdev->req_result = 0;
	skb = hdev->req_rsp;
	hdev->req_rsp = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
				 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
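/* Usage sketch (illustrative only, not part of this file): a caller that
 * holds hdev->req_lock can issue a command and parse the synchronous
 * response skb. The opcode and response struct are real; the helper
 * function itself is hypothetical.
 *
 *	static int read_bd_addr_example(struct hci_dev *hdev, bdaddr_t *addr)
 *	{
 *		struct hci_rp_read_bd_addr *rp;
 *		struct sk_buff *skb;
 *
 *		skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *				     HCI_CMD_TIMEOUT);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		if (skb->len < sizeof(*rp)) {
 *			kfree_skb(skb);
 *			return -EPROTO;
 *		}
 *
 *		rp = (void *)skb->data;
 *		bacpy(addr, &rp->bdaddr);
 *
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */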
/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk)
{
	struct sk_buff *skb;
	u8 status;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
	if (IS_ERR(skb)) {
		if (!event)
			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
				   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* If the command returns a status event, skb will be set to NULL as
	 * there are no parameters. In case of failure, IS_ERR(skb) would have
	 * been true and the actual error would be found with PTR_ERR(skb).
	 */
	if (!skb)
		return 0;

	status = skb->data[0];

	kfree_skb(skb);

	return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);
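/* Return-value sketch for the status helpers above (illustrative): callers
 * must treat negative values as Linux errnos and positive values as the
 * first byte of the response parameters (normally an HCI status code);
 * only 0 means complete success.
 *
 *	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
 *				    HCI_CMD_TIMEOUT);
 *	if (err < 0)
 *		return err;	(e.g. -ETIMEDOUT, -EINTR)
 *	if (err)
 *		return -EIO;	(controller reported HCI error 'err')
 */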
static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

	bt_dev_dbg(hdev, "");

	/* Dequeue all entries and run them */
	while (1) {
		struct hci_cmd_sync_work_entry *entry;

		mutex_lock(&hdev->cmd_sync_work_lock);
		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
						 struct hci_cmd_sync_work_entry,
						 list);
		if (entry)
			list_del(&entry->list);
		mutex_unlock(&hdev->cmd_sync_work_lock);

		if (!entry)
			break;

		bt_dev_dbg(hdev, "entry %p", entry);

		if (entry->func) {
			int err;

			hci_req_sync_lock(hdev);
			err = entry->func(hdev, entry->data);
			if (entry->destroy)
				entry->destroy(hdev, entry->data, err);
			hci_req_sync_unlock(hdev);
		}

		kfree(entry);
	}
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	wake_up_interruptible(&hdev->req_wait_q);
}

static int hci_scan_disable_sync(struct hci_dev *hdev);
static int scan_disable_sync(struct hci_dev *hdev, void *data)
{
	return hci_scan_disable_sync(hdev);
}

static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
}

static void le_scan_disable(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	int status;

	bt_dev_dbg(hdev, "");
	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		goto _return;

	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
		goto _return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		goto _return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		goto _return;
	}

	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status %d", status);
		goto discov_stopped;
	}

	goto _return;

discov_stopped:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

_return:
	hci_dev_unlock(hdev);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup);

static int reenable_adv_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	if (hdev->cur_adv_instance) {
		return hci_schedule_adv_instance_sync(hdev,
						      hdev->cur_adv_instance,
						      true);
	} else {
		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	}

	return 0;
}

static void reenable_adv(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    reenable_adv_work);
	int status;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
	if (status)
		bt_dev_err(hdev, "failed to reenable ADV: %d", status);

	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next_instance && !ext_adv_capable(hdev))
		return hci_schedule_adv_instance_sync(hdev,
						      next_instance->instance,
						      false);

	return 0;
}

static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	kfree(data);

	hci_clear_adv_instance_sync(hdev, NULL, instance, false);

	if (list_empty(&hdev->adv_instances))
		return hci_disable_advertising_sync(hdev);

	return 0;
}

static void adv_timeout_expire(struct work_struct *work)
{
	u8 *inst_ptr;
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	if (hdev->cur_adv_instance == 0x00)
		goto unlock;

	inst_ptr = kmalloc(1, GFP_KERNEL);
	if (!inst_ptr)
		goto unlock;

	*inst_ptr = hdev->cur_adv_instance;
	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);

unlock:
	hci_dev_unlock(hdev);
}

void hci_cmd_sync_init(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
	mutex_init(&hdev->cmd_sync_work_lock);
	mutex_init(&hdev->unregister_lock);

	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
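/* Ownership sketch for queued entries (illustrative): hci_cmd_sync_work()
 * runs entry->func under the request lock and then calls entry->destroy
 * with func's return value, so 'data' is normally owned and freed by the
 * destroy callback. adv_timeout_expire() above takes the other route: it
 * passes destroy == NULL and lets adv_timeout_expire_sync() kfree() the
 * buffer itself. Note that entries cancelled before running (see
 * hci_cmd_sync_clear() below) only get their destroy callback invoked, so
 * pairing data with a destroy callback is the safer pattern.
 */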
478 */ 479 if (instance && hdev->cur_adv_instance == instance) 480 next_instance = hci_get_next_instance(hdev, instance); 481 482 if (instance == 0x00) { 483 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, 484 list) { 485 if (!(force || adv_instance->timeout)) 486 continue; 487 488 rem_inst = adv_instance->instance; 489 err = hci_remove_adv_instance(hdev, rem_inst); 490 if (!err) 491 mgmt_advertising_removed(sk, hdev, rem_inst); 492 } 493 } else { 494 adv_instance = hci_find_adv_instance(hdev, instance); 495 496 if (force || (adv_instance && adv_instance->timeout && 497 !adv_instance->remaining_time)) { 498 /* Don't advertise a removed instance. */ 499 if (next_instance && 500 next_instance->instance == instance) 501 next_instance = NULL; 502 503 err = hci_remove_adv_instance(hdev, instance); 504 if (!err) 505 mgmt_advertising_removed(sk, hdev, instance); 506 } 507 } 508 509 if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) 510 return 0; 511 512 if (next_instance && !ext_adv_capable(hdev)) 513 return hci_schedule_adv_instance_sync(hdev, 514 next_instance->instance, 515 false); 516 517 return 0; 518 } 519 520 static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) 521 { 522 u8 instance = *(u8 *)data; 523 524 kfree(data); 525 526 hci_clear_adv_instance_sync(hdev, NULL, instance, false); 527 528 if (list_empty(&hdev->adv_instances)) 529 return hci_disable_advertising_sync(hdev); 530 531 return 0; 532 } 533 534 static void adv_timeout_expire(struct work_struct *work) 535 { 536 u8 *inst_ptr; 537 struct hci_dev *hdev = container_of(work, struct hci_dev, 538 adv_instance_expire.work); 539 540 bt_dev_dbg(hdev, ""); 541 542 hci_dev_lock(hdev); 543 544 hdev->adv_instance_timeout = 0; 545 546 if (hdev->cur_adv_instance == 0x00) 547 goto unlock; 548 549 inst_ptr = kmalloc(1, GFP_KERNEL); 550 if (!inst_ptr) 551 goto unlock; 552 553 *inst_ptr = hdev->cur_adv_instance; 554 hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); 555 556 unlock: 557 hci_dev_unlock(hdev); 558 } 559 560 void hci_cmd_sync_init(struct hci_dev *hdev) 561 { 562 INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); 563 INIT_LIST_HEAD(&hdev->cmd_sync_work_list); 564 mutex_init(&hdev->cmd_sync_work_lock); 565 mutex_init(&hdev->unregister_lock); 566 567 INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); 568 INIT_WORK(&hdev->reenable_adv_work, reenable_adv); 569 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); 570 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); 571 } 572 573 static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev, 574 struct hci_cmd_sync_work_entry *entry, 575 int err) 576 { 577 if (entry->destroy) 578 entry->destroy(hdev, entry->data, err); 579 580 list_del(&entry->list); 581 kfree(entry); 582 } 583 584 void hci_cmd_sync_clear(struct hci_dev *hdev) 585 { 586 struct hci_cmd_sync_work_entry *entry, *tmp; 587 588 cancel_work_sync(&hdev->cmd_sync_work); 589 cancel_work_sync(&hdev->reenable_adv_work); 590 591 mutex_lock(&hdev->cmd_sync_work_lock); 592 list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) 593 _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); 594 mutex_unlock(&hdev->cmd_sync_work_lock); 595 } 596 597 void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) 598 { 599 bt_dev_dbg(hdev, "err 0x%2.2x", err); 600 601 if (hdev->req_status == HCI_REQ_PEND) { 602 hdev->req_result = err; 603 hdev->req_status = HCI_REQ_CANCELED; 604 605 queue_work(hdev->workqueue, 
static struct hci_cmd_sync_work_entry *
_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			   void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
		if (func && entry->func != func)
			continue;

		if (data && entry->data != data)
			continue;

		if (destroy && entry->destroy != destroy)
			continue;

		return entry;
	}

	return NULL;
}

/* Queue HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create
 *   a new entry and queue it.
 */
int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			    void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_queue(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue_once);

/* Lookup HCI command entry:
 *
 * - Return first entry that matches by function callback or data or
 *   destroy callback.
 */
struct hci_cmd_sync_work_entry *
hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	mutex_lock(&hdev->cmd_sync_work_lock);
	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return entry;
}
EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);

/* Cancel HCI command entry */
void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
			       struct hci_cmd_sync_work_entry *entry)
{
	mutex_lock(&hdev->cmd_sync_work_lock);
	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);

/* Dequeue one HCI command entry:
 *
 * - Lookup and cancel first entry that matches.
 */
bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
			       hci_cmd_sync_work_func_t func,
			       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	if (!entry)
		return false;

	hci_cmd_sync_cancel_entry(hdev, entry);

	return true;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
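/* Matching sketch for the lookup/dequeue helpers above (illustrative): a
 * NULL func, data or destroy argument acts as a wildcard, so the calls
 * below would cancel, respectively, every pending entry that uses
 * scan_disable_sync and every pending entry regardless of callback:
 *
 *	hci_cmd_sync_dequeue(hdev, scan_disable_sync, NULL, NULL);
 *	hci_cmd_sync_dequeue(hdev, NULL, NULL, NULL);
 */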
/* Dequeue HCI command entry:
 *
 * - Lookup and cancel any entry that matches by function callback or data or
 *   destroy callback.
 */
bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	bool ret = false;

	mutex_lock(&hdev->cmd_sync_work_lock);
	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
						   destroy))) {
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
		ret = true;
	}
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return ret;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue);

int hci_update_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!lmp_ext_inq_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return 0;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

int hci_update_class_sync(struct hci_dev *hdev)
{
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode
		 * bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
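/* Bit-index sketch for the le_states checks above (illustrative): the LE
 * supported-states mask is a little-endian 64-bit field, so a combined
 * state bit N is tested as le_states[N / 8] & (1 << (N % 8)). For example,
 * bit 20 -> le_states[2] & 0x10 and bit 38 -> le_states[4] & 0x40, matching
 * the constants used in is_advertising_allowed().
 */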
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return 0;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
				     6, rpa, HCI_CMD_TIMEOUT);
}
int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
				   bool rpa, u8 *own_addr_type)
{
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (rpa) {
		/* If the controller supports LL Privacy use own address type
		 * 0x03 (resolvable private address resolved by the
		 * controller).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Check if RPA is valid */
		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
		if (err)
			return err;

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		return hci_set_random_addr_sync(hdev, &nrpa);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
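/* Address-layout sketch for the NRPA generation above (illustrative):
 * bdaddr_t stores the address little-endian, so b[5] is the most
 * significant byte and clearing its two top bits (b[5] &= 0x3f) yields the
 * 0b00 prefix that marks a non-resolvable private address. An RPA instead
 * carries the 0b01 prefix, i.e. (b[5] & 0xc0) == 0x40.
 */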
static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	u8 size;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0) {
		struct adv_info *adv;

		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If not enabled there is nothing to do */
		if (!adv->enabled)
			return 0;
	}

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	set->handle = instance;

	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     size, data, HCI_CMD_TIMEOUT);
}

static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
					    bdaddr_t *random_addr)
{
	struct hci_cp_le_set_adv_set_rand_addr cp;
	int err;

	if (!instance) {
		/* Instance 0x00 doesn't have an adv_info, instead it uses
		 * hdev->random_addr to track its address so whenever it needs
		 * to be updated this also sets the random address since
		 * hdev->random_addr is shared with the scan state machine.
		 */
		err = hci_set_random_addr_sync(hdev, random_addr);
		if (err)
			return err;
	}

	memset(&cp, 0, sizeof(cp));

	cp.handle = instance;
	bacpy(&cp.bdaddr, random_addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv;
	bool secondary_adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	} else {
		adv = NULL;
	}

	/* Updating parameters of an active instance will return a
	 * Command Disallowed error, so we must first disable the
	 * instance if it is active.
	 */
	if (adv && !adv->pending) {
		err = hci_disable_ext_adv_instance_sync(hdev, instance);
		if (err)
			return err;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
		cp.tx_power = adv->tx_power;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
	 * contains the peer's Identity Address and the Peer_Address_Type
	 * parameter contains the peer's Identity Type (i.e., 0x00 or 0x01).
	 * These parameters are used to locate the corresponding local IRK in
	 * the resolving list; this IRK is used to generate our own address
	 * used in the advertisement.
	 */
	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
		hci_copy_identity_address(hdev, &cp.peer_addr,
					  &cp.peer_addr_type);

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		/* Check if random address need to be updated */
		if (adv) {
			if (!bacmp(&random_addr, &adv->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		return hci_set_adv_set_random_addr_sync(hdev, instance,
							&random_addr);
	}

	return 0;
}
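/* Mapping sketch for the evt_properties selection above (illustrative):
 *
 *	connectable?	scannable?	legacy PDU		extended PDU
 *	yes		-		ADV_IND			connectable
 *	no		yes		ADV_SCAN_IND		scannable
 *	no		no		ADV_NONCONN_IND		non-conn/non-scan
 *
 * Extended (secondary-PHY) sets cannot use the legacy ADV_* event types, so
 * the MGMT_ADV_FLAG_SEC_MASK check picks the LE_EXT_ADV_* variants instead.
 */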
static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_ext_scan_rsp_data cp;
		u8 data[HCI_MAX_EXT_AD_LENGTH];
	} pdu;
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->scan_rsp_changed)
			return 0;
	}

	len = eir_create_scan_rsp(hdev, instance, pdu.data);

	pdu.cp.handle = instance;
	pdu.cp.length = len;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
				    sizeof(pdu.cp) + len, &pdu.cp,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if (adv) {
		adv->scan_rsp_changed = false;
	} else {
		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;
	}

	return 0;
}

static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_scan_rsp(hdev, instance, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return 0;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_scan_rsp_data_sync(hdev, instance);

	return __hci_set_scan_rsp_data_sync(hdev, instance);
}

int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	struct adv_info *adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If already enabled there is nothing to do */
		if (adv->enabled)
			return 0;
	} else {
		adv = NULL;
	}

	cp = (void *)data;
	set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(set, 0, sizeof(*set));

	set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv && adv->timeout) {
		u16 duration = adv->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		set->duration = cpu_to_le16(duration / 10);
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     sizeof(*cp) +
				     sizeof(*set) * cp->num_of_sets,
				     data, HCI_CMD_TIMEOUT);
}

int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	err = hci_setup_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_ext_advertising_sync(hdev, instance);
}
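/* Ordering sketch for extended advertising bring-up (illustrative):
 * hci_start_ext_adv_sync() issues the commands in the order the controller
 * accepts them, and a caller restarting instance 0x01 would rely on the
 * same sequence:
 *
 *	err = hci_setup_ext_adv_instance_sync(hdev, 0x01);
 *		(parameters; the set is disabled first if it is active)
 *	err = hci_set_ext_scan_rsp_data_sync(hdev, 0x01);
 *		(data for the set)
 *	err = hci_enable_ext_advertising_sync(hdev, 0x01);
 *		(enable last)
 */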
int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already disabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (!adv || !adv->periodic || !adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x00;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
				       u16 min_interval, u16 max_interval)
{
	struct hci_cp_le_set_per_adv_params cp;

	memset(&cp, 0, sizeof(cp));

	if (!min_interval)
		min_interval = DISCOV_LE_PER_ADV_INT_MIN;

	if (!max_interval)
		max_interval = DISCOV_LE_PER_ADV_INT_MAX;

	cp.handle = instance;
	cp.min_interval = cpu_to_le16(min_interval);
	cp.max_interval = cpu_to_le16(max_interval);
	cp.periodic_properties = 0x0000;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_per_adv_data cp;
		u8 data[HCI_MAX_PER_AD_LENGTH];
	} pdu;
	u8 len;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		struct adv_info *adv = hci_find_adv_instance(hdev, instance);

		if (!adv || !adv->periodic)
			return 0;
	}

	len = eir_create_per_adv_data(hdev, instance, pdu.data);

	pdu.cp.length = len;
	pdu.cp.handle = instance;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
				     sizeof(pdu.cp) + len, &pdu,
				     HCI_CMD_TIMEOUT);
}

static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already enabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (adv && adv->periodic && adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x01;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Checks if the periodic advertising data contains a Basic Audio
 * Announcement and, if it does, generates a Broadcast ID and adds a
 * Broadcast Announcement.
 */
static int hci_adv_bcast_announcement(struct hci_dev *hdev,
				      struct adv_info *adv)
{
	u8 bid[3];
	u8 ad[4 + 3];

	/* Skip if NULL adv as instance 0x00 is used for general purpose
	 * advertising so it cannot be used for the likes of Broadcast
	 * Announcement as it can be overwritten at any point.
	 */
	if (!adv)
		return 0;

	/* If the PA data doesn't contain a Basic Audio Announcement there
	 * is nothing to do.
	 */
	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
				  0x1851, NULL))
		return 0;

	/* Check if the advertising data already has a Broadcast Announcement
	 * since the process may want to control the Broadcast ID directly and
	 * in that case the kernel shall not interfere.
	 */
	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
				 NULL))
		return 0;

	/* Generate Broadcast ID */
	get_random_bytes(bid, sizeof(bid));
	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);

	return hci_update_adv_data_sync(hdev, adv->instance);
}

int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
			   u8 *data, u32 flags, u16 min_interval,
			   u16 max_interval, u16 sync_interval)
{
	struct adv_info *adv = NULL;
	int err;
	bool added = false;

	hci_disable_per_advertising_sync(hdev, instance);

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);

		/* Create an instance if that could not be found */
		if (!adv) {
			adv = hci_add_per_instance(hdev, instance, flags,
						   data_len, data,
						   sync_interval,
						   sync_interval);
			if (IS_ERR(adv))
				return PTR_ERR(adv);
			adv->pending = false;
			added = true;
		}
	}

	/* Start advertising */
	err = hci_start_ext_adv_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_adv_bcast_announcement(hdev, adv);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
					  max_interval);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_data_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_enable_per_advertising_sync(hdev, instance);
	if (err < 0)
		goto fail;

	return 0;

fail:
	if (added)
		hci_remove_adv_instance(hdev, instance);

	return err;
}
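/* Service-data sketch for the announcement check above (illustrative):
 * 0x1851 is the Basic Audio Announcement Service UUID carried in the
 * periodic advertising data, while 0x1852 is the Broadcast Audio
 * Announcement Service UUID placed in the extended advertising data
 * together with the randomly generated 3-byte Broadcast_ID, e.g.:
 *
 *	len:  0x06
 *	type: 0x16 (Service Data - 16-bit UUID)
 *	data: 52 18 b0 b1 b2   (UUID 0x1852 LE + Broadcast_ID b0..b2)
 */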
static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, instance);

	err = hci_update_adv_data_sync(hdev, instance);
	if (err)
		return err;

	err = hci_update_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv_instance;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;
	u8 status;

	if (ext_adv_capable(hdev))
		return hci_enable_ext_advertising_sync(hdev,
						       hdev->cur_adv_instance);

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EINVAL;

	status = hci_disable_advertising_sync(hdev);
	if (status)
		return status;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	status = hci_update_random_address_sync(hdev, !connectable,
						adv_use_rpa(hdev, flags),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		adv_min_interval = adv_instance->min_interval;
		adv_max_interval = adv_instance->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}
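/* Unit sketch for the interval fields above (illustrative): the LE
 * advertising interval is expressed in units of 0.625 ms, so a stored
 * value of 0x0800 (2048) corresponds to 2048 * 0.625 ms = 1.28 s, the
 * specification's default advertising interval.
 */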
static int enable_advertising_sync(struct hci_dev *hdev, void *data)
{
	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
}

int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				     struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	err = hci_disable_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
					sizeof(instance), &instance, 0,
					HCI_CMD_TIMEOUT, sk);
}

static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
{
	struct adv_info *adv = data;
	u8 instance = 0;

	if (adv)
		instance = adv->instance;

	return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
}

int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv = NULL;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	}

	return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
}

int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
{
	struct hci_cp_le_term_big cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_ext_adv_data cp;
		u8 data[HCI_MAX_EXT_AD_LENGTH];
	} pdu;
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->adv_data_changed)
			return 0;
	}

	len = eir_create_adv_data(hdev, instance, pdu.data);

	pdu.cp.length = len;
	pdu.cp.handle = instance;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
				    sizeof(pdu.cp) + len, &pdu.cp,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Update data if the command succeeds */
	if (adv) {
		adv->adv_data_changed = false;
	} else {
		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;
	}

	return 0;
}

static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return 0;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_adv_data_sync(hdev, instance);

	return hci_set_adv_data_sync(hdev, instance);
}
int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				   bool force)
{
	struct adv_info *adv = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
		timeout = adv->duration;
	else
		timeout = adv->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv->timeout)
		adv->remaining_time = adv->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;

	return hci_start_adv_sync(hdev, instance);
}
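/* Worked example for the timeout logic above (illustrative): an instance
 * with timeout != 0, duration = 2 s and remaining_time = 5 s is scheduled
 * for timeout = duration = 2 s and its remaining_time drops to 3 s; once
 * remaining_time falls below the duration (say 1 s), the next rotation is
 * shortened to that 1 s and the lifetime then reaches zero.
 */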
static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	/* Disable instance 0x00 to disable all instances */
	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
	if (err)
		return err;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}

static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
{
	struct adv_info *adv, *n;
	int err = 0;

	if (ext_adv_capable(hdev))
		/* Remove all existing sets */
		err = hci_clear_adv_sets_sync(hdev, sk);
	if (ext_adv_capable(hdev))
		return err;

	/* This is safe as long as there is no command sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	/* Cleanup non-ext instances */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;
		int err;

		if (!(force || adv->timeout))
			continue;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
			       struct sock *sk)
{
	int err = 0;

	/* If we use extended advertising, instance has to be removed first. */
	if (ext_adv_capable(hdev))
		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
	if (ext_adv_capable(hdev))
		return err;

	/* This is safe as long as there is no command sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, instance);
	if (!err)
		mgmt_advertising_removed(sk, hdev, instance);

	hci_dev_unlock(hdev);

	return err;
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *next = NULL;
	int err;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (hdev->cur_adv_instance == instance)
		next = hci_get_next_instance(hdev, instance);

	if (!instance) {
		err = hci_clear_adv_sync(hdev, sk, force);
		if (err)
			return err;
	} else {
		struct adv_info *adv = hci_find_adv_instance(hdev, instance);

		if (force || (adv && adv->timeout && !adv->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next && next->instance == instance)
				next = NULL;

			err = hci_remove_adv_sync(hdev, instance, sk);
			if (err)
				return err;
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next && !ext_adv_capable(hdev))
		hci_schedule_adv_instance_sync(hdev, next->instance, false);

	return 0;
}
int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_read_rssi cp;

	cp.handle = handle;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
				     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
}

int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
{
	struct hci_cp_read_tx_power cp;

	cp.handle = handle;
	cp.type = type;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_disable_advertising_sync(struct hci_dev *hdev)
{
	u8 enable = 0x00;
	int err = 0;

	/* If controller is not advertising we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	if (ext_adv_capable(hdev))
		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
	if (ext_adv_capable(hdev))
		return err;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
					   u8 filter_dup)
{
	struct hci_cp_le_set_ext_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup)
{
	struct hci_cp_le_set_scan_enable cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (val && hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
{
	if (!use_ll_privacy(hdev))
		return 0;

	/* If controller is not/already resolving we are done. */
	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
				     sizeof(val), &val, HCI_CMD_TIMEOUT);
}

static int hci_scan_disable_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	return err;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started by this call (i.e. it was
 * not already running), otherwise return false.
 */
static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		hci_start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}
static int hci_scan_disable_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	return 0;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started by this function;
 * otherwise return false.
 */
static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		hci_start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}
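/* Note (added for clarity; behavior lives in the interleave_scan delayed
 * work): the state is expected to alternate between
 * INTERLEAVE_SCAN_ALLOWLIST and INTERLEAVE_SCAN_NO_FILTER so that monitored
 * advertisers outside the accept list are still observed without the cost
 * of a permanently unfiltered scan.
 */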
/* Remove a connection from the resolving list if needed. */
static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_resolv_list cp;
	struct bdaddr_list_with_irk *entry;

	if (!use_ll_privacy(hdev))
		return 0;

	/* Check if the IRK has been programmed */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
						bdaddr_type);
	if (!entry)
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
				       bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;
	int err;

	/* Check if device is on accept list before removing it */
	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	/* Ignore errors when removing from the resolving list, as it is
	 * likely that the device was never added.
	 */
	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
		return err;
	}

	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}

struct conn_params {
	bdaddr_t addr;
	u8 addr_type;
	hci_conn_flags_t flags;
	u8 privacy_mode;
};

/* Add a connection to the resolving list if needed.
 * Setting params to NULL programs the local hdev->irk.
 */
static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
					struct conn_params *params)
{
	struct hci_cp_le_add_to_resolv_list cp;
	struct smp_irk *irk;
	struct bdaddr_list_with_irk *entry;
	struct hci_conn_params *p;

	if (!use_ll_privacy(hdev))
		return 0;

	/* Attempt to program local identity address, type and irk if params is
	 * NULL.
	 */
	if (!params) {
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return 0;

		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
		memcpy(cp.peer_irk, hdev->irk, 16);
		goto done;
	}

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	/* Check if the IRK has _not_ been programmed yet. */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
						&params->addr,
						params->addr_type);
	if (entry)
		return 0;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);
	memcpy(cp.peer_irk, irk->val, 16);

	/* Default privacy mode is always Network */
	params->privacy_mode = HCI_NETWORK_PRIVACY;

	rcu_read_lock();
	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
				      &params->addr, params->addr_type);
	if (!p)
		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &params->addr, params->addr_type);
	if (p)
		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
	rcu_read_unlock();

done:
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		memcpy(cp.local_irk, hdev->irk, 16);
	else
		memset(cp.local_irk, 0, 16);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
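/* Illustrative sketch (example only): per the contract above, passing NULL
 * programs the local identity address and hdev->irk, which is what the
 * powered-update path relies on while the resolving list is still empty.
 */
static int __maybe_unused example_program_local_irk(struct hci_dev *hdev)
{
	return hci_le_add_resolve_list_sync(hdev, NULL);
}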
2298 */ 2299 if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) 2300 return 0; 2301 2302 irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); 2303 if (!irk) 2304 return 0; 2305 2306 memset(&cp, 0, sizeof(cp)); 2307 cp.bdaddr_type = irk->addr_type; 2308 bacpy(&cp.bdaddr, &irk->bdaddr); 2309 cp.mode = HCI_DEVICE_PRIVACY; 2310 2311 /* Note: params->privacy_mode is not updated since it is a copy */ 2312 2313 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, 2314 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 2315 } 2316 2317 /* Adds connection to allow list if needed, if the device uses RPA (has IRK) 2318 * this attempts to program the device in the resolving list as well and 2319 * properly set the privacy mode. 2320 */ 2321 static int hci_le_add_accept_list_sync(struct hci_dev *hdev, 2322 struct conn_params *params, 2323 u8 *num_entries) 2324 { 2325 struct hci_cp_le_add_to_accept_list cp; 2326 int err; 2327 2328 /* During suspend, only wakeable devices can be in acceptlist */ 2329 if (hdev->suspended && 2330 !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) { 2331 hci_le_del_accept_list_sync(hdev, ¶ms->addr, 2332 params->addr_type); 2333 return 0; 2334 } 2335 2336 /* Select filter policy to accept all advertising */ 2337 if (*num_entries >= hdev->le_accept_list_size) 2338 return -ENOSPC; 2339 2340 /* Accept list can not be used with RPAs */ 2341 if (!use_ll_privacy(hdev) && 2342 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) 2343 return -EINVAL; 2344 2345 /* Attempt to program the device in the resolving list first to avoid 2346 * having to rollback in case it fails since the resolving list is 2347 * dynamic it can probably be smaller than the accept list. 2348 */ 2349 err = hci_le_add_resolve_list_sync(hdev, params); 2350 if (err) { 2351 bt_dev_err(hdev, "Unable to add to resolve list: %d", err); 2352 return err; 2353 } 2354 2355 /* Set Privacy Mode */ 2356 err = hci_le_set_privacy_mode_sync(hdev, params); 2357 if (err) { 2358 bt_dev_err(hdev, "Unable to set privacy mode: %d", err); 2359 return err; 2360 } 2361 2362 /* Check if already in accept list */ 2363 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr, 2364 params->addr_type)) 2365 return 0; 2366 2367 *num_entries += 1; 2368 cp.bdaddr_type = params->addr_type; 2369 bacpy(&cp.bdaddr, ¶ms->addr); 2370 2371 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST, 2372 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 2373 if (err) { 2374 bt_dev_err(hdev, "Unable to add to allow list: %d", err); 2375 /* Rollback the device from the resolving list */ 2376 hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); 2377 return err; 2378 } 2379 2380 bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr, 2381 cp.bdaddr_type); 2382 2383 return 0; 2384 } 2385 2386 /* This function disables/pause all advertising instances */ 2387 static int hci_pause_advertising_sync(struct hci_dev *hdev) 2388 { 2389 int err; 2390 int old_state; 2391 2392 /* If already been paused there is nothing to do. */ 2393 if (hdev->advertising_paused) 2394 return 0; 2395 2396 bt_dev_dbg(hdev, "Pausing directed advertising"); 2397 2398 /* Stop directed advertising */ 2399 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); 2400 if (old_state) { 2401 /* When discoverable timeout triggers, then just make sure 2402 * the limited discoverable flag is cleared. Even in the case 2403 * of a timeout triggered from general discoverable, it is 2404 * safe to unconditionally clear the flag. 
2405 */ 2406 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); 2407 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); 2408 hdev->discov_timeout = 0; 2409 } 2410 2411 bt_dev_dbg(hdev, "Pausing advertising instances"); 2412 2413 /* Call to disable any advertisements active on the controller. 2414 * This will succeed even if no advertisements are configured. 2415 */ 2416 err = hci_disable_advertising_sync(hdev); 2417 if (err) 2418 return err; 2419 2420 /* If we are using software rotation, pause the loop */ 2421 if (!ext_adv_capable(hdev)) 2422 cancel_adv_timeout(hdev); 2423 2424 hdev->advertising_paused = true; 2425 hdev->advertising_old_state = old_state; 2426 2427 return 0; 2428 } 2429 2430 /* This function enables all user advertising instances */ 2431 static int hci_resume_advertising_sync(struct hci_dev *hdev) 2432 { 2433 struct adv_info *adv, *tmp; 2434 int err; 2435 2436 /* If advertising has not been paused there is nothing to do. */ 2437 if (!hdev->advertising_paused) 2438 return 0; 2439 2440 /* Resume directed advertising */ 2441 hdev->advertising_paused = false; 2442 if (hdev->advertising_old_state) { 2443 hci_dev_set_flag(hdev, HCI_ADVERTISING); 2444 hdev->advertising_old_state = 0; 2445 } 2446 2447 bt_dev_dbg(hdev, "Resuming advertising instances"); 2448 2449 if (ext_adv_capable(hdev)) { 2450 /* Call for each tracked instance to be re-enabled */ 2451 list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) { 2452 err = hci_enable_ext_advertising_sync(hdev, 2453 adv->instance); 2454 if (!err) 2455 continue; 2456 2457 /* If the instance cannot be resumed remove it */ 2458 hci_remove_ext_adv_instance_sync(hdev, adv->instance, 2459 NULL); 2460 } 2461 } else { 2462 /* Schedule for most recent instance to be restarted and begin 2463 * the software rotation loop 2464 */ 2465 err = hci_schedule_adv_instance_sync(hdev, 2466 hdev->cur_adv_instance, 2467 true); 2468 } 2469 2470 hdev->advertising_paused = false; 2471 2472 return err; 2473 } 2474 2475 static int hci_pause_addr_resolution(struct hci_dev *hdev) 2476 { 2477 int err; 2478 2479 if (!use_ll_privacy(hdev)) 2480 return 0; 2481 2482 if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) 2483 return 0; 2484 2485 /* Cannot disable addr resolution if scanning is enabled or 2486 * when initiating an LE connection. 2487 */ 2488 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2489 hci_lookup_le_connect(hdev)) { 2490 bt_dev_err(hdev, "Command not allowed when scan/LE connect"); 2491 return -EPERM; 2492 } 2493 2494 /* Cannot disable addr resolution if advertising is enabled. */ 2495 err = hci_pause_advertising_sync(hdev); 2496 if (err) { 2497 bt_dev_err(hdev, "Pause advertising failed: %d", err); 2498 return err; 2499 } 2500 2501 err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); 2502 if (err) 2503 bt_dev_err(hdev, "Unable to disable Address Resolution: %d", 2504 err); 2505 2506 /* Return if address resolution is disabled and RPA is not used. */ 2507 if (!err && scan_use_rpa(hdev)) 2508 return 0; 2509 2510 hci_resume_advertising_sync(hdev); 2511 return err; 2512 } 2513 2514 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, 2515 bool extended, struct sock *sk) 2516 { 2517 u16 opcode = extended ? 
static int hci_pause_addr_resolution(struct hci_dev *hdev)
{
	int err;

	if (!use_ll_privacy(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	/* Cannot disable addr resolution if scanning is enabled or
	 * when initiating an LE connection.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
		return -EPERM;
	}

	/* Cannot disable addr resolution if advertising is enabled. */
	err = hci_pause_advertising_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Pause advertising failed: %d", err);
		return err;
	}

	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
	if (err)
		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
			   err);

	/* Return if address resolution was disabled and an RPA is being
	 * used, in which case advertising is left paused.
	 */
	if (!err && scan_use_rpa(hdev))
		return 0;

	hci_resume_advertising_sync(hdev);
	return err;
}

struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
					     bool extended, struct sock *sk)
{
	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
		HCI_OP_READ_LOCAL_OOB_DATA;

	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}

static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
{
	struct hci_conn_params *params;
	struct conn_params *p;
	size_t i;

	rcu_read_lock();

	i = 0;
	list_for_each_entry_rcu(params, list, action)
		++i;
	*n = i;

	rcu_read_unlock();

	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
	if (!p)
		return NULL;

	rcu_read_lock();

	i = 0;
	list_for_each_entry_rcu(params, list, action) {
		/* Racing adds are handled in next scan update */
		if (i >= *n)
			break;

		/* No hdev->lock, but: addr, addr_type are immutable.
		 * privacy_mode is only written by us or in
		 * hci_cc_le_set_privacy_mode that we wait for.
		 * We should be idempotent so MGMT updating flags
		 * while we are processing is OK.
		 */
		bacpy(&p[i].addr, &params->addr);
		p[i].addr_type = params->addr_type;
		p[i].flags = READ_ONCE(params->flags);
		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
		++i;
	}

	rcu_read_unlock();

	*n = i;
	return p;
}
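/* Illustrative sketch (example only): conn_params_copy() exists so an
 * RCU-protected action list can be walked while the sync commands below
 * sleep; iterate the snapshot, then kvfree() it.
 */
static int __maybe_unused example_walk_pend_conns(struct hci_dev *hdev)
{
	struct conn_params *p;
	size_t i, n;

	p = conn_params_copy(&hdev->pend_le_conns, &n);
	if (!p)
		return -ENOMEM;

	for (i = 0; i < n; ++i)
		bt_dev_dbg(hdev, "pending %pMR (0x%x)", &p[i].addr,
			   p[i].addr_type);

	kvfree(p);

	return 0;
}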
/* Clear LE Accept List */
static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[26] & 0x80))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Device must not be scanning when updating the accept list.
 *
 * Update is done using the following sequence:
 *
 * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
 * Remove Devices From Accept List ->
 * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
 * Add Devices to Accept List ->
 * (has IRK && use_ll_privacy(Add Devices to Resolving List)) ->
 * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * In case of failure advertising shall be restored to its original state and
 * the returned filter policy shall disable the accept list, since either the
 * accept list or the resolving list could not be programmed.
 */
static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
{
	struct conn_params *params;
	struct bdaddr_list *b, *t;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	u8 filter_policy;
	size_t i, n;
	int err;

	/* Pause advertising if resolving list can be used as controllers
	 * cannot accept resolving list modifications while advertising.
	 */
	if (use_ll_privacy(hdev)) {
		err = hci_pause_advertising_sync(hdev);
		if (err) {
			bt_dev_err(hdev, "pause advertising failed: %d", err);
			return 0x00;
		}
	}

	/* Disable address resolution while reprogramming accept list since
	 * devices that do have an IRK will be programmed in the resolving list
	 * when LL Privacy is enabled.
	 */
	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
		goto done;
	}

	/* Force address filtering if PA Sync is in progress */
	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		struct hci_cp_le_pa_create_sync *sent;

		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
		if (sent) {
			struct conn_params pa;

			memset(&pa, 0, sizeof(pa));

			bacpy(&pa.addr, &sent->addr);
			pa.addr_type = sent->addr_type;

			/* Clear first since there could be addresses left
			 * behind.
			 */
			hci_le_clear_accept_list_sync(hdev);

			num_entries = 1;
			err = hci_le_add_accept_list_sync(hdev, &pa,
							  &num_entries);
			goto done;
		}
	}

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is connected or is
	 * still in the list of pending connections or list of devices to
	 * report. If not present in either list, then remove it from
	 * the controller.
	 */
	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
			continue;

		/* Pointers not dereferenced, no locks needed */
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
						    b->bdaddr_type);
			continue;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 *
	 * The list and params may be mutated while we wait for events,
	 * so make a copy and iterate it.
	 */

	params = conn_params_copy(&hdev->pend_le_conns, &n);
	if (!params) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < n; ++i) {
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);
		if (err) {
			kvfree(params);
			goto done;
		}
	}

	kvfree(params);

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */

	params = conn_params_copy(&hdev->pend_le_reports, &n);
	if (!params) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < n; ++i) {
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);
		if (err) {
			kvfree(params);
			goto done;
		}
	}

	kvfree(params);

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		err = -EINVAL;

done:
	filter_policy = err ? 0x00 : 0x01;

	/* Enable address resolution when LL Privacy is enabled. */
	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
	if (err)
		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);

	/* Resume advertising if it was paused */
	if (use_ll_privacy(hdev))
		hci_resume_advertising_sync(hdev);

	/* Select filter policy to use accept list */
	return filter_policy;
}
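/* Note: the value returned above is an HCI scan filter policy (0x00 =
 * accept all advertisers, 0x01 = use the accept list), not an errno;
 * callers such as hci_passive_scan_sync() below feed it straight into the
 * scan parameter commands.
 */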
static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
				   u8 type, u16 interval, u16 window)
{
	cp->type = type;
	cp->interval = cpu_to_le16(interval);
	cp->window = cpu_to_le16(window);
}
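/* Note: hci_le_set_ext_scan_param_sync() below appends one
 * hci_cp_le_scan_phy_params block per PHY enabled in scanning_phys, which
 * is why the command length is computed as
 * sizeof(*cp) + sizeof(*phy) * num_phy.
 */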
2788 */ 2789 if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { 2790 struct hci_cp_le_add_to_accept_list *sent; 2791 2792 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); 2793 if (sent) { 2794 struct hci_conn *conn; 2795 2796 conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK, 2797 &sent->bdaddr); 2798 if (conn) { 2799 struct bt_iso_qos *qos = &conn->iso_qos; 2800 2801 if (qos->bcast.in.phy & BT_ISO_PHY_1M || 2802 qos->bcast.in.phy & BT_ISO_PHY_2M) { 2803 cp->scanning_phys |= LE_SCAN_PHY_1M; 2804 hci_le_scan_phy_params(phy, type, 2805 interval, 2806 window); 2807 num_phy++; 2808 phy++; 2809 } 2810 2811 if (qos->bcast.in.phy & BT_ISO_PHY_CODED) { 2812 cp->scanning_phys |= LE_SCAN_PHY_CODED; 2813 hci_le_scan_phy_params(phy, type, 2814 interval, 2815 window); 2816 num_phy++; 2817 phy++; 2818 } 2819 2820 if (num_phy) 2821 goto done; 2822 } 2823 } 2824 } 2825 2826 if (scan_1m(hdev) || scan_2m(hdev)) { 2827 cp->scanning_phys |= LE_SCAN_PHY_1M; 2828 hci_le_scan_phy_params(phy, type, interval, window); 2829 num_phy++; 2830 phy++; 2831 } 2832 2833 if (scan_coded(hdev)) { 2834 cp->scanning_phys |= LE_SCAN_PHY_CODED; 2835 hci_le_scan_phy_params(phy, type, interval, window); 2836 num_phy++; 2837 phy++; 2838 } 2839 2840 done: 2841 if (!num_phy) 2842 return -EINVAL; 2843 2844 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, 2845 sizeof(*cp) + sizeof(*phy) * num_phy, 2846 data, HCI_CMD_TIMEOUT); 2847 } 2848 2849 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type, 2850 u16 interval, u16 window, 2851 u8 own_addr_type, u8 filter_policy) 2852 { 2853 struct hci_cp_le_set_scan_param cp; 2854 2855 if (use_ext_scan(hdev)) 2856 return hci_le_set_ext_scan_param_sync(hdev, type, interval, 2857 window, own_addr_type, 2858 filter_policy); 2859 2860 memset(&cp, 0, sizeof(cp)); 2861 cp.type = type; 2862 cp.interval = cpu_to_le16(interval); 2863 cp.window = cpu_to_le16(window); 2864 cp.own_address_type = own_addr_type; 2865 cp.filter_policy = filter_policy; 2866 2867 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM, 2868 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 2869 } 2870 2871 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval, 2872 u16 window, u8 own_addr_type, u8 filter_policy, 2873 u8 filter_dup) 2874 { 2875 int err; 2876 2877 if (hdev->scanning_paused) { 2878 bt_dev_dbg(hdev, "Scanning is paused for suspend"); 2879 return 0; 2880 } 2881 2882 err = hci_le_set_scan_param_sync(hdev, type, interval, window, 2883 own_addr_type, filter_policy); 2884 if (err) 2885 return err; 2886 2887 return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup); 2888 } 2889 2890 static int hci_passive_scan_sync(struct hci_dev *hdev) 2891 { 2892 u8 own_addr_type; 2893 u8 filter_policy; 2894 u16 window, interval; 2895 u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; 2896 int err; 2897 2898 if (hdev->scanning_paused) { 2899 bt_dev_dbg(hdev, "Scanning is paused for suspend"); 2900 return 0; 2901 } 2902 2903 err = hci_scan_disable_sync(hdev); 2904 if (err) { 2905 bt_dev_err(hdev, "disable scanning failed: %d", err); 2906 return err; 2907 } 2908 2909 /* Set require_privacy to false since no SCAN_REQ are send 2910 * during passive scanning. Not using an non-resolvable address 2911 * here is important so that peer devices using direct 2912 * advertising with our address will be correctly reported 2913 * by the controller. 
static int hci_passive_scan_sync(struct hci_dev *hdev)
{
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "disable scanning failed: %d", err);
		return err;
	}

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
					   &own_addr_type))
		return 0;

	if (hdev->enable_advmon_interleave_scan &&
	    hci_update_interleaved_scan_sync(hdev))
		return 0;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);

	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = hci_update_accept_list_sync(hdev);

	/* When the controller is using random resolvable addresses and
	 * LE privacy is enabled, controllers that support Extended
	 * Scanner Filter Policies can additionally handle directed
	 * advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	/* Disable all filtering for Mesh */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		filter_policy = 0;
		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
	}

	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);

	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
				   own_addr_type, filter_policy, filter_dups);
}
/* This function controls the passive scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it in the following sequence:
 *
 * If there are devices to scan:
 *
 * Disable Scanning -> Update Accept List ->
 * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
 * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * Otherwise:
 *
 * Disable Scanning
 */
int hci_update_passive_scan_sync(struct hci_dev *hdev)
{
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return 0;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
	    list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		/* If there are no pending LE connections, no devices to
		 * be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		bt_dev_dbg(hdev, "stopping background scanning");

		err = hci_scan_disable_sync(hdev);
		if (err)
			bt_dev_err(hdev, "stop background scanning failed: %d",
				   err);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return 0;

		bt_dev_dbg(hdev, "start background scanning");

		err = hci_passive_scan_sync(hdev);
		if (err)
			bt_dev_err(hdev, "start background scanning failed: %d",
				   err);
	}

	return err;
}

static int update_scan_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_scan_sync(hdev);
}

int hci_update_scan(struct hci_dev *hdev)
{
	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
}

static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

int hci_update_passive_scan(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
				       NULL);
}
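/* Note: the *_sync() helpers in this file are meant to run on the cmd_sync
 * context; hci_update_scan() and hci_update_passive_scan() above are the
 * queueing wrappers that other contexts are expected to call instead.
 */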
int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
{
	int err;

	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
		return 0;

	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(val), &val, HCI_CMD_TIMEOUT);

	if (!err) {
		if (val) {
			hdev->features[1][0] |= LMP_HOST_SC;
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		} else {
			hdev->features[1][0] &= ~LMP_HOST_SC;
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		}
	}

	return err;
}

int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
{
	int err;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    lmp_host_ssp_capable(hdev))
		return 0;

	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	return hci_write_sc_support_sync(hdev, 0x01);
}

int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
{
	struct hci_cp_write_le_host_supported cp;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    !lmp_bredr_capable(hdev))
		return 0;

	/* Check first if we already have the right host state
	 * (host features set)
	 */
	if (le == lmp_host_le_capable(hdev) &&
	    simul == lmp_host_le_br_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.le = le;
	cp.simul = simul;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_powered_update_adv_sync(struct hci_dev *hdev)
{
	struct adv_info *adv, *tmp;
	int err;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	/* If RPA Resolution has not been enabled yet it means the
	 * resolving list is empty and we should attempt to program the
	 * local IRK in order to support using own_addr_type
	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
	 */
	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		hci_le_add_resolve_list_sync(hdev, NULL);
		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
	}

	/* Make sure the controller has a good default for
	 * advertising data. This also applies to the case
	 * where BR/EDR was toggled during the AUTO_OFF phase.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev)) {
			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!err)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			err = hci_update_adv_data_sync(hdev, 0x00);
			if (!err)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hci_enable_advertising_sync(hdev);
	}

	/* Call for each tracked instance to be scheduled */
	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}

static int hci_write_auth_enable_sync(struct hci_dev *hdev)
{
	u8 link_sec;

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
				     sizeof(link_sec), &link_sec,
				     HCI_CMD_TIMEOUT);
}

int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
{
	struct hci_cp_write_page_scan_activity cp;
	u8 type;
	int err = 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		cp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	cp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
		err = __hci_cmd_sync_status(hdev,
					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
		if (err)
			return err;
	}

	if (hdev->page_scan_type != type)
		err = __hci_cmd_sync_status(hdev,
					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
					    sizeof(type), &type,
					    HCI_CMD_TIMEOUT);

	return err;
}
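/* Note: page scan activity is programmed in 0.625 ms units, so the 0x0100
 * interval used for fast connectable above is 256 * 0.625 ms = 160 ms,
 * matching the comment in the code.
 */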
static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->accept_list, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
				     sizeof(val), &val,
				     HCI_CMD_TIMEOUT);
}

int hci_update_scan_sync(struct hci_dev *hdev)
{
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (!hdev_is_powered(hdev))
		return 0;

	if (mgmt_powering_down(hdev))
		return 0;

	if (hdev->scanning_paused)
		return 0;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_accept_list_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return 0;

	return hci_write_scan_enable_sync(hdev, scan);
}

int hci_update_name_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_local_name cp;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
				     sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

/* This function performs the powered update HCI command sequence after the
 * HCI init sequence, which ends up resetting all states. The sequence is as
 * follows:
 *
 * HCI_SSP_ENABLED(Enable SSP)
 * HCI_LE_ENABLED(Enable LE)
 * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
 * Update adv data)
 * Enable Authentication
 * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
 * Set Name -> Set EIR)
 * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
 */
int hci_powered_update_sync(struct hci_dev *hdev)
{
	int err;

	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	err = hci_write_ssp_mode_sync(hdev, 0x01);
	if (err)
		return err;

	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
	if (err)
		return err;

	err = hci_powered_update_adv_sync(hdev);
	if (err)
		return err;

	err = hci_write_auth_enable_sync(hdev);
	if (err)
		return err;

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			hci_write_fast_connectable_sync(hdev, true);
		else
			hci_write_fast_connectable_sync(hdev, false);
		hci_update_scan_sync(hdev);
		hci_update_class_sync(hdev);
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
3380 * 3381 * In case BR/EDR has been disabled on a dual-mode controller 3382 * and a static address has been configured, then use that 3383 * address instead of the public BR/EDR address. 3384 */ 3385 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 3386 (!bacmp(&hdev->bdaddr, BDADDR_ANY) && 3387 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) { 3388 if (bacmp(&hdev->static_addr, BDADDR_ANY)) 3389 return hci_set_random_addr_sync(hdev, 3390 &hdev->static_addr); 3391 } 3392 3393 return 0; 3394 } 3395 3396 /** 3397 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address 3398 * (BD_ADDR) for a HCI device from 3399 * a firmware node property. 3400 * @hdev: The HCI device 3401 * 3402 * Search the firmware node for 'local-bd-address'. 3403 * 3404 * All-zero BD addresses are rejected, because those could be properties 3405 * that exist in the firmware tables, but were not updated by the firmware. For 3406 * example, the DTS could define 'local-bd-address', with zero BD addresses. 3407 */ 3408 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) 3409 { 3410 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); 3411 bdaddr_t ba; 3412 int ret; 3413 3414 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", 3415 (u8 *)&ba, sizeof(ba)); 3416 if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) 3417 return; 3418 3419 bacpy(&hdev->public_addr, &ba); 3420 } 3421 3422 struct hci_init_stage { 3423 int (*func)(struct hci_dev *hdev); 3424 }; 3425 3426 /* Run init stage NULL terminated function table */ 3427 static int hci_init_stage_sync(struct hci_dev *hdev, 3428 const struct hci_init_stage *stage) 3429 { 3430 size_t i; 3431 3432 for (i = 0; stage[i].func; i++) { 3433 int err; 3434 3435 err = stage[i].func(hdev); 3436 if (err) 3437 return err; 3438 } 3439 3440 return 0; 3441 } 3442 3443 /* Read Local Version */ 3444 static int hci_read_local_version_sync(struct hci_dev *hdev) 3445 { 3446 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, 3447 0, NULL, HCI_CMD_TIMEOUT); 3448 } 3449 3450 /* Read BD Address */ 3451 static int hci_read_bd_addr_sync(struct hci_dev *hdev) 3452 { 3453 return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, 3454 0, NULL, HCI_CMD_TIMEOUT); 3455 } 3456 3457 #define HCI_INIT(_func) \ 3458 { \ 3459 .func = _func, \ 3460 } 3461 3462 static const struct hci_init_stage hci_init0[] = { 3463 /* HCI_OP_READ_LOCAL_VERSION */ 3464 HCI_INIT(hci_read_local_version_sync), 3465 /* HCI_OP_READ_BD_ADDR */ 3466 HCI_INIT(hci_read_bd_addr_sync), 3467 {} 3468 }; 3469 3470 int hci_reset_sync(struct hci_dev *hdev) 3471 { 3472 int err; 3473 3474 set_bit(HCI_RESET, &hdev->flags); 3475 3476 err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, 3477 HCI_CMD_TIMEOUT); 3478 if (err) 3479 return err; 3480 3481 return 0; 3482 } 3483 3484 static int hci_init0_sync(struct hci_dev *hdev) 3485 { 3486 int err; 3487 3488 bt_dev_dbg(hdev, ""); 3489 3490 /* Reset */ 3491 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 3492 err = hci_reset_sync(hdev); 3493 if (err) 3494 return err; 3495 } 3496 3497 return hci_init_stage_sync(hdev, hci_init0); 3498 } 3499 3500 static int hci_unconf_init_sync(struct hci_dev *hdev) 3501 { 3502 int err; 3503 3504 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 3505 return 0; 3506 3507 err = hci_init0_sync(hdev); 3508 if (err < 0) 3509 return err; 3510 3511 if (hci_dev_test_flag(hdev, HCI_SETUP)) 3512 hci_debugfs_create_basic(hdev); 3513 3514 return 0; 3515 } 3516 3517 /* Read Local Supported Features. 
*/ 3518 static int hci_read_local_features_sync(struct hci_dev *hdev) 3519 { 3520 /* Not all AMP controllers support this command */ 3521 if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) 3522 return 0; 3523 3524 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, 3525 0, NULL, HCI_CMD_TIMEOUT); 3526 } 3527 3528 /* BR Controller init stage 1 command sequence */ 3529 static const struct hci_init_stage br_init1[] = { 3530 /* HCI_OP_READ_LOCAL_FEATURES */ 3531 HCI_INIT(hci_read_local_features_sync), 3532 /* HCI_OP_READ_LOCAL_VERSION */ 3533 HCI_INIT(hci_read_local_version_sync), 3534 /* HCI_OP_READ_BD_ADDR */ 3535 HCI_INIT(hci_read_bd_addr_sync), 3536 {} 3537 }; 3538 3539 /* Read Local Commands */ 3540 static int hci_read_local_cmds_sync(struct hci_dev *hdev) 3541 { 3542 /* All Bluetooth 1.2 and later controllers should support the 3543 * HCI command for reading the local supported commands. 3544 * 3545 * Unfortunately some controllers indicate Bluetooth 1.2 support, 3546 * but do not have support for this command. If that is the case, 3547 * the driver can quirk the behavior and skip reading the local 3548 * supported commands. 3549 */ 3550 if (hdev->hci_ver > BLUETOOTH_VER_1_1 && 3551 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) 3552 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, 3553 0, NULL, HCI_CMD_TIMEOUT); 3554 3555 return 0; 3556 } 3557 3558 /* Read Local AMP Info */ 3559 static int hci_read_local_amp_info_sync(struct hci_dev *hdev) 3560 { 3561 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 3562 0, NULL, HCI_CMD_TIMEOUT); 3563 } 3564 3565 /* Read Data Blk size */ 3566 static int hci_read_data_block_size_sync(struct hci_dev *hdev) 3567 { 3568 return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 3569 0, NULL, HCI_CMD_TIMEOUT); 3570 } 3571 3572 /* Read Flow Control Mode */ 3573 static int hci_read_flow_control_mode_sync(struct hci_dev *hdev) 3574 { 3575 return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 3576 0, NULL, HCI_CMD_TIMEOUT); 3577 } 3578 3579 /* Read Location Data */ 3580 static int hci_read_location_data_sync(struct hci_dev *hdev) 3581 { 3582 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA, 3583 0, NULL, HCI_CMD_TIMEOUT); 3584 } 3585 3586 /* AMP Controller init stage 1 command sequence */ 3587 static const struct hci_init_stage amp_init1[] = { 3588 /* HCI_OP_READ_LOCAL_VERSION */ 3589 HCI_INIT(hci_read_local_version_sync), 3590 /* HCI_OP_READ_LOCAL_COMMANDS */ 3591 HCI_INIT(hci_read_local_cmds_sync), 3592 /* HCI_OP_READ_LOCAL_AMP_INFO */ 3593 HCI_INIT(hci_read_local_amp_info_sync), 3594 /* HCI_OP_READ_DATA_BLOCK_SIZE */ 3595 HCI_INIT(hci_read_data_block_size_sync), 3596 /* HCI_OP_READ_FLOW_CONTROL_MODE */ 3597 HCI_INIT(hci_read_flow_control_mode_sync), 3598 /* HCI_OP_READ_LOCATION_DATA */ 3599 HCI_INIT(hci_read_location_data_sync), 3600 {} 3601 }; 3602 3603 static int hci_init1_sync(struct hci_dev *hdev) 3604 { 3605 int err; 3606 3607 bt_dev_dbg(hdev, ""); 3608 3609 /* Reset */ 3610 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 3611 err = hci_reset_sync(hdev); 3612 if (err) 3613 return err; 3614 } 3615 3616 switch (hdev->dev_type) { 3617 case HCI_PRIMARY: 3618 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; 3619 return hci_init_stage_sync(hdev, br_init1); 3620 case HCI_AMP: 3621 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; 3622 return hci_init_stage_sync(hdev, amp_init1); 3623 default: 3624 bt_dev_err(hdev, "Unknown device type %d", 
hdev->dev_type); 3625 break; 3626 } 3627 3628 return 0; 3629 } 3630 3631 /* AMP Controller init stage 2 command sequence */ 3632 static const struct hci_init_stage amp_init2[] = { 3633 /* HCI_OP_READ_LOCAL_FEATURES */ 3634 HCI_INIT(hci_read_local_features_sync), 3635 {} 3636 }; 3637 3638 /* Read Buffer Size (ACL mtu, max pkt, etc.) */ 3639 static int hci_read_buffer_size_sync(struct hci_dev *hdev) 3640 { 3641 return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, 3642 0, NULL, HCI_CMD_TIMEOUT); 3643 } 3644 3645 /* Read Class of Device */ 3646 static int hci_read_dev_class_sync(struct hci_dev *hdev) 3647 { 3648 return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV, 3649 0, NULL, HCI_CMD_TIMEOUT); 3650 } 3651 3652 /* Read Local Name */ 3653 static int hci_read_local_name_sync(struct hci_dev *hdev) 3654 { 3655 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, 3656 0, NULL, HCI_CMD_TIMEOUT); 3657 } 3658 3659 /* Read Voice Setting */ 3660 static int hci_read_voice_setting_sync(struct hci_dev *hdev) 3661 { 3662 return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 3663 0, NULL, HCI_CMD_TIMEOUT); 3664 } 3665 3666 /* Read Number of Supported IAC */ 3667 static int hci_read_num_supported_iac_sync(struct hci_dev *hdev) 3668 { 3669 return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC, 3670 0, NULL, HCI_CMD_TIMEOUT); 3671 } 3672 3673 /* Read Current IAC LAP */ 3674 static int hci_read_current_iac_lap_sync(struct hci_dev *hdev) 3675 { 3676 return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP, 3677 0, NULL, HCI_CMD_TIMEOUT); 3678 } 3679 3680 static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, 3681 u8 cond_type, bdaddr_t *bdaddr, 3682 u8 auto_accept) 3683 { 3684 struct hci_cp_set_event_filter cp; 3685 3686 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 3687 return 0; 3688 3689 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) 3690 return 0; 3691 3692 memset(&cp, 0, sizeof(cp)); 3693 cp.flt_type = flt_type; 3694 3695 if (flt_type != HCI_FLT_CLEAR_ALL) { 3696 cp.cond_type = cond_type; 3697 bacpy(&cp.addr_conn_flt.bdaddr, bdaddr); 3698 cp.addr_conn_flt.auto_accept = auto_accept; 3699 } 3700 3701 return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT, 3702 flt_type == HCI_FLT_CLEAR_ALL ? 3703 sizeof(cp.flt_type) : sizeof(cp), &cp, 3704 HCI_CMD_TIMEOUT); 3705 } 3706 3707 static int hci_clear_event_filter_sync(struct hci_dev *hdev) 3708 { 3709 if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) 3710 return 0; 3711 3712 /* In theory the state machine should not reach here unless 3713 * a hci_set_event_filter_sync() call succeeds, but we do 3714 * the check both for parity and as a future reminder. 
 */
	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
		return 0;

	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
					 BDADDR_ANY, 0x00);
}

/* Connection accept timeout ~20 secs */
static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
{
	__le16 param = cpu_to_le16(0x7d00);

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
				     sizeof(param), &param, HCI_CMD_TIMEOUT);
}

/* BR Controller init stage 2 command sequence */
static const struct hci_init_stage br_init2[] = {
	/* HCI_OP_READ_BUFFER_SIZE */
	HCI_INIT(hci_read_buffer_size_sync),
	/* HCI_OP_READ_CLASS_OF_DEV */
	HCI_INIT(hci_read_dev_class_sync),
	/* HCI_OP_READ_LOCAL_NAME */
	HCI_INIT(hci_read_local_name_sync),
	/* HCI_OP_READ_VOICE_SETTING */
	HCI_INIT(hci_read_voice_setting_sync),
	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
	HCI_INIT(hci_read_num_supported_iac_sync),
	/* HCI_OP_READ_CURRENT_IAC_LAP */
	HCI_INIT(hci_read_current_iac_lap_sync),
	/* HCI_OP_SET_EVENT_FLT */
	HCI_INIT(hci_clear_event_filter_sync),
	/* HCI_OP_WRITE_CA_TIMEOUT */
	HCI_INIT(hci_write_ca_timeout_sync),
	{}
};

static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
{
	u8 mode = 0x01;

	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	/* When SSP is available, the host features page should be
	 * available as well. However some controllers list the max_page
	 * as 0 as long as SSP has not been enabled. To achieve proper
	 * debugging output, force the minimum max_page to 1 at least.
	 */
	hdev->max_page = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
}

static int hci_write_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(&cp, 0, sizeof(cp));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
{
	u8 mode;

	if (!lmp_inq_rssi_capable(hdev) &&
	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		return 0;

	/* If Extended Inquiry Result events are supported, then
	 * they are clearly preferred over Inquiry Result with RSSI
	 * events.
	 */
	mode = lmp_ext_inq_capable(hdev) ?
0x02 : 0x01; 3799 3800 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, 3801 sizeof(mode), &mode, HCI_CMD_TIMEOUT); 3802 } 3803 3804 static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) 3805 { 3806 if (!lmp_inq_tx_pwr_capable(hdev)) 3807 return 0; 3808 3809 return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 3810 0, NULL, HCI_CMD_TIMEOUT); 3811 } 3812 3813 static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) 3814 { 3815 struct hci_cp_read_local_ext_features cp; 3816 3817 if (!lmp_ext_feat_capable(hdev)) 3818 return 0; 3819 3820 memset(&cp, 0, sizeof(cp)); 3821 cp.page = page; 3822 3823 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, 3824 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 3825 } 3826 3827 static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) 3828 { 3829 return hci_read_local_ext_features_sync(hdev, 0x01); 3830 } 3831 3832 /* HCI Controller init stage 2 command sequence */ 3833 static const struct hci_init_stage hci_init2[] = { 3834 /* HCI_OP_READ_LOCAL_COMMANDS */ 3835 HCI_INIT(hci_read_local_cmds_sync), 3836 /* HCI_OP_WRITE_SSP_MODE */ 3837 HCI_INIT(hci_write_ssp_mode_1_sync), 3838 /* HCI_OP_WRITE_EIR */ 3839 HCI_INIT(hci_write_eir_sync), 3840 /* HCI_OP_WRITE_INQUIRY_MODE */ 3841 HCI_INIT(hci_write_inquiry_mode_sync), 3842 /* HCI_OP_READ_INQ_RSP_TX_POWER */ 3843 HCI_INIT(hci_read_inq_rsp_tx_power_sync), 3844 /* HCI_OP_READ_LOCAL_EXT_FEATURES */ 3845 HCI_INIT(hci_read_local_ext_features_1_sync), 3846 /* HCI_OP_WRITE_AUTH_ENABLE */ 3847 HCI_INIT(hci_write_auth_enable_sync), 3848 {} 3849 }; 3850 3851 /* Read LE Buffer Size */ 3852 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) 3853 { 3854 /* Use Read LE Buffer Size V2 if supported */ 3855 if (iso_capable(hdev) && hdev->commands[41] & 0x20) 3856 return __hci_cmd_sync_status(hdev, 3857 HCI_OP_LE_READ_BUFFER_SIZE_V2, 3858 0, NULL, HCI_CMD_TIMEOUT); 3859 3860 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 3861 0, NULL, HCI_CMD_TIMEOUT); 3862 } 3863 3864 /* Read LE Local Supported Features */ 3865 static int hci_le_read_local_features_sync(struct hci_dev *hdev) 3866 { 3867 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 3868 0, NULL, HCI_CMD_TIMEOUT); 3869 } 3870 3871 /* Read LE Supported States */ 3872 static int hci_le_read_supported_states_sync(struct hci_dev *hdev) 3873 { 3874 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 3875 0, NULL, HCI_CMD_TIMEOUT); 3876 } 3877 3878 /* LE Controller init stage 2 command sequence */ 3879 static const struct hci_init_stage le_init2[] = { 3880 /* HCI_OP_LE_READ_LOCAL_FEATURES */ 3881 HCI_INIT(hci_le_read_local_features_sync), 3882 /* HCI_OP_LE_READ_BUFFER_SIZE */ 3883 HCI_INIT(hci_le_read_buffer_size_sync), 3884 /* HCI_OP_LE_READ_SUPPORTED_STATES */ 3885 HCI_INIT(hci_le_read_supported_states_sync), 3886 {} 3887 }; 3888 3889 static int hci_init2_sync(struct hci_dev *hdev) 3890 { 3891 int err; 3892 3893 bt_dev_dbg(hdev, ""); 3894 3895 if (hdev->dev_type == HCI_AMP) 3896 return hci_init_stage_sync(hdev, amp_init2); 3897 3898 err = hci_init_stage_sync(hdev, hci_init2); 3899 if (err) 3900 return err; 3901 3902 if (lmp_bredr_capable(hdev)) { 3903 err = hci_init_stage_sync(hdev, br_init2); 3904 if (err) 3905 return err; 3906 } else { 3907 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); 3908 } 3909 3910 if (lmp_le_capable(hdev)) { 3911 err = hci_init_stage_sync(hdev, le_init2); 3912 if (err) 3913 return err; 3914 /* LE-only controllers have LE implicitly 
enabled */
		if (!lmp_bredr_capable(hdev))
			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	return 0;
}

static int hci_set_event_mask_sync(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */

		/* Don't set Disconnect Complete and mode change when
		 * suspended as that would wake up the host when disconnecting
		 * due to suspend.
		 */
		if (hdev->suspended) {
			events[0] &= 0xef;
			events[2] &= 0xf7;
		}
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			/* Don't set Disconnect Complete when suspended as that
			 * would wake up the host when disconnecting due to
			 * suspend.
			 */
			if (!hdev->suspended)
				events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
3971 */ 3972 if (hdev->commands[2] & 0x80) 3973 events[1] |= 0x08; /* Read Remote Version Information 3974 * Complete 3975 */ 3976 3977 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { 3978 events[0] |= 0x80; /* Encryption Change */ 3979 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 3980 } 3981 } 3982 3983 if (lmp_inq_rssi_capable(hdev) || 3984 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) 3985 events[4] |= 0x02; /* Inquiry Result with RSSI */ 3986 3987 if (lmp_ext_feat_capable(hdev)) 3988 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 3989 3990 if (lmp_esco_capable(hdev)) { 3991 events[5] |= 0x08; /* Synchronous Connection Complete */ 3992 events[5] |= 0x10; /* Synchronous Connection Changed */ 3993 } 3994 3995 if (lmp_sniffsubr_capable(hdev)) 3996 events[5] |= 0x20; /* Sniff Subrating */ 3997 3998 if (lmp_pause_enc_capable(hdev)) 3999 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 4000 4001 if (lmp_ext_inq_capable(hdev)) 4002 events[5] |= 0x40; /* Extended Inquiry Result */ 4003 4004 if (lmp_no_flush_capable(hdev)) 4005 events[7] |= 0x01; /* Enhanced Flush Complete */ 4006 4007 if (lmp_lsto_capable(hdev)) 4008 events[6] |= 0x80; /* Link Supervision Timeout Changed */ 4009 4010 if (lmp_ssp_capable(hdev)) { 4011 events[6] |= 0x01; /* IO Capability Request */ 4012 events[6] |= 0x02; /* IO Capability Response */ 4013 events[6] |= 0x04; /* User Confirmation Request */ 4014 events[6] |= 0x08; /* User Passkey Request */ 4015 events[6] |= 0x10; /* Remote OOB Data Request */ 4016 events[6] |= 0x20; /* Simple Pairing Complete */ 4017 events[7] |= 0x04; /* User Passkey Notification */ 4018 events[7] |= 0x08; /* Keypress Notification */ 4019 events[7] |= 0x10; /* Remote Host Supported 4020 * Features Notification 4021 */ 4022 } 4023 4024 if (lmp_le_capable(hdev)) 4025 events[7] |= 0x20; /* LE Meta-Event */ 4026 4027 return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, 4028 sizeof(events), events, HCI_CMD_TIMEOUT); 4029 } 4030 4031 static int hci_read_stored_link_key_sync(struct hci_dev *hdev) 4032 { 4033 struct hci_cp_read_stored_link_key cp; 4034 4035 if (!(hdev->commands[6] & 0x20) || 4036 test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) 4037 return 0; 4038 4039 memset(&cp, 0, sizeof(cp)); 4040 bacpy(&cp.bdaddr, BDADDR_ANY); 4041 cp.read_all = 0x01; 4042 4043 return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, 4044 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4045 } 4046 4047 static int hci_setup_link_policy_sync(struct hci_dev *hdev) 4048 { 4049 struct hci_cp_write_def_link_policy cp; 4050 u16 link_policy = 0; 4051 4052 if (!(hdev->commands[5] & 0x10)) 4053 return 0; 4054 4055 memset(&cp, 0, sizeof(cp)); 4056 4057 if (lmp_rswitch_capable(hdev)) 4058 link_policy |= HCI_LP_RSWITCH; 4059 if (lmp_hold_capable(hdev)) 4060 link_policy |= HCI_LP_HOLD; 4061 if (lmp_sniff_capable(hdev)) 4062 link_policy |= HCI_LP_SNIFF; 4063 if (lmp_park_capable(hdev)) 4064 link_policy |= HCI_LP_PARK; 4065 4066 cp.policy = cpu_to_le16(link_policy); 4067 4068 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 4069 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4070 } 4071 4072 static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) 4073 { 4074 if (!(hdev->commands[8] & 0x01)) 4075 return 0; 4076 4077 return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 4078 0, NULL, HCI_CMD_TIMEOUT); 4079 } 4080 4081 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) 4082 { 4083 if (!(hdev->commands[18] & 0x04) || 4084 

static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[18] & 0x04) ||
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
{
	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (!(hdev->commands[13] & 0x01))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read features beyond page 1 if available */
static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
{
	u8 page;
	int err;

	if (!lmp_ext_feat_capable(hdev))
		return 0;

	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
	     page++) {
		err = hci_read_local_ext_features_sync(hdev, page);
		if (err)
			return err;
	}

	return 0;
}

/* HCI Controller init stage 3 command sequence */
static const struct hci_init_stage hci_init3[] = {
	/* HCI_OP_SET_EVENT_MASK */
	HCI_INIT(hci_set_event_mask_sync),
	/* HCI_OP_READ_STORED_LINK_KEY */
	HCI_INIT(hci_read_stored_link_key_sync),
	/* HCI_OP_WRITE_DEF_LINK_POLICY */
	HCI_INIT(hci_setup_link_policy_sync),
	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
	HCI_INIT(hci_read_page_scan_activity_sync),
	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_read_def_err_data_reporting_sync),
	/* HCI_OP_READ_PAGE_SCAN_TYPE */
	HCI_INIT(hci_read_page_scan_type_sync),
	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
	HCI_INIT(hci_read_local_ext_features_all_sync),
	{}
};
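
/* Tables like hci_init3[] are walked in order by hci_init_stage_sync();
 * each HCI_INIT() callback either issues its command or returns 0 as a
 * no-op when the controller lacks support, and the first non-zero return
 * is expected to abort the remaining entries (see hci_init3_sync() below).
 */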

static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
{
	u8 events[8];

	if (!lmp_le_capable(hdev))
		return 0;

	memset(events, 0, sizeof(events));

	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
		events[0] |= 0x10; /* LE Long Term Key Request */

	/* If the controller supports the Connection Parameters Request
	 * Link Layer Procedure, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
		/* LE Remote Connection Parameter Request */
		events[0] |= 0x20;

	/* If the controller supports the Data Length Extension
	 * feature, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
		events[0] |= 0x40; /* LE Data Length Change */

	/* If the controller supports the LL Privacy feature or LE Extended
	 * Advertising, enable the corresponding event.
	 */
	if (use_enhanced_conn_complete(hdev))
		events[1] |= 0x02; /* LE Enhanced Connection Complete */

	/* If the controller supports Extended Scanner Filter
	 * Policies, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
		events[1] |= 0x04; /* LE Direct Advertising Report */

	/* If the controller supports the Channel Selection Algorithm #2
	 * feature, enable the corresponding event.
	 */
	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
		events[2] |= 0x08; /* LE Channel Selection Algorithm */

	/* If the controller supports the LE Set Scan Enable command,
	 * enable the corresponding advertising report event.
	 */
	if (hdev->commands[26] & 0x08)
		events[0] |= 0x02; /* LE Advertising Report */

	/* If the controller supports the LE Create Connection
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[26] & 0x10)
		events[0] |= 0x01; /* LE Connection Complete */

	/* If the controller supports the LE Connection Update
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[27] & 0x04)
		events[0] |= 0x04; /* LE Connection Update Complete */

	/* If the controller supports the LE Read Remote Used Features
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[27] & 0x20)
		/* LE Read Remote Used Features Complete */
		events[0] |= 0x08;

	/* If the controller supports the LE Read Local P-256
	 * Public Key command, enable the corresponding event.
	 */
	if (hdev->commands[34] & 0x02)
		/* LE Read Local P-256 Public Key Complete */
		events[0] |= 0x80;

	/* If the controller supports the LE Generate DHKey
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[34] & 0x04)
		events[1] |= 0x01; /* LE Generate DHKey Complete */

	/* If the controller supports the LE Set Default PHY or
	 * LE Set PHY commands, enable the corresponding event.
	 */
	if (hdev->commands[35] & (0x20 | 0x40))
		events[1] |= 0x08; /* LE PHY Update Complete */

	/* If the controller supports the LE Set Extended Scan Parameters
	 * and LE Set Extended Scan Enable commands, enable the
	 * corresponding event.
	 */
	if (use_ext_scan(hdev))
		events[1] |= 0x10; /* LE Extended Advertising Report */

	/* If the controller supports the LE Extended Advertising
	 * command, enable the corresponding event.
	 */
	if (ext_adv_capable(hdev))
		events[2] |= 0x02; /* LE Advertising Set Terminated */

	if (cis_capable(hdev)) {
		events[3] |= 0x01; /* LE CIS Established */
		if (cis_peripheral_capable(hdev))
			events[3] |= 0x02; /* LE CIS Request */
	}

	if (bis_capable(hdev)) {
		events[1] |= 0x20; /* LE PA Report */
		events[1] |= 0x40; /* LE PA Sync Established */
		events[3] |= 0x04; /* LE Create BIG Complete */
		events[3] |= 0x08; /* LE Terminate BIG Complete */
		events[3] |= 0x10; /* LE BIG Sync Established */
		events[3] |= 0x20; /* LE BIG Sync Loss */
		events[4] |= 0x02; /* LE BIG Info Advertising Report */
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}
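
/* Everything set above is a subevent of the LE Meta event (0x3E); the
 * subevents only reach the host because LE Meta itself is enabled in the
 * page 1 mask (events[7] |= 0x20 in hci_set_event_mask_sync()).
 */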

/* Read LE Advertising Channel TX Power */
static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
{
	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
		/* The HCI TS spec forbids mixing legacy and extended
		 * advertising commands, and READ_ADV_TX_POWER is in the
		 * legacy group. So do not call it if extended advertising
		 * is supported, otherwise the controller will return
		 * COMMAND_DISALLOWED for extended commands.
		 */
		return __hci_cmd_sync_status(hdev,
					     HCI_OP_LE_READ_ADV_TX_POWER,
					     0, NULL, HCI_CMD_TIMEOUT);
	}

	return 0;
}

/* Read LE Min/Max Tx Power */
static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[38] & 0x80) ||
	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Accept List Size */
static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[26] & 0x40))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Resolving List Size */
static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[34] & 0x40))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Clear LE Resolving List */
static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[34] & 0x20))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Set RPA timeout */
static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
{
	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);

	if (!(hdev->commands[35] & 0x04) ||
	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
				     sizeof(timeout), &timeout,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Maximum Data Length */
static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
{
	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Suggested Default Data Length */
static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
{
	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Number of Supported Advertising Sets */
static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
{
	if (!ext_adv_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev,
				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Write LE Host Supported */
static int hci_set_le_support_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le == lmp_host_le_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
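
/* cp.simul is deliberately left at 0x00 above: the Simultaneous LE and
 * BR/EDR (Host) feature was deprecated in Core Specification 4.1, so only
 * the LE Supported (Host) bit is ever set.
 */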

/* LE Set Host Feature */
static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_host_feature cp;

	if (!cis_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	/* Connected Isochronous Channels (Host Support) */
	cp.bit_number = 32;
	cp.bit_value = 1;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* LE Controller init stage 3 command sequence */
static const struct hci_init_stage le_init3[] = {
	/* HCI_OP_LE_SET_EVENT_MASK */
	HCI_INIT(hci_le_set_event_mask_sync),
	/* HCI_OP_LE_READ_ADV_TX_POWER */
	HCI_INIT(hci_le_read_adv_tx_power_sync),
	/* HCI_OP_LE_READ_TRANSMIT_POWER */
	HCI_INIT(hci_le_read_tx_power_sync),
	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
	HCI_INIT(hci_le_read_accept_list_size_sync),
	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
	HCI_INIT(hci_le_clear_accept_list_sync),
	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
	HCI_INIT(hci_le_read_resolv_list_size_sync),
	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
	HCI_INIT(hci_le_clear_resolv_list_sync),
	/* HCI_OP_LE_SET_RPA_TIMEOUT */
	HCI_INIT(hci_le_set_rpa_timeout_sync),
	/* HCI_OP_LE_READ_MAX_DATA_LEN */
	HCI_INIT(hci_le_read_max_data_len_sync),
	/* HCI_OP_LE_READ_DEF_DATA_LEN */
	HCI_INIT(hci_le_read_def_data_len_sync),
	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
	HCI_INIT(hci_set_le_support_sync),
	/* HCI_OP_LE_SET_HOST_FEATURE */
	HCI_INIT(hci_le_set_host_feature_sync),
	{}
};

static int hci_init3_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init3);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init3);

	return 0;
}

static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support deleting stored
	 * link keys, but they don't. The quirk lets a driver just disable
	 * this command.
	 */
	if (!(hdev->commands[6] & 0x80) ||
	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
		return 0;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
{
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (!(hdev->commands[22] & 0x04))
		return 0;

	/* If Connectionless Peripheral Broadcast central role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_central_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x08; /* Truncated Page Complete */
		events[2] |= 0x20; /* CPB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Peripheral Broadcast peripheral role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_peripheral_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CPB Receive */
		events[2] |= 0x04; /* CPB Timeout */
		events[2] |= 0x10; /* Peripheral Page Response Timeout */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (!changed)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}

/* Read local codec list if the HCI command is supported */
static int hci_read_local_codecs_sync(struct hci_dev *hdev)
{
	if (hdev->commands[45] & 0x04)
		hci_read_supported_codecs_v2(hdev);
	else if (hdev->commands[29] & 0x20)
		hci_read_supported_codecs(hdev);

	return 0;
}

/* Read local pairing options if the HCI command is supported */
static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[41] & 0x08))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Get MWS transport configuration if the HCI command is supported */
static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
{
	if (!mws_transport_config_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Check for Synchronization Train support */
static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
{
	if (!lmp_sync_train_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Enable Secure Connections if supported and configured */
static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
{
	u8 support = 0x01;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    !bredr_sc_enabled(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				     sizeof(support), &support,
				     HCI_CMD_TIMEOUT);
}
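
/* Erroneous data reporting ties in with wideband speech: with mSBC over
 * SCO the host typically wants partially corrupted packets delivered, so
 * the codec can attempt packet loss concealment, rather than having them
 * dropped. The next helper therefore simply mirrors the
 * HCI_WIDEBAND_SPEECH_ENABLED flag.
 */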

/* Set erroneous data reporting, if supported, to the wideband speech
 * setting value
 */
static int hci_set_err_data_report_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_def_err_data_reporting cp;
	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);

	if (!(hdev->commands[18] & 0x08) ||
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		return 0;

	if (enabled == hdev->err_data_reporting)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
					  ERR_DATA_REPORTING_DISABLED;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static const struct hci_init_stage hci_init4[] = {
	/* HCI_OP_DELETE_STORED_LINK_KEY */
	HCI_INIT(hci_delete_stored_link_key_sync),
	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
	HCI_INIT(hci_set_event_mask_page_2_sync),
	/* HCI_OP_READ_LOCAL_CODECS */
	HCI_INIT(hci_read_local_codecs_sync),
	/* HCI_OP_READ_LOCAL_PAIRING_OPTS */
	HCI_INIT(hci_read_local_pairing_opts_sync),
	/* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
	HCI_INIT(hci_get_mws_transport_config_sync),
	/* HCI_OP_READ_SYNC_TRAIN_PARAMS */
	HCI_INIT(hci_read_sync_train_params_sync),
	/* HCI_OP_WRITE_SC_SUPPORT */
	HCI_INIT(hci_write_sc_support_1_sync),
	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_set_err_data_report_sync),
	{}
};

/* Set Suggested Default Data Length to maximum if supported */
static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_write_def_data_len cp;

	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
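
/* hdev->le_max_tx_len and hdev->le_max_tx_time are learned from LE Read
 * Maximum Data Length (hci_le_read_max_data_len_sync() above); the spec
 * caps them at 251 bytes and 17040 us (0x00FB / 0x4290), so the write
 * above simply suggests the largest values this controller advertised.
 */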

/* Set Default PHY parameters if the command is supported, enabling all
 * PHYs that the LE feature bits indicate are supported.
 */
static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_default_phy cp;

	if (!(hdev->commands[35] & 0x20)) {
		/* If the command is not supported it means only 1M PHY is
		 * supported.
		 */
		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.all_phys = 0x00;
	cp.tx_phys = HCI_LE_SET_PHY_1M;
	cp.rx_phys = HCI_LE_SET_PHY_1M;

	/* Enable 2M PHY if supported */
	if (le_2m_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_2M;
		cp.rx_phys |= HCI_LE_SET_PHY_2M;
	}

	/* Enable Coded PHY if supported */
	if (le_coded_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static const struct hci_init_stage le_init4[] = {
	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
	HCI_INIT(hci_le_set_write_def_data_len_sync),
	/* HCI_OP_LE_SET_DEFAULT_PHY */
	HCI_INIT(hci_le_set_default_phy_sync),
	{}
};

static int hci_init4_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init4);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init4);

	return 0;
}

static int hci_init_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_init1_sync(hdev);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = hci_init2_sync(hdev);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = hci_init3_sync(hdev);
	if (err < 0)
		return err;

	err = hci_init4_sync(hdev);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }

static const struct {
	unsigned long quirk;
	const char *desc;
} hci_broken_table[] = {
	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
			 "HCI Read Local Supported Commands not supported"),
	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
			 "HCI Delete Stored Link Key command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
			 "HCI Read Default Erroneous Data Reporting command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
			 "HCI Read Transmit Power Level command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
			 "HCI Set Event Filter command not supported."),
	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
			 "HCI Enhanced Setup Synchronous Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
			 "HCI LE Set Random Private Address Timeout command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(LE_CODED,
			 "HCI LE Coded PHY feature bit is set, "
			 "but its usage is not supported.")
};

/* This function handles the hdev setup stage:
 *
 * Calls hdev->setup
 * Sets up the address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
 */
static int hci_dev_setup_sync(struct hci_dev *hdev)
{
	int ret = 0;
	bool invalid_bdaddr;
	size_t i;

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
		return 0;

	bt_dev_dbg(hdev, "");

	hci_sock_dev_event(hdev, HCI_DEV_SETUP);

	if (hdev->setup)
		ret = hdev->setup(hdev);

	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
	}

	/* The transport driver can set the quirk to mark the
	 * BD_ADDR invalid before creating the HCI device or in
	 * its setup callback.
	 */
	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
	if (!ret) {
		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
		    !bacmp(&hdev->public_addr, BDADDR_ANY))
			hci_dev_get_bd_addr_from_property(hdev);

		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr) {
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
			if (!ret)
				invalid_bdaddr = false;
		}
	}

	/* The transport driver can set these quirks before
	 * creating the HCI device or in its setup callback.
	 *
	 * For the invalid BD_ADDR quirk it is possible that
	 * it becomes a valid address if the bootloader does
	 * provide it (see above).
	 *
	 * In case any of them is set, the controller has to
	 * start up as unconfigured.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    invalid_bdaddr)
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* For an unconfigured controller it is required to
	 * read at least the version information provided by
	 * the Read Local Version Information command.
	 *
	 * If the set_bdaddr driver callback is provided, then
	 * also the original Bluetooth public device address
	 * will be read using the Read BD Address command.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return hci_unconf_init_sync(hdev);

	return ret;
}

/* This function handles the hdev init stage:
 *
 * Calls hci_dev_setup_sync to perform the setup stage
 * Calls hci_init_sync to perform the HCI command init sequence
 */
static int hci_dev_init_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	ret = hci_dev_setup_sync(hdev);

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = hci_init_sync(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * has completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		msft_do_open(hdev);
		aosp_do_open(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	return ret;
}
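
/* msft_do_open() and aosp_do_open() probe the Microsoft and AOSP vendor
 * command extensions; both are skipped for HCI_USER_CHANNEL since a user
 * channel owner gets raw, exclusive access to the controller.
 */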

int hci_dev_open_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	hci_devcd_reset(hdev);

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	ret = hci_dev_init_sync(hdev);
	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = hci_powered_update_sync(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);

		/* Since hci_rx_work() may wake up new cmd_work it should be
		 * flushed first to avoid an unexpected call of
		 * hci_cmd_work().
		 */
		flush_work(&hdev->rx_work);
		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			cancel_delayed_work_sync(&hdev->cmd_timer);
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		if (hdev->req_skb) {
			kfree_skb(hdev->req_skb);
			hdev->req_skb = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	return ret;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		hci_pend_le_list_del_init(p);
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_shutdown(struct hci_dev *hdev)
{
	int err = 0;

	/* Similar to how we first do setup and then set the exclusive access
	 * bit for userspace, we must first unset userchannel and then clean
	 * up. Otherwise, the kernel can't properly use the hci channel to
	 * clean up the controller (some shutdown routines require sending
	 * additional commands to the controller for example).
	 */
	bool was_userchannel =
		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			err = hdev->shutdown(hdev);
	}

	if (was_userchannel)
		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);

	return err;
}

int hci_dev_close_sync(struct hci_dev *hdev)
{
	bool auto_off;
	int err = 0;

	bt_dev_dbg(hdev, "");

	cancel_delayed_work(&hdev->power_off);
	cancel_delayed_work(&hdev->ncmd_timer);
	cancel_delayed_work(&hdev->le_scan_disable);

	hci_request_cancel_all(hdev);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	err = hci_dev_shutdown(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		return err;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
	smp_unregister(hdev);
	hci_dev_unlock(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		aosp_do_close(hdev);
		msft_do_close(hdev);
	}

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		hci_reset_sync(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last request */
	if (hdev->req_skb) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);
	hci_codec_list_clear(&hdev->local_codecs);

	hci_dev_put(hdev);
	return err;
}
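
/* The hci_dev_put() above pairs with the hci_dev_hold() taken in
 * hci_dev_open_sync() when the device successfully came up, keeping the
 * reference count balanced across one open/close cycle.
 */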

/* This function performs the power on HCI command sequence as follows:
 *
 * If the controller is already up (HCI_UP) it performs the
 * hci_powered_update_sync sequence, otherwise it runs hci_dev_open_sync
 * which will follow with hci_powered_update_sync after the init sequence
 * has completed.
 */
static int hci_power_on_sync(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		return hci_powered_update_sync(hdev);
	}

	err = hci_dev_open_sync(hdev);
	if (err < 0)
		return err;

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_close_sync(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}

	return 0;
}

static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_cp_remote_name_req_cancel cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_stop_discovery_sync(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
						    0, NULL, HCI_CMD_TIMEOUT);
			if (err)
				return err;
		}

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);

			err = hci_scan_disable_sync(hdev);
			if (err)
				return err;
		}

	} else {
		err = hci_scan_disable_sync(hdev);
		if (err)
			return err;
	}

	/* Resume advertising if it was paused */
	if (use_ll_privacy(hdev))
		hci_resume_advertising_sync(hdev);

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return 0;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return 0;

		return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
	}

	return 0;
}

static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
					u8 reason)
{
	struct hci_cp_disconn_phy_link cp;

	memset(&cp, 0, sizeof(cp));
	cp.phy_handle = HCI_PHY_HANDLE(handle);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_disconnect cp;

	if (conn->type == AMP_LINK)
		return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);

	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
		/* This is a BIS connection, hci_conn_del will
		 * do the necessary cleanup.
		 */
		hci_dev_lock(hdev);
		hci_conn_failed(conn, reason);
		hci_dev_unlock(hdev);

		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
						sizeof(cp), &cp,
						HCI_EV_DISCONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
				      struct hci_conn *conn, u8 reason)
{
	/* Return reason if scanning since the connection shall probably be
	 * cleaned up directly.
	 */
	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
		return reason;

	if (conn->role == HCI_ROLE_SLAVE ||
	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
				     0, NULL, HCI_CMD_TIMEOUT);
}
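
/* Only the initiating (central) side has an LE Create Connection pending
 * that could be cancelled, hence the HCI_ROLE_SLAVE bail-out above; the
 * HCI_CONN_CANCEL bit additionally guards against sending the cancel
 * twice for the same connection.
 */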

static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
				   u8 reason)
{
	if (conn->type == LE_LINK)
		return hci_le_connect_cancel_sync(hdev, conn, reason);

	if (conn->type == ISO_LINK) {
		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * page 1857:
		 *
		 * If this command is issued for a CIS on the Central and the
		 * CIS is successfully terminated before being established,
		 * then an HCI_LE_CIS_Established event shall also be sent for
		 * this CIS with the Status Operation Cancelled by Host (0x44).
		 */
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			return hci_disconnect_sync(hdev, conn, reason);

		/* CIS with no Create CIS sent have nothing to cancel */
		if (bacmp(&conn->dst, BDADDR_ANY))
			return HCI_ERROR_LOCAL_HOST_TERM;

		/* There is no way to cancel a BIS without terminating the BIG
		 * which is done later on connection cleanup.
		 */
		return 0;
	}

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
						6, &conn->dst,
						HCI_EV_CONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
				     6, &conn->dst, HCI_CMD_TIMEOUT);
}

static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_reject_sync_conn_req cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	/* SCO rejection has its own limited set of
	 * allowed error values (0x0D-0x0F).
	 */
	if (reason < 0x0d || reason > 0x0f)
		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
				  u8 reason)
{
	struct hci_cp_le_reject_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
				u8 reason)
{
	struct hci_cp_reject_conn_req cp;

	if (conn->type == ISO_LINK)
		return hci_le_reject_cis_sync(hdev, conn, reason);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
		return hci_reject_sco_sync(hdev, conn, reason);

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
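
/* The 0x0D-0x0F window in hci_reject_sco_sync() above matches the only
 * rejection reasons the spec allows for synchronous connections: Limited
 * Resources (0x0D), Security Reasons (0x0E) and Unacceptable BD_ADDR
 * (0x0F); anything else is coerced to Limited Resources.
 */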

int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
{
	int err = 0;
	u16 handle = conn->handle;
	bool disconnect = false;
	struct hci_conn *c;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		err = hci_disconnect_sync(hdev, conn, reason);
		break;
	case BT_CONNECT:
		err = hci_connect_cancel_sync(hdev, conn, reason);
		break;
	case BT_CONNECT2:
		err = hci_reject_conn_sync(hdev, conn, reason);
		break;
	case BT_OPEN:
	case BT_BOUND:
		break;
	default:
		disconnect = true;
		break;
	}

	hci_dev_lock(hdev);

	/* Check if the connection has been cleaned up concurrently */
	c = hci_conn_hash_lookup_handle(hdev, handle);
	if (!c || c != conn) {
		err = 0;
		goto unlock;
	}

	/* Clean up the hci_conn object if it cannot be cancelled, as that
	 * likely means the controller and host stack are out of sync, or
	 * in case of LE it was still scanning, so it can be cleaned up
	 * safely.
	 */
	if (disconnect) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, reason);
		hci_conn_del(conn);
	} else {
		hci_conn_failed(conn, reason);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	rcu_read_lock();
	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
		/* Make sure the connection is not freed while unlocking */
		conn = hci_conn_get(conn);
		rcu_read_unlock();
		/* Disregard possible errors since hci_conn_del shall have been
		 * called even in case an error occurred, since it would then
		 * cause hci_conn_failed to be called which calls
		 * hci_conn_del internally.
		 */
		hci_abort_conn_sync(hdev, conn, reason);
		hci_conn_put(conn);
		rcu_read_lock();
	}
	rcu_read_unlock();

	return 0;
}

/* This function performs the power off HCI command sequence as follows:
 *
 * Clear Advertising
 * Stop Discovery
 * Disconnect all connections
 * hci_dev_close_sync
 */
static int hci_power_off_sync(struct hci_dev *hdev)
{
	int err;

	/* If the controller is already down there is nothing to do */
	if (!test_bit(HCI_UP, &hdev->flags))
		return 0;

	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		err = hci_write_scan_enable_sync(hdev, 0x00);
		if (err)
			goto out;
	}

	err = hci_clear_adv_sync(hdev, NULL, false);
	if (err)
		goto out;

	err = hci_stop_discovery_sync(hdev);
	if (err)
		goto out;

	/* Terminated due to Power Off */
	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
	if (err)
		goto out;

	err = hci_dev_close_sync(hdev);

out:
	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
	return err;
}

int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
{
	if (val)
		return hci_power_on_sync(hdev);

	return hci_power_off_sync(hdev);
}

static int hci_write_iac_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00; /* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33; /* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33; /* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
				     (cp.num_iac * 3) + 1, &cp,
				     HCI_CMD_TIMEOUT);
}
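
/* The LAP bytes written above are the inquiry access codes in
 * little-endian order: GIAC = 0x9E8B33 and LIAC = 0x9E8B00, both from the
 * reserved 0x9E8B00-0x9E8B3F inquiry access code range.
 */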

int hci_update_discoverable_sync(struct hci_dev *hdev)
{
	int err = 0;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = hci_write_iac_sync(hdev);
		if (err)
			return err;

		err = hci_update_scan_sync(hdev);
		if (err)
			return err;

		err = hci_update_class_sync(hdev);
		if (err)
			return err;
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		err = hci_update_adv_data_sync(hdev, 0x00);
		if (err)
			return err;

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				err = hci_start_ext_adv_sync(hdev, 0x00);
			else
				err = hci_enable_advertising_sync(hdev);
		}
	}

	return err;
}

static int update_discoverable_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_discoverable_sync(hdev);
}

int hci_update_discoverable(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (hdev_is_powered(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
					  NULL);

	return 0;
}

int hci_update_connectable_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_update_scan_sync(hdev);
	if (err)
		return err;

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			err = hci_start_ext_adv_sync(hdev,
						     hdev->cur_adv_instance);
		else
			err = hci_enable_advertising_sync(hdev);

		if (err)
			return err;
	}

	return hci_update_passive_scan_sync(hdev);
}

static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
{
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	bt_dev_dbg(hdev, "");

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));

	if (hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
{
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	bt_dev_dbg(hdev, "");

	/* If the controller is scanning, it means passive scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	cancel_interleave_scan(hdev);
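
	/* The controller rejects LE Set Scan Parameters with Command
	 * Disallowed while scanning is enabled, so any running passive scan
	 * has to be stopped above before the active scan parameters for
	 * discovery can be programmed.
	 */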

	/* Pause address resolution for active scan and stop advertising if
	 * privacy is enabled.
	 */
	err = hci_pause_addr_resolution(hdev);
	if (err)
		goto failed;

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
					     &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	if (hci_is_adv_monitoring(hdev) ||
	    (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	     hdev->discovery.result_filtering)) {
		/* The duplicates filter should be disabled when some
		 * advertisement monitor is activated, otherwise AdvMon can
		 * only receive one advertisement for one peer during active
		 * scanning, and might report loss to these peers.
		 *
		 * If the controller does strict duplicate filtering and the
		 * discovery requires result filtering, disable the
		 * controller-based filtering, since that can cause reports
		 * that would match the host filter to not be reported.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	}

	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
				  hdev->le_scan_window_discovery,
				  own_addr_type, filter_policy, filter_dup);
	if (!err)
		return err;

failed:
	/* Resume advertising if it was paused */
	if (use_ll_privacy(hdev))
		hci_resume_advertising_sync(hdev);

	/* Resume passive scanning */
	hci_update_passive_scan_sync(hdev);
	return err;
}

static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
	if (err)
		return err;

	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
}

int hci_start_discovery_sync(struct hci_dev *hdev)
{
	unsigned long timeout;
	int err;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			err = hci_start_interleaved_discovery_sync(hdev);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
	return 0;
}

static void hci_suspend_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_suspend_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function disables discovery and marks it as paused */
static int hci_pause_discovery_sync(struct hci_dev *hdev)
{
	int old_state = hdev->discovery.state;
	int err;

	/* If discovery is already stopped/stopping/paused there is nothing
	 * to do.
	 */
	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
	    hdev->discovery_paused)
		return 0;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	err = hci_stop_discovery_sync(hdev);
	if (err)
		return err;

	hdev->discovery_paused = true;
	hdev->discovery_old_state = old_state;
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	return 0;
}

static int hci_update_event_filter_sync(struct hci_dev *hdev)
{
	struct bdaddr_list_with_flags *b;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
	int err;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	/* Some fake CSR controllers lock up after setting this type of
	 * filter, so avoid sending the request altogether.
static int hci_update_event_filter_sync(struct hci_dev *hdev)
{
	struct bdaddr_list_with_flags *b;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
	int err;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	/* Some fake CSR controllers lock up after setting this type of
	 * filter, so avoid sending the request altogether.
	 */
	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
		return 0;

	/* Always clear event filter when starting */
	hci_clear_event_filter_sync(hdev);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
			continue;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);

		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
						HCI_CONN_SETUP_ALLOW_BDADDR,
						&b->bdaddr,
						HCI_CONN_SETUP_AUTO_ON);
		if (err)
			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
				   &b->bdaddr);
		else
			scan = SCAN_PAGE;
	}

	/* Only write the scan enable setting if it actually changed */
	if (!!scan != scanning)
		hci_write_scan_enable_sync(hdev, scan);

	return 0;
}

/* This function disables scanning (BR/EDR and LE) and marks it as paused */
static int hci_pause_scan_sync(struct hci_dev *hdev)
{
	if (hdev->scanning_paused)
		return 0;

	/* Disable page scan if enabled */
	if (test_bit(HCI_PSCAN, &hdev->flags))
		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);

	hci_scan_disable_sync(hdev);

	hdev->scanning_paused = true;

	return 0;
}
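
/* Editorial sketch (hypothetical, not part of this file):
 * hci_update_event_filter_sync() only installs filters for accept-list
 * entries flagged for remote wakeup, so a device that should survive suspend
 * needs that flag set beforehand. Something along these lines, with locking
 * and error handling omitted:
 */
#if 0
static void example_mark_wakeup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, &hdev->accept_list, list) {
		/* bacmp() returns 0 when the addresses are equal */
		if (!bacmp(&b->bdaddr, bdaddr))
			b->flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
	}
}
#endif
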
/* This function performs the HCI suspend procedures in the following order:
 *
 * Pause discovery (active scanning/inquiry)
 * Pause Directed Advertising/Advertising
 * Pause Scanning (passive scanning in case discovery was not active)
 * Disconnect all connections
 * Set suspend_state to BT_SUSPEND_DISCONNECT if hdev cannot wake up,
 * otherwise:
 * Update event mask (only set events that are allowed to wake up the host)
 * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
 * Update passive scanning (lower duty cycle)
 * Set suspend_state to BT_SUSPEND_CONFIGURE_WAKE
 */
int hci_suspend_sync(struct hci_dev *hdev)
{
	int err;

	/* If marked as suspended there is nothing to do */
	if (hdev->suspended)
		return 0;

	/* Mark device as suspended */
	hdev->suspended = true;

	/* Pause discovery if not already stopped */
	hci_pause_discovery_sync(hdev);

	/* Pause other advertisements */
	hci_pause_advertising_sync(hdev);

	/* Suspend monitor filters */
	hci_suspend_monitor_sync(hdev);

	/* Prevent disconnects from causing scanning to be re-enabled */
	hci_pause_scan_sync(hdev);

	if (hci_conn_count(hdev)) {
		/* Soft disconnect everything (power off) */
		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
		if (err) {
			/* Set state to BT_RUNNING so resume doesn't notify */
			hdev->suspend_state = BT_RUNNING;
			hci_resume_sync(hdev);
			return err;
		}

		/* Update event mask so that only the allowed events can wake
		 * up the host.
		 */
		hci_set_event_mask_sync(hdev);
	}

	/* Only configure the accept list if the disconnects succeeded and
	 * wake isn't being prevented.
	 */
	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
		return 0;
	}

	/* Unpause to take care of updating scanning params */
	hdev->scanning_paused = false;

	/* Enable event filter for paired devices */
	hci_update_event_filter_sync(hdev);

	/* Update LE passive scan if enabled */
	hci_update_passive_scan_sync(hdev);

	/* Pause scan changes again. */
	hdev->scanning_paused = true;

	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;

	return 0;
}

/* This function resumes discovery */
static int hci_resume_discovery_sync(struct hci_dev *hdev)
{
	int err;

	/* If discovery is not paused there is nothing to do */
	if (!hdev->discovery_paused)
		return 0;

	hdev->discovery_paused = false;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	err = hci_start_discovery_sync(hdev);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				      DISCOVERY_FINDING);

	return err;
}

static void hci_resume_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_resume_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function resumes scanning and resets the paused flag */
static int hci_resume_scan_sync(struct hci_dev *hdev)
{
	if (!hdev->scanning_paused)
		return 0;

	hdev->scanning_paused = false;

	hci_update_scan_sync(hdev);

	/* Reset passive scanning to normal */
	hci_update_passive_scan_sync(hdev);

	return 0;
}

/* This function performs the HCI resume procedures in the following order:
 *
 * Restore event mask
 * Clear event filter
 * Update passive scanning (normal duty cycle)
 * Resume Directed Advertising/Advertising
 * Resume discovery (active scanning/inquiry)
 */
int hci_resume_sync(struct hci_dev *hdev)
{
	/* If not marked as suspended there is nothing to do */
	if (!hdev->suspended)
		return 0;

	hdev->suspended = false;

	/* Restore event mask */
	hci_set_event_mask_sync(hdev);

	/* Clear any event filters and restore scan state */
	hci_clear_event_filter_sync(hdev);

	/* Resume scanning */
	hci_resume_scan_sync(hdev);

	/* Resume monitor filters */
	hci_resume_monitor_sync(hdev);

	/* Resume other advertisements */
	hci_resume_advertising_sync(hdev);

	/* Resume discovery */
	hci_resume_discovery_sync(hdev);

	return 0;
}
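
/* Editorial summary of the suspend state machine implemented above:
 *
 *   BT_RUNNING --hci_suspend_sync()--> BT_SUSPEND_DISCONNECT
 *                 (wakeup not possible: discovery, advertising and scanning
 *                  paused, all links disconnected)
 *   BT_RUNNING --hci_suspend_sync()--> BT_SUSPEND_CONFIGURE_WAKE
 *                 (event mask and event filter reprogrammed for wakeup,
 *                  passive scanning left running at a lower duty cycle)
 *   either state --hci_resume_sync()--> running again
 *                 (mask, filters, scanning, advertising and discovery
 *                  restored in that order)
 */
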
static bool conn_use_rpa(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
						struct hci_conn *conn)
{
	struct hci_cp_le_set_ext_adv_params cp;
	int err;
	bdaddr_t random_addr;
	u8 own_addr_type;

	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		return err;

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
				     &own_addr_type, &random_addr);
	if (err)
		return err;

	memset(&cp, 0, sizeof(cp));

	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = HCI_TX_POWER_INVALID;
	cp.primary_phy = HCI_ADV_PHY_1M;
	cp.secondary_phy = HCI_ADV_PHY_1M;
	cp.handle = 0x00; /* Use instance 0 for directed adv */
	cp.own_addr_type = own_addr_type;
	cp.peer_addr_type = conn->dst_type;
	bacpy(&cp.peer_addr, &conn->dst);

	/* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the advertising
	 * event property LE_LEGACY_ADV_DIRECT_IND does not support
	 * advertising data. If the advertising set already contains some,
	 * the controller returns the error code 'Invalid HCI Command
	 * Parameters' (0x12), so the adv set for handle 0x00 must be removed
	 * first, since instance 0 is used for directed advertising.
	 */
	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
	if (err)
		return err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Check if the random address needs to be updated */
	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY) &&
	    bacmp(&random_addr, &hdev->random_addr)) {
		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
						       &random_addr);
		if (err)
			return err;
	}

	return hci_enable_ext_advertising_sync(hdev, 0x00);
}
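
/* Editorial sketch: the HCI command sequence issued by
 * hci_le_ext_directed_advertising_sync() for adv set 0x00:
 *
 *   1. LE Remove Advertising Set (handle 0x00, clears stale adv data)
 *   2. LE Set Extended Advertising Parameters (ADV_DIRECT_IND, own and
 *      peer address types filled in above)
 *   3. LE Set Advertising Set Random Address (only when a random address is
 *      in use, is set, and differs from hdev->random_addr)
 *   4. LE Set Extended Advertising Enable
 */
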
static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
					    struct hci_conn *conn)
{
	struct hci_cp_le_set_adv_param cp;
	int status;
	u8 own_addr_type;
	u8 enable;

	if (ext_adv_capable(hdev))
		return hci_le_ext_directed_advertising_sync(hdev, conn);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	/* Some controllers might reject the command if the intervals are not
	 * within range for undirected advertising.
	 * BCM20702A0 is known to be affected by this.
	 */
	cp.min_interval = cpu_to_le16(0x0020);
	cp.max_interval = cpu_to_le16(0x0020);

	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	enable = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static void set_ext_conn_params(struct hci_conn *conn,
				struct hci_cp_le_ext_conn_param *p)
{
	struct hci_dev *hdev = conn->hdev;

	memset(p, 0, sizeof(*p));

	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	p->min_ce_len = cpu_to_le16(0x0000);
	p->max_ce_len = cpu_to_le16(0x0000);
}

static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
				       struct hci_conn *conn, u8 own_addr_type)
{
	struct hci_cp_le_ext_create_conn *cp;
	struct hci_cp_le_ext_conn_param *p;
	u8 data[sizeof(*cp) + sizeof(*p) * 3];
	u32 plen;

	cp = (void *)data;
	p = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	bacpy(&cp->peer_addr, &conn->dst);
	cp->peer_addr_type = conn->dst_type;
	cp->own_addr_type = own_addr_type;

	plen = sizeof(*cp);

	if (scan_1m(hdev)) {
		cp->phys |= LE_SCAN_PHY_1M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_2m(hdev)) {
		cp->phys |= LE_SCAN_PHY_2M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_coded(hdev)) {
		cp->phys |= LE_SCAN_PHY_CODED;
		set_ext_conn_params(conn, p);

		plen += sizeof(*p);
	}

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
					plen, data,
					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}
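
/* Editorial example: the LE Extended Create Connection payload built above
 * is variable length, one parameter block per bit set in cp->phys, in PHY
 * order. With 1M and Coded scanning enabled but 2M disabled:
 *
 *   cp->phys == LE_SCAN_PHY_1M | LE_SCAN_PHY_CODED;
 *   plen     == sizeof(struct hci_cp_le_ext_create_conn) +
 *               2 * sizeof(struct hci_cp_le_ext_conn_param);
 *
 * which is why the data[] buffer reserves room for at most three blocks.
 */
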
static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_create_conn cp;
	struct hci_conn_params *params;
	u8 own_addr_type;
	int err;
	struct hci_conn *conn = data;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	bt_dev_dbg(hdev, "conn %p", conn);

	clear_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->state = BT_CONNECT;

	/* If requested to connect as peripheral use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're actively scanning and simultaneous roles are not
		 * enabled, simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
			hci_conn_del(conn);
			return -EBUSY;
		}

		/* Pause advertising while doing directed advertising. */
		hci_pause_advertising_sync(hdev);

		err = hci_le_directed_advertising_sync(hdev, conn);
		goto done;
	}

	/* Disable advertising if simultaneous roles are not in use. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
		hci_pause_advertising_sync(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If the controller is scanning, stop it, since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
	}

	/* Update the random address, but set require_privacy to false so
	 * that we never connect with a non-resolvable address.
	 */
	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		goto done;

	if (use_ext_conn(hdev)) {
		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
		goto done;
	}

	memset(&cp, 0, sizeof(cp));

	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);

	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
	 *
	 * If this event is unmasked and the HCI_LE_Connection_Complete event
	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
	 * sent when a new connection has been created.
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
				       sizeof(cp), &cp,
				       use_enhanced_conn_complete(hdev) ?
				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
				       HCI_EV_LE_CONN_COMPLETE,
				       conn->conn_timeout, NULL);

done:
	if (err == -ETIMEDOUT)
		hci_le_connect_cancel_sync(hdev, conn, 0x00);

	/* Re-enable advertising after the connection attempt is finished. */
	hci_resume_advertising_sync(hdev);
	return err;
}
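
/* Editorial note on the peripheral path above: when conn->role is
 * HCI_ROLE_SLAVE no LE Create Connection is issued at all. Advertising is
 * paused and directed advertising toward conn->dst is started instead, so
 * the link only comes up if the remote central initiates the connection
 * within conn->conn_timeout.
 */
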
int hci_le_create_cis_sync(struct hci_dev *hdev)
{
	struct {
		struct hci_cp_le_create_cis cp;
		struct hci_cis cis[0x1f];
	} cmd;
	struct hci_conn *conn;
	u8 cig = BT_ISO_QOS_CIG_UNSET;
	u32 timeout = 0;

	/* The spec allows only one pending LE Create CIS command at a time.
	 * If the command is pending now, don't do anything. We check for
	 * pending connections after each CIS Established event.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2566:
	 *
	 * If the Host issues this command before all the
	 * HCI_LE_CIS_Established events from the previous use of the
	 * command have been generated, the Controller shall return the
	 * error code Command Disallowed (0x0C).
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2567:
	 *
	 * When the Controller receives the HCI_LE_Create_CIS command, the
	 * Controller sends the HCI_Command_Status event to the Host. An
	 * HCI_LE_CIS_Established event will be generated for each CIS when it
	 * is established or if it is disconnected or considered lost before
	 * being established; until all the events are generated, the command
	 * remains pending.
	 */

	memset(&cmd, 0, sizeof(cmd));

	hci_dev_lock(hdev);

	rcu_read_lock();

	/* Wait until the previous Create CIS has completed */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			goto done;
	}

	/* Find a CIG with all CIS ready */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_conn *link;

		if (hci_conn_check_create_cis(conn))
			continue;

		cig = conn->iso_qos.ucast.cig;

		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
			if (hci_conn_check_create_cis(link) > 0 &&
			    link->iso_qos.ucast.cig == cig &&
			    link->state != BT_CONNECTED) {
				cig = BT_ISO_QOS_CIG_UNSET;
				break;
			}
		}

		if (cig != BT_ISO_QOS_CIG_UNSET)
			break;
	}

	if (cig == BT_ISO_QOS_CIG_UNSET)
		goto done;

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];

		if (hci_conn_check_create_cis(conn) ||
		    conn->iso_qos.ucast.cig != cig)
			continue;

		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
		cis->acl_handle = cpu_to_le16(conn->parent->handle);
		cis->cis_handle = cpu_to_le16(conn->handle);
		/* Capture the timeout here: conn is a list iterator and must
		 * not be dereferenced after the loop terminates.
		 */
		timeout = conn->conn_timeout;
		cmd.cp.num_cis++;

		if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
			break;
	}

done:
	rcu_read_unlock();

	hci_dev_unlock(hdev);

	if (!cmd.cp.num_cis)
		return 0;

	/* Wait for HCI_LE_CIS_Established */
	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
					sizeof(cmd.cp) + sizeof(cmd.cis[0]) *
					cmd.cp.num_cis, &cmd,
					HCI_EVT_LE_CIS_ESTABLISHED,
					timeout, NULL);
}

int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_remove_cig cp;

	memset(&cp, 0, sizeof(cp));
	cp.cig_id = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_big_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
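
/* Editorial example for hci_le_create_cis_sync(): assume CIG 0x00 holds two
 * CIS connections, both bound and not yet established, and no Create CIS is
 * pending. The scan above selects cig == 0x00, flags both connections with
 * HCI_CONN_CREATE_CIS and batches them into a single LE Create CIS:
 *
 *   cmd.cp.num_cis        = 2;
 *   cmd.cis[0].acl_handle = handle of the first CIS's parent ACL;
 *   cmd.cis[0].cis_handle = first CIS handle;
 *   cmd.cis[1].acl_handle = handle of the second CIS's parent ACL;
 *   cmd.cis[1].cis_handle = second CIS handle;
 *
 * Any remaining CIGs are picked up on later calls, after the pending
 * HCI_LE_CIS_Established events have been generated. The cis[0x1f] array
 * bounds a single command to 31 CIS entries.
 */
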
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (random resolvable).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case privacy is required without a resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy, so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = PTR_UINT(data);

	return hci_update_adv_data_sync(hdev, instance);
}

int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
				  UINT_PTR(instance), NULL);
}
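
/* Editorial example of the NRPA shape enforced above: bdaddr_t stores the
 * address little-endian, so nrpa.b[5] is the most significant byte and
 * "nrpa.b[5] &= 0x3f" clears the two top bits of the whole address. The
 * result always has a leading byte of at most 0x3f (the 00 pattern that
 * marks a non-resolvable private address), never the 11 pattern that would
 * denote a static random address.
 */
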
static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	/* Many controllers disallow HCI Create Connection while an HCI
	 * Inquiry is in progress, so cancel the inquiry first before issuing
	 * HCI Create Connection. This may cause the MGMT discovering state
	 * to become false without user space's request, but that is okay
	 * since the MGMT Discovery APIs do not promise that discovery should
	 * be done forever. Instead, user space monitors the MGMT discovering
	 * state and may request discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
					    NULL, HCI_CMD_TIMEOUT);
		if (err)
			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
					sizeof(cp), &cp,
					HCI_EV_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}

int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
				       NULL);
}

static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		goto done;

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if the connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send the create conn cancel command if
	 * needed.
	 */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}

int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
				       create_le_conn_complete);
}

int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	if (conn->state != BT_OPEN)
		return -EINVAL;

	switch (conn->type) {
	case ACL_LINK:
		return !hci_cmd_sync_dequeue_once(hdev,
						  hci_acl_create_conn_sync,
						  conn, NULL);
	case LE_LINK:
		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
						  conn, create_le_conn_complete);
	}

	return -ENOENT;
}
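
/* Editorial usage sketch (hypothetical caller, not part of this file): an
 * outgoing ACL connection is queued with hci_connect_acl_sync() and, while
 * conn->state is still BT_OPEN (i.e. the queued work has not yet run), can
 * be dequeued again with hci_cancel_connect_sync().
 */
#if 0
static int example_connect_then_cancel(struct hci_dev *hdev,
				       struct hci_conn *conn)
{
	int err = hci_connect_acl_sync(hdev, conn);

	if (err)
		return err;

	/* Returns 0 only if the queued entry was actually removed */
	return hci_cancel_connect_sync(hdev, conn);
}
#endif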