/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>

#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_drv.h>
#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/coredump.h>

/* HCI priority */
#define HCI_PRIO_MAX	7

/* HCI maximum id value */
#define HCI_MAX_ID	10000

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct list_head	all;		/* inq_cache.all */
	struct list_head	list;		/* unknown or resolve */
	enum {
		NAME_NOT_KNOWN,
		NAME_NEEDED,
		NAME_PENDING,
		NAME_KNOWN,
	} name_state;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct discovery_state {
	int			type;
	enum {
		DISCOVERY_STOPPED,
		DISCOVERY_STARTING,
		DISCOVERY_FINDING,
		DISCOVERY_RESOLVING,
		DISCOVERY_STOPPING,
	} state;
	struct list_head	all;		/* All devices found during inquiry */
	struct list_head	unknown;	/* Name state not known */
	struct list_head	resolve;	/* Name needs to be resolved */
	__u32			timestamp;
	bdaddr_t		last_adv_addr;
	u8			last_adv_addr_type;
	s8			last_adv_rssi;
	u32			last_adv_flags;
	u8			last_adv_data[HCI_MAX_EXT_AD_LENGTH];
	u8			last_adv_data_len;
	bool			report_invalid_rssi;
	bool			result_filtering;
	bool			limited;
	s8			rssi;
	u16			uuid_count;
	u8			(*uuids)[16];
	unsigned long		name_resolve_timeout;
};

#define SUSPEND_NOTIFIER_TIMEOUT	msecs_to_jiffies(2000) /* 2 seconds */

enum suspend_tasks {
	SUSPEND_PAUSE_DISCOVERY,
	SUSPEND_UNPAUSE_DISCOVERY,

	SUSPEND_PAUSE_ADVERTISING,
	SUSPEND_UNPAUSE_ADVERTISING,

	SUSPEND_SCAN_DISABLE,
	SUSPEND_SCAN_ENABLE,
	SUSPEND_DISCONNECTING,

	SUSPEND_POWERING_DOWN,

	SUSPEND_PREPARE_NOTIFIER,

	SUSPEND_SET_ADV_FILTER,
	__SUSPEND_NUM_TASKS
};

enum suspended_state {
	BT_RUNNING = 0,
	BT_SUSPEND_DISCONNECT,
	BT_SUSPEND_CONFIGURE_WAKE,
};

struct hci_conn_hash {
	struct list_head list;
	unsigned int     acl_num;
	unsigned int     sco_num;
	unsigned int     iso_num;
	unsigned int
le_num; 131 unsigned int le_num_peripheral; 132 }; 133 134 struct bdaddr_list { 135 struct list_head list; 136 bdaddr_t bdaddr; 137 u8 bdaddr_type; 138 }; 139 140 struct codec_list { 141 struct list_head list; 142 u8 id; 143 __u16 cid; 144 __u16 vid; 145 u8 transport; 146 u8 num_caps; 147 u32 len; 148 struct hci_codec_caps caps[]; 149 }; 150 151 struct bdaddr_list_with_irk { 152 struct list_head list; 153 bdaddr_t bdaddr; 154 u8 bdaddr_type; 155 u8 peer_irk[16]; 156 u8 local_irk[16]; 157 }; 158 159 /* Bitmask of connection flags */ 160 enum hci_conn_flags { 161 HCI_CONN_FLAG_REMOTE_WAKEUP = BIT(0), 162 HCI_CONN_FLAG_DEVICE_PRIVACY = BIT(1), 163 HCI_CONN_FLAG_ADDRESS_RESOLUTION = BIT(2), 164 }; 165 typedef u8 hci_conn_flags_t; 166 167 struct bdaddr_list_with_flags { 168 struct list_head list; 169 bdaddr_t bdaddr; 170 u8 bdaddr_type; 171 hci_conn_flags_t flags; 172 }; 173 174 struct bt_uuid { 175 struct list_head list; 176 u8 uuid[16]; 177 u8 size; 178 u8 svc_hint; 179 }; 180 181 struct blocked_key { 182 struct list_head list; 183 struct rcu_head rcu; 184 u8 type; 185 u8 val[16]; 186 }; 187 188 struct smp_csrk { 189 bdaddr_t bdaddr; 190 u8 bdaddr_type; 191 u8 type; 192 u8 val[16]; 193 }; 194 195 struct smp_ltk { 196 struct list_head list; 197 struct rcu_head rcu; 198 bdaddr_t bdaddr; 199 u8 bdaddr_type; 200 u8 authenticated; 201 u8 type; 202 u8 enc_size; 203 __le16 ediv; 204 __le64 rand; 205 u8 val[16]; 206 }; 207 208 struct smp_irk { 209 struct list_head list; 210 struct rcu_head rcu; 211 bdaddr_t rpa; 212 bdaddr_t bdaddr; 213 u8 addr_type; 214 u8 val[16]; 215 }; 216 217 struct link_key { 218 struct list_head list; 219 struct rcu_head rcu; 220 bdaddr_t bdaddr; 221 u8 type; 222 u8 val[HCI_LINK_KEY_SIZE]; 223 u8 pin_len; 224 }; 225 226 struct oob_data { 227 struct list_head list; 228 bdaddr_t bdaddr; 229 u8 bdaddr_type; 230 u8 present; 231 u8 hash192[16]; 232 u8 rand192[16]; 233 u8 hash256[16]; 234 u8 rand256[16]; 235 }; 236 237 struct adv_info { 238 struct list_head list; 239 bool enabled; 240 bool pending; 241 bool periodic; 242 __u8 mesh; 243 __u8 instance; 244 __u8 handle; 245 __u8 sid; 246 __u32 flags; 247 __u16 timeout; 248 __u16 remaining_time; 249 __u16 duration; 250 __u16 adv_data_len; 251 __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; 252 bool adv_data_changed; 253 __u16 scan_rsp_len; 254 __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; 255 bool scan_rsp_changed; 256 __u16 per_adv_data_len; 257 __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; 258 __s8 tx_power; 259 __u32 min_interval; 260 __u32 max_interval; 261 bdaddr_t random_addr; 262 bool rpa_expired; 263 struct delayed_work rpa_expired_cb; 264 }; 265 266 struct tx_queue { 267 struct sk_buff_head queue; 268 unsigned int extra; 269 unsigned int tracked; 270 }; 271 272 #define HCI_MAX_ADV_INSTANCES 5 273 #define HCI_DEFAULT_ADV_DURATION 2 274 275 #define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F 276 277 #define DATA_CMP(_d1, _l1, _d2, _l2) \ 278 (_l1 == _l2 ? 
memcmp(_d1, _d2, _l1) : _l1 - _l2) 279 280 #define ADV_DATA_CMP(_adv, _data, _len) \ 281 DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len) 282 283 #define SCAN_RSP_CMP(_adv, _data, _len) \ 284 DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len) 285 286 struct monitored_device { 287 struct list_head list; 288 289 bdaddr_t bdaddr; 290 __u8 addr_type; 291 __u16 handle; 292 bool notified; 293 }; 294 295 struct adv_pattern { 296 struct list_head list; 297 __u8 ad_type; 298 __u8 offset; 299 __u8 length; 300 __u8 value[HCI_MAX_EXT_AD_LENGTH]; 301 }; 302 303 struct adv_rssi_thresholds { 304 __s8 low_threshold; 305 __s8 high_threshold; 306 __u16 low_threshold_timeout; 307 __u16 high_threshold_timeout; 308 __u8 sampling_period; 309 }; 310 311 struct adv_monitor { 312 struct list_head patterns; 313 struct adv_rssi_thresholds rssi; 314 __u16 handle; 315 316 enum { 317 ADV_MONITOR_STATE_NOT_REGISTERED, 318 ADV_MONITOR_STATE_REGISTERED, 319 ADV_MONITOR_STATE_OFFLOADED 320 } state; 321 }; 322 323 #define HCI_MIN_ADV_MONITOR_HANDLE 1 324 #define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 325 #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 326 #define HCI_ADV_MONITOR_EXT_NONE 1 327 #define HCI_ADV_MONITOR_EXT_MSFT 2 328 329 #define HCI_MAX_SHORT_NAME_LENGTH 10 330 331 #define HCI_CONN_HANDLE_MAX 0x0eff 332 #define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX) 333 334 /* Min encryption key size to match with SMP */ 335 #define HCI_MIN_ENC_KEY_SIZE 7 336 337 /* Default LE RPA expiry time, 15 minutes */ 338 #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) 339 340 /* Default min/max age of connection information (1s/3s) */ 341 #define DEFAULT_CONN_INFO_MIN_AGE 1000 342 #define DEFAULT_CONN_INFO_MAX_AGE 3000 343 /* Default authenticated payload timeout 30s */ 344 #define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8 345 346 #define HCI_MAX_PAGES 3 347 348 struct hci_dev { 349 struct list_head list; 350 struct mutex lock; 351 352 struct ida unset_handle_ida; 353 354 const char *name; 355 unsigned long flags; 356 __u16 id; 357 __u8 bus; 358 bdaddr_t bdaddr; 359 bdaddr_t setup_addr; 360 bdaddr_t public_addr; 361 bdaddr_t random_addr; 362 bdaddr_t static_addr; 363 __u8 adv_addr_type; 364 __u8 dev_name[HCI_MAX_NAME_LENGTH]; 365 __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH]; 366 __u8 eir[HCI_MAX_EIR_LENGTH]; 367 __u16 appearance; 368 __u8 dev_class[3]; 369 __u8 major_class; 370 __u8 minor_class; 371 __u8 max_page; 372 __u8 features[HCI_MAX_PAGES][8]; 373 __u8 le_features[8]; 374 __u8 le_accept_list_size; 375 __u8 le_resolv_list_size; 376 __u8 le_num_of_adv_sets; 377 __u8 le_states[8]; 378 __u8 mesh_ad_types[16]; 379 __u8 mesh_send_ref; 380 __u8 commands[64]; 381 __u8 hci_ver; 382 __u16 hci_rev; 383 __u8 lmp_ver; 384 __u16 manufacturer; 385 __u16 lmp_subver; 386 __u16 voice_setting; 387 __u8 num_iac; 388 __u16 stored_max_keys; 389 __u16 stored_num_keys; 390 __u8 io_capability; 391 __s8 inq_tx_power; 392 __u8 err_data_reporting; 393 __u16 page_scan_interval; 394 __u16 page_scan_window; 395 __u8 page_scan_type; 396 __u8 le_adv_channel_map; 397 __u16 le_adv_min_interval; 398 __u16 le_adv_max_interval; 399 __u8 le_scan_type; 400 __u16 le_scan_interval; 401 __u16 le_scan_window; 402 __u16 le_scan_int_suspend; 403 __u16 le_scan_window_suspend; 404 __u16 le_scan_int_discovery; 405 __u16 le_scan_window_discovery; 406 __u16 le_scan_int_adv_monitor; 407 __u16 le_scan_window_adv_monitor; 408 __u16 le_scan_int_connect; 409 __u16 le_scan_window_connect; 410 __u16 le_conn_min_interval; 411 __u16 le_conn_max_interval; 
412 __u16 le_conn_latency; 413 __u16 le_supv_timeout; 414 __u16 le_def_tx_len; 415 __u16 le_def_tx_time; 416 __u16 le_max_tx_len; 417 __u16 le_max_tx_time; 418 __u16 le_max_rx_len; 419 __u16 le_max_rx_time; 420 __u8 le_max_key_size; 421 __u8 le_min_key_size; 422 __u16 discov_interleaved_timeout; 423 __u16 conn_info_min_age; 424 __u16 conn_info_max_age; 425 __u16 auth_payload_timeout; 426 __u8 min_enc_key_size; 427 __u8 max_enc_key_size; 428 __u8 pairing_opts; 429 __u8 ssp_debug_mode; 430 __u8 hw_error_code; 431 __u32 clock; 432 __u16 advmon_allowlist_duration; 433 __u16 advmon_no_filter_duration; 434 __u8 enable_advmon_interleave_scan; 435 436 __u16 devid_source; 437 __u16 devid_vendor; 438 __u16 devid_product; 439 __u16 devid_version; 440 441 __u8 def_page_scan_type; 442 __u16 def_page_scan_int; 443 __u16 def_page_scan_window; 444 __u8 def_inq_scan_type; 445 __u16 def_inq_scan_int; 446 __u16 def_inq_scan_window; 447 __u16 def_br_lsto; 448 __u16 def_page_timeout; 449 __u16 def_multi_adv_rotation_duration; 450 __u16 def_le_autoconnect_timeout; 451 __s8 min_le_tx_power; 452 __s8 max_le_tx_power; 453 454 __u16 pkt_type; 455 __u16 esco_type; 456 __u16 link_policy; 457 __u16 link_mode; 458 459 __u32 idle_timeout; 460 __u16 sniff_min_interval; 461 __u16 sniff_max_interval; 462 463 unsigned int auto_accept_delay; 464 465 unsigned long quirks; 466 467 atomic_t cmd_cnt; 468 unsigned int acl_cnt; 469 unsigned int sco_cnt; 470 unsigned int le_cnt; 471 unsigned int iso_cnt; 472 473 unsigned int acl_mtu; 474 unsigned int sco_mtu; 475 unsigned int le_mtu; 476 unsigned int iso_mtu; 477 unsigned int acl_pkts; 478 unsigned int sco_pkts; 479 unsigned int le_pkts; 480 unsigned int iso_pkts; 481 482 unsigned long acl_last_tx; 483 unsigned long le_last_tx; 484 485 __u8 le_tx_def_phys; 486 __u8 le_rx_def_phys; 487 488 struct workqueue_struct *workqueue; 489 struct workqueue_struct *req_workqueue; 490 491 struct work_struct power_on; 492 struct delayed_work power_off; 493 struct work_struct error_reset; 494 struct work_struct cmd_sync_work; 495 struct list_head cmd_sync_work_list; 496 struct mutex cmd_sync_work_lock; 497 struct mutex unregister_lock; 498 struct work_struct cmd_sync_cancel_work; 499 struct work_struct reenable_adv_work; 500 501 __u16 discov_timeout; 502 struct delayed_work discov_off; 503 504 struct delayed_work service_cache; 505 506 struct delayed_work cmd_timer; 507 struct delayed_work ncmd_timer; 508 509 struct work_struct rx_work; 510 struct work_struct cmd_work; 511 struct work_struct tx_work; 512 513 struct delayed_work le_scan_disable; 514 515 struct sk_buff_head rx_q; 516 struct sk_buff_head raw_q; 517 struct sk_buff_head cmd_q; 518 519 struct sk_buff *sent_cmd; 520 struct sk_buff *recv_event; 521 522 struct mutex req_lock; 523 wait_queue_head_t req_wait_q; 524 __u32 req_status; 525 __u32 req_result; 526 struct sk_buff *req_skb; 527 struct sk_buff *req_rsp; 528 529 void *smp_data; 530 void *smp_bredr_data; 531 532 struct discovery_state discovery; 533 534 bool discovery_paused; 535 int advertising_old_state; 536 bool advertising_paused; 537 538 struct notifier_block suspend_notifier; 539 enum suspended_state suspend_state_next; 540 enum suspended_state suspend_state; 541 bool scanning_paused; 542 bool suspended; 543 u8 wake_reason; 544 bdaddr_t wake_addr; 545 u8 wake_addr_type; 546 547 struct hci_conn_hash conn_hash; 548 549 struct list_head mesh_pending; 550 struct mutex mgmt_pending_lock; 551 struct list_head mgmt_pending; 552 struct list_head reject_list; 553 struct list_head 
accept_list; 554 struct list_head uuids; 555 struct list_head link_keys; 556 struct list_head long_term_keys; 557 struct list_head identity_resolving_keys; 558 struct list_head remote_oob_data; 559 struct list_head le_accept_list; 560 struct list_head le_resolv_list; 561 struct list_head le_conn_params; 562 struct list_head pend_le_conns; 563 struct list_head pend_le_reports; 564 struct list_head blocked_keys; 565 struct list_head local_codecs; 566 567 struct hci_dev_stats stat; 568 569 atomic_t promisc; 570 571 const char *hw_info; 572 const char *fw_info; 573 struct dentry *debugfs; 574 575 struct hci_devcoredump dump; 576 577 struct device dev; 578 579 struct rfkill *rfkill; 580 581 DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); 582 hci_conn_flags_t conn_flags; 583 584 __s8 adv_tx_power; 585 __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; 586 __u8 adv_data_len; 587 __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; 588 __u8 scan_rsp_data_len; 589 __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; 590 __u8 per_adv_data_len; 591 592 struct list_head adv_instances; 593 unsigned int adv_instance_cnt; 594 __u8 cur_adv_instance; 595 __u16 adv_instance_timeout; 596 struct delayed_work adv_instance_expire; 597 598 struct idr adv_monitors_idr; 599 unsigned int adv_monitors_cnt; 600 601 __u8 irk[16]; 602 __u32 rpa_timeout; 603 struct delayed_work rpa_expired; 604 bdaddr_t rpa; 605 606 struct delayed_work mesh_send_done; 607 608 enum { 609 INTERLEAVE_SCAN_NONE, 610 INTERLEAVE_SCAN_NO_FILTER, 611 INTERLEAVE_SCAN_ALLOWLIST 612 } interleave_scan_state; 613 614 struct delayed_work interleave_scan; 615 616 struct list_head monitored_devices; 617 bool advmon_pend_notify; 618 619 struct hci_drv *hci_drv; 620 621 #if IS_ENABLED(CONFIG_BT_LEDS) 622 struct led_trigger *power_led; 623 #endif 624 625 #if IS_ENABLED(CONFIG_BT_MSFTEXT) 626 __u16 msft_opcode; 627 void *msft_data; 628 bool msft_curve_validity; 629 #endif 630 631 #if IS_ENABLED(CONFIG_BT_AOSPEXT) 632 bool aosp_capable; 633 bool aosp_quality_report; 634 #endif 635 636 int (*open)(struct hci_dev *hdev); 637 int (*close)(struct hci_dev *hdev); 638 int (*flush)(struct hci_dev *hdev); 639 int (*setup)(struct hci_dev *hdev); 640 int (*shutdown)(struct hci_dev *hdev); 641 int (*send)(struct hci_dev *hdev, struct sk_buff *skb); 642 void (*notify)(struct hci_dev *hdev, unsigned int evt); 643 void (*hw_error)(struct hci_dev *hdev, u8 code); 644 int (*post_init)(struct hci_dev *hdev); 645 int (*set_diag)(struct hci_dev *hdev, bool enable); 646 int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); 647 void (*reset)(struct hci_dev *hdev); 648 bool (*wakeup)(struct hci_dev *hdev); 649 int (*set_quality_report)(struct hci_dev *hdev, bool enable); 650 int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path); 651 int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, 652 struct bt_codec *codec, __u8 *vnd_len, 653 __u8 **vnd_data); 654 u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb); 655 }; 656 657 #define HCI_PHY_HANDLE(handle) (handle & 0xff) 658 659 enum conn_reasons { 660 CONN_REASON_PAIR_DEVICE, 661 CONN_REASON_L2CAP_CHAN, 662 CONN_REASON_SCO_CONNECT, 663 CONN_REASON_ISO_CONNECT, 664 }; 665 666 struct hci_conn { 667 struct list_head list; 668 669 atomic_t refcnt; 670 671 bdaddr_t dst; 672 __u8 dst_type; 673 bdaddr_t src; 674 __u8 src_type; 675 bdaddr_t init_addr; 676 __u8 init_addr_type; 677 bdaddr_t resp_addr; 678 __u8 resp_addr_type; 679 __u8 adv_instance; 680 __u16 handle; 681 __u16 sync_handle; 682 __u8 sid; 683 __u16 state; 684 __u16 mtu; 685 
__u8 mode; 686 __u8 type; 687 __u8 role; 688 bool out; 689 __u8 attempt; 690 __u8 dev_class[3]; 691 __u8 features[HCI_MAX_PAGES][8]; 692 __u16 pkt_type; 693 __u16 link_policy; 694 __u8 key_type; 695 __u8 auth_type; 696 __u8 sec_level; 697 __u8 pending_sec_level; 698 __u8 pin_length; 699 __u8 enc_key_size; 700 __u8 io_capability; 701 __u32 passkey_notify; 702 __u8 passkey_entered; 703 __u16 disc_timeout; 704 __u16 conn_timeout; 705 __u16 setting; 706 __u16 auth_payload_timeout; 707 __u16 le_conn_min_interval; 708 __u16 le_conn_max_interval; 709 __u16 le_conn_interval; 710 __u16 le_conn_latency; 711 __u16 le_supv_timeout; 712 __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH]; 713 __u8 le_adv_data_len; 714 __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN]; 715 __u16 le_per_adv_data_len; 716 __u16 le_per_adv_data_offset; 717 __u8 le_adv_phy; 718 __u8 le_adv_sec_phy; 719 __u8 le_tx_phy; 720 __u8 le_rx_phy; 721 __s8 rssi; 722 __s8 tx_power; 723 __s8 max_tx_power; 724 struct bt_iso_qos iso_qos; 725 __u8 num_bis; 726 __u8 bis[HCI_MAX_ISO_BIS]; 727 728 unsigned long flags; 729 730 enum conn_reasons conn_reason; 731 __u8 abort_reason; 732 733 __u32 clock; 734 __u16 clock_accuracy; 735 736 unsigned long conn_info_timestamp; 737 738 __u8 remote_cap; 739 __u8 remote_auth; 740 __u8 remote_id; 741 742 unsigned int sent; 743 744 struct sk_buff_head data_q; 745 struct list_head chan_list; 746 747 struct tx_queue tx_q; 748 749 struct delayed_work disc_work; 750 struct delayed_work auto_accept_work; 751 struct delayed_work idle_work; 752 struct delayed_work le_conn_timeout; 753 754 struct device dev; 755 struct dentry *debugfs; 756 757 struct hci_dev *hdev; 758 void *l2cap_data; 759 void *sco_data; 760 void *iso_data; 761 762 struct list_head link_list; 763 struct hci_conn *parent; 764 struct hci_link *link; 765 766 struct bt_codec codec; 767 768 void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); 769 void (*security_cfm_cb) (struct hci_conn *conn, u8 status); 770 void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); 771 772 void (*cleanup)(struct hci_conn *conn); 773 }; 774 775 struct hci_link { 776 struct list_head list; 777 struct hci_conn *conn; 778 }; 779 780 struct hci_chan { 781 struct list_head list; 782 __u16 handle; 783 struct hci_conn *conn; 784 struct sk_buff_head data_q; 785 unsigned int sent; 786 __u8 state; 787 }; 788 789 struct hci_conn_params { 790 struct list_head list; 791 struct list_head action; 792 793 bdaddr_t addr; 794 u8 addr_type; 795 796 u16 conn_min_interval; 797 u16 conn_max_interval; 798 u16 conn_latency; 799 u16 supervision_timeout; 800 801 enum { 802 HCI_AUTO_CONN_DISABLED, 803 HCI_AUTO_CONN_REPORT, 804 HCI_AUTO_CONN_DIRECT, 805 HCI_AUTO_CONN_ALWAYS, 806 HCI_AUTO_CONN_LINK_LOSS, 807 HCI_AUTO_CONN_EXPLICIT, 808 } auto_connect; 809 810 struct hci_conn *conn; 811 bool explicit_connect; 812 /* Accessed without hdev->lock: */ 813 hci_conn_flags_t flags; 814 u8 privacy_mode; 815 }; 816 817 extern struct list_head hci_dev_list; 818 extern struct list_head hci_cb_list; 819 extern rwlock_t hci_dev_list_lock; 820 extern struct mutex hci_cb_list_lock; 821 822 #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) 823 #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) 824 #define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags) 825 #define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags) 826 #define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags) 827 #define hci_dev_test_and_clear_flag(hdev, nr) 
test_and_clear_bit((nr), (hdev)->dev_flags) 828 #define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags) 829 830 #define hci_dev_clear_volatile_flags(hdev) \ 831 do { \ 832 hci_dev_clear_flag(hdev, HCI_LE_SCAN); \ 833 hci_dev_clear_flag(hdev, HCI_LE_ADV); \ 834 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\ 835 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ 836 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \ 837 } while (0) 838 839 #define hci_dev_le_state_simultaneous(hdev) \ 840 (!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \ 841 (hdev->le_states[4] & 0x08) && /* Central */ \ 842 (hdev->le_states[4] & 0x40) && /* Peripheral */ \ 843 (hdev->le_states[3] & 0x10)) /* Simultaneous */ 844 845 /* ----- HCI interface to upper protocols ----- */ 846 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); 847 int l2cap_disconn_ind(struct hci_conn *hcon); 848 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); 849 850 #if IS_ENABLED(CONFIG_BT_BREDR) 851 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); 852 void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); 853 #else 854 static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, 855 __u8 *flags) 856 { 857 return 0; 858 } 859 860 static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) 861 { 862 } 863 #endif 864 865 #if IS_ENABLED(CONFIG_BT_LE) 866 int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); 867 void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); 868 #else 869 static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, 870 __u8 *flags) 871 { 872 return 0; 873 } 874 static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, 875 u16 flags) 876 { 877 } 878 #endif 879 880 /* ----- Inquiry cache ----- */ 881 #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ 882 #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ 883 884 static inline void discovery_init(struct hci_dev *hdev) 885 { 886 hdev->discovery.state = DISCOVERY_STOPPED; 887 INIT_LIST_HEAD(&hdev->discovery.all); 888 INIT_LIST_HEAD(&hdev->discovery.unknown); 889 INIT_LIST_HEAD(&hdev->discovery.resolve); 890 hdev->discovery.report_invalid_rssi = true; 891 hdev->discovery.rssi = HCI_RSSI_INVALID; 892 } 893 894 static inline void hci_discovery_filter_clear(struct hci_dev *hdev) 895 { 896 hdev->discovery.result_filtering = false; 897 hdev->discovery.report_invalid_rssi = true; 898 hdev->discovery.rssi = HCI_RSSI_INVALID; 899 hdev->discovery.uuid_count = 0; 900 kfree(hdev->discovery.uuids); 901 hdev->discovery.uuids = NULL; 902 } 903 904 bool hci_discovery_active(struct hci_dev *hdev); 905 906 void hci_discovery_set_state(struct hci_dev *hdev, int state); 907 908 static inline int inquiry_cache_empty(struct hci_dev *hdev) 909 { 910 return list_empty(&hdev->discovery.all); 911 } 912 913 static inline long inquiry_cache_age(struct hci_dev *hdev) 914 { 915 struct discovery_state *c = &hdev->discovery; 916 return jiffies - c->timestamp; 917 } 918 919 static inline long inquiry_entry_age(struct inquiry_entry *e) 920 { 921 return jiffies - e->timestamp; 922 } 923 924 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, 925 bdaddr_t *bdaddr); 926 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, 927 bdaddr_t *bdaddr); 928 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, 929 bdaddr_t *bdaddr, 930 int 
state); 931 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, 932 struct inquiry_entry *ie); 933 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, 934 bool name_known); 935 void hci_inquiry_cache_flush(struct hci_dev *hdev); 936 937 /* ----- HCI Connections ----- */ 938 enum { 939 HCI_CONN_AUTH_PEND, 940 HCI_CONN_ENCRYPT_PEND, 941 HCI_CONN_RSWITCH_PEND, 942 HCI_CONN_MODE_CHANGE_PEND, 943 HCI_CONN_SCO_SETUP_PEND, 944 HCI_CONN_MGMT_CONNECTED, 945 HCI_CONN_SSP_ENABLED, 946 HCI_CONN_SC_ENABLED, 947 HCI_CONN_AES_CCM, 948 HCI_CONN_POWER_SAVE, 949 HCI_CONN_FLUSH_KEY, 950 HCI_CONN_ENCRYPT, 951 HCI_CONN_AUTH, 952 HCI_CONN_SECURE, 953 HCI_CONN_FIPS, 954 HCI_CONN_STK_ENCRYPT, 955 HCI_CONN_AUTH_INITIATOR, 956 HCI_CONN_DROP, 957 HCI_CONN_CANCEL, 958 HCI_CONN_PARAM_REMOVAL_PEND, 959 HCI_CONN_NEW_LINK_KEY, 960 HCI_CONN_SCANNING, 961 HCI_CONN_AUTH_FAILURE, 962 HCI_CONN_PER_ADV, 963 HCI_CONN_BIG_CREATED, 964 HCI_CONN_CREATE_CIS, 965 HCI_CONN_CREATE_BIG_SYNC, 966 HCI_CONN_BIG_SYNC, 967 HCI_CONN_BIG_SYNC_FAILED, 968 HCI_CONN_CREATE_PA_SYNC, 969 HCI_CONN_PA_SYNC, 970 HCI_CONN_PA_SYNC_FAILED, 971 }; 972 973 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) 974 { 975 struct hci_dev *hdev = conn->hdev; 976 return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && 977 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 978 } 979 980 static inline bool hci_conn_sc_enabled(struct hci_conn *conn) 981 { 982 struct hci_dev *hdev = conn->hdev; 983 return hci_dev_test_flag(hdev, HCI_SC_ENABLED) && 984 test_bit(HCI_CONN_SC_ENABLED, &conn->flags); 985 } 986 987 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) 988 { 989 struct hci_conn_hash *h = &hdev->conn_hash; 990 list_add_tail_rcu(&c->list, &h->list); 991 switch (c->type) { 992 case ACL_LINK: 993 h->acl_num++; 994 break; 995 case LE_LINK: 996 h->le_num++; 997 if (c->role == HCI_ROLE_SLAVE) 998 h->le_num_peripheral++; 999 break; 1000 case SCO_LINK: 1001 case ESCO_LINK: 1002 h->sco_num++; 1003 break; 1004 case CIS_LINK: 1005 case BIS_LINK: 1006 h->iso_num++; 1007 break; 1008 } 1009 } 1010 1011 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) 1012 { 1013 struct hci_conn_hash *h = &hdev->conn_hash; 1014 1015 list_del_rcu(&c->list); 1016 synchronize_rcu(); 1017 1018 switch (c->type) { 1019 case ACL_LINK: 1020 h->acl_num--; 1021 break; 1022 case LE_LINK: 1023 h->le_num--; 1024 if (c->role == HCI_ROLE_SLAVE) 1025 h->le_num_peripheral--; 1026 break; 1027 case SCO_LINK: 1028 case ESCO_LINK: 1029 h->sco_num--; 1030 break; 1031 case CIS_LINK: 1032 case BIS_LINK: 1033 h->iso_num--; 1034 break; 1035 } 1036 } 1037 1038 static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type) 1039 { 1040 struct hci_conn_hash *h = &hdev->conn_hash; 1041 switch (type) { 1042 case ACL_LINK: 1043 return h->acl_num; 1044 case LE_LINK: 1045 return h->le_num; 1046 case SCO_LINK: 1047 case ESCO_LINK: 1048 return h->sco_num; 1049 case CIS_LINK: 1050 case BIS_LINK: 1051 return h->iso_num; 1052 default: 1053 return 0; 1054 } 1055 } 1056 1057 static inline unsigned int hci_conn_count(struct hci_dev *hdev) 1058 { 1059 struct hci_conn_hash *c = &hdev->conn_hash; 1060 1061 return c->acl_num + c->sco_num + c->le_num + c->iso_num; 1062 } 1063 1064 static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn) 1065 { 1066 struct hci_conn_hash *h = &hdev->conn_hash; 1067 struct hci_conn *c; 1068 1069 rcu_read_lock(); 1070 1071 list_for_each_entry_rcu(c, &h->list, list) { 1072 if (c == conn) 
{ 1073 rcu_read_unlock(); 1074 return true; 1075 } 1076 } 1077 rcu_read_unlock(); 1078 1079 return false; 1080 } 1081 1082 static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) 1083 { 1084 struct hci_conn_hash *h = &hdev->conn_hash; 1085 struct hci_conn *c; 1086 __u8 type = INVALID_LINK; 1087 1088 rcu_read_lock(); 1089 1090 list_for_each_entry_rcu(c, &h->list, list) { 1091 if (c->handle == handle) { 1092 type = c->type; 1093 break; 1094 } 1095 } 1096 1097 rcu_read_unlock(); 1098 1099 return type; 1100 } 1101 1102 static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, 1103 bdaddr_t *ba, __u8 bis) 1104 { 1105 struct hci_conn_hash *h = &hdev->conn_hash; 1106 struct hci_conn *c; 1107 1108 rcu_read_lock(); 1109 1110 list_for_each_entry_rcu(c, &h->list, list) { 1111 if (bacmp(&c->dst, ba) || c->type != BIS_LINK) 1112 continue; 1113 1114 if (c->iso_qos.bcast.bis == bis) { 1115 rcu_read_unlock(); 1116 return c; 1117 } 1118 } 1119 rcu_read_unlock(); 1120 1121 return NULL; 1122 } 1123 1124 static inline struct hci_conn * 1125 hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev) 1126 { 1127 struct hci_conn_hash *h = &hdev->conn_hash; 1128 struct hci_conn *c; 1129 1130 rcu_read_lock(); 1131 1132 list_for_each_entry_rcu(c, &h->list, list) { 1133 if (c->type != BIS_LINK) 1134 continue; 1135 1136 if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags)) 1137 continue; 1138 1139 rcu_read_unlock(); 1140 return c; 1141 } 1142 1143 rcu_read_unlock(); 1144 1145 return NULL; 1146 } 1147 1148 static inline struct hci_conn * 1149 hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev, 1150 bdaddr_t *ba, 1151 __u8 big, __u8 bis) 1152 { 1153 struct hci_conn_hash *h = &hdev->conn_hash; 1154 struct hci_conn *c; 1155 1156 rcu_read_lock(); 1157 1158 list_for_each_entry_rcu(c, &h->list, list) { 1159 if (bacmp(&c->dst, ba) || c->type != BIS_LINK || 1160 !test_bit(HCI_CONN_PER_ADV, &c->flags)) 1161 continue; 1162 1163 if (c->iso_qos.bcast.big == big && 1164 c->iso_qos.bcast.bis == bis) { 1165 rcu_read_unlock(); 1166 return c; 1167 } 1168 } 1169 rcu_read_unlock(); 1170 1171 return NULL; 1172 } 1173 1174 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, 1175 __u16 handle) 1176 { 1177 struct hci_conn_hash *h = &hdev->conn_hash; 1178 struct hci_conn *c; 1179 1180 rcu_read_lock(); 1181 1182 list_for_each_entry_rcu(c, &h->list, list) { 1183 if (c->handle == handle) { 1184 rcu_read_unlock(); 1185 return c; 1186 } 1187 } 1188 rcu_read_unlock(); 1189 1190 return NULL; 1191 } 1192 1193 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, 1194 __u8 type, bdaddr_t *ba) 1195 { 1196 struct hci_conn_hash *h = &hdev->conn_hash; 1197 struct hci_conn *c; 1198 1199 rcu_read_lock(); 1200 1201 list_for_each_entry_rcu(c, &h->list, list) { 1202 if (c->type == type && !bacmp(&c->dst, ba)) { 1203 rcu_read_unlock(); 1204 return c; 1205 } 1206 } 1207 1208 rcu_read_unlock(); 1209 1210 return NULL; 1211 } 1212 1213 static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, 1214 bdaddr_t *ba, 1215 __u8 ba_type) 1216 { 1217 struct hci_conn_hash *h = &hdev->conn_hash; 1218 struct hci_conn *c; 1219 1220 rcu_read_lock(); 1221 1222 list_for_each_entry_rcu(c, &h->list, list) { 1223 if (c->type != LE_LINK) 1224 continue; 1225 1226 if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { 1227 rcu_read_unlock(); 1228 return c; 1229 } 1230 } 1231 1232 rcu_read_unlock(); 1233 1234 return NULL; 1235 } 1236 1237 static inline struct hci_conn 
*hci_conn_hash_lookup_cis(struct hci_dev *hdev, 1238 bdaddr_t *ba, 1239 __u8 ba_type, 1240 __u8 cig, 1241 __u8 id) 1242 { 1243 struct hci_conn_hash *h = &hdev->conn_hash; 1244 struct hci_conn *c; 1245 1246 rcu_read_lock(); 1247 1248 list_for_each_entry_rcu(c, &h->list, list) { 1249 if (c->type != CIS_LINK) 1250 continue; 1251 1252 /* Match CIG ID if set */ 1253 if (cig != c->iso_qos.ucast.cig) 1254 continue; 1255 1256 /* Match CIS ID if set */ 1257 if (id != c->iso_qos.ucast.cis) 1258 continue; 1259 1260 /* Match destination address if set */ 1261 if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) { 1262 rcu_read_unlock(); 1263 return c; 1264 } 1265 } 1266 1267 rcu_read_unlock(); 1268 1269 return NULL; 1270 } 1271 1272 static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, 1273 __u8 handle) 1274 { 1275 struct hci_conn_hash *h = &hdev->conn_hash; 1276 struct hci_conn *c; 1277 1278 rcu_read_lock(); 1279 1280 list_for_each_entry_rcu(c, &h->list, list) { 1281 if (c->type != CIS_LINK) 1282 continue; 1283 1284 if (handle == c->iso_qos.ucast.cig) { 1285 rcu_read_unlock(); 1286 return c; 1287 } 1288 } 1289 1290 rcu_read_unlock(); 1291 1292 return NULL; 1293 } 1294 1295 static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, 1296 __u8 handle) 1297 { 1298 struct hci_conn_hash *h = &hdev->conn_hash; 1299 struct hci_conn *c; 1300 1301 rcu_read_lock(); 1302 1303 list_for_each_entry_rcu(c, &h->list, list) { 1304 if (c->type != BIS_LINK) 1305 continue; 1306 1307 if (handle == c->iso_qos.bcast.big) { 1308 rcu_read_unlock(); 1309 return c; 1310 } 1311 } 1312 1313 rcu_read_unlock(); 1314 1315 return NULL; 1316 } 1317 1318 static inline struct hci_conn * 1319 hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev, 1320 __u8 handle, __u8 num_bis) 1321 { 1322 struct hci_conn_hash *h = &hdev->conn_hash; 1323 struct hci_conn *c; 1324 1325 rcu_read_lock(); 1326 1327 list_for_each_entry_rcu(c, &h->list, list) { 1328 if (c->type != BIS_LINK) 1329 continue; 1330 1331 if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) { 1332 rcu_read_unlock(); 1333 return c; 1334 } 1335 } 1336 1337 rcu_read_unlock(); 1338 1339 return NULL; 1340 } 1341 1342 static inline struct hci_conn * 1343 hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) 1344 { 1345 struct hci_conn_hash *h = &hdev->conn_hash; 1346 struct hci_conn *c; 1347 1348 rcu_read_lock(); 1349 1350 list_for_each_entry_rcu(c, &h->list, list) { 1351 if (c->type != BIS_LINK || bacmp(&c->dst, BDADDR_ANY) || 1352 c->state != state) 1353 continue; 1354 1355 if (handle == c->iso_qos.bcast.big) { 1356 rcu_read_unlock(); 1357 return c; 1358 } 1359 } 1360 1361 rcu_read_unlock(); 1362 1363 return NULL; 1364 } 1365 1366 static inline struct hci_conn * 1367 hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) 1368 { 1369 struct hci_conn_hash *h = &hdev->conn_hash; 1370 struct hci_conn *c; 1371 1372 rcu_read_lock(); 1373 1374 list_for_each_entry_rcu(c, &h->list, list) { 1375 if (c->type != BIS_LINK || 1376 !test_bit(HCI_CONN_PA_SYNC, &c->flags)) 1377 continue; 1378 1379 if (c->iso_qos.bcast.big == big) { 1380 rcu_read_unlock(); 1381 return c; 1382 } 1383 } 1384 rcu_read_unlock(); 1385 1386 return NULL; 1387 } 1388 1389 static inline struct hci_conn * 1390 hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) 1391 { 1392 struct hci_conn_hash *h = &hdev->conn_hash; 1393 struct hci_conn *c; 1394 1395 rcu_read_lock(); 1396 1397 list_for_each_entry_rcu(c, 
&h->list, list) { 1398 if (c->type != BIS_LINK) 1399 continue; 1400 1401 /* Ignore the listen hcon, we are looking 1402 * for the child hcon that was created as 1403 * a result of the PA sync established event. 1404 */ 1405 if (c->state == BT_LISTEN) 1406 continue; 1407 1408 if (c->sync_handle == sync_handle) { 1409 rcu_read_unlock(); 1410 return c; 1411 } 1412 } 1413 rcu_read_unlock(); 1414 1415 return NULL; 1416 } 1417 1418 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, 1419 __u8 type, __u16 state) 1420 { 1421 struct hci_conn_hash *h = &hdev->conn_hash; 1422 struct hci_conn *c; 1423 1424 rcu_read_lock(); 1425 1426 list_for_each_entry_rcu(c, &h->list, list) { 1427 if (c->type == type && c->state == state) { 1428 rcu_read_unlock(); 1429 return c; 1430 } 1431 } 1432 1433 rcu_read_unlock(); 1434 1435 return NULL; 1436 } 1437 1438 typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data); 1439 static inline void hci_conn_hash_list_state(struct hci_dev *hdev, 1440 hci_conn_func_t func, __u8 type, 1441 __u16 state, void *data) 1442 { 1443 struct hci_conn_hash *h = &hdev->conn_hash; 1444 struct hci_conn *c; 1445 1446 if (!func) 1447 return; 1448 1449 rcu_read_lock(); 1450 1451 list_for_each_entry_rcu(c, &h->list, list) { 1452 if (c->type == type && c->state == state) 1453 func(c, data); 1454 } 1455 1456 rcu_read_unlock(); 1457 } 1458 1459 static inline void hci_conn_hash_list_flag(struct hci_dev *hdev, 1460 hci_conn_func_t func, __u8 type, 1461 __u8 flag, void *data) 1462 { 1463 struct hci_conn_hash *h = &hdev->conn_hash; 1464 struct hci_conn *c; 1465 1466 if (!func) 1467 return; 1468 1469 rcu_read_lock(); 1470 1471 list_for_each_entry_rcu(c, &h->list, list) { 1472 if (c->type == type && test_bit(flag, &c->flags)) 1473 func(c, data); 1474 } 1475 1476 rcu_read_unlock(); 1477 } 1478 1479 static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) 1480 { 1481 struct hci_conn_hash *h = &hdev->conn_hash; 1482 struct hci_conn *c; 1483 1484 rcu_read_lock(); 1485 1486 list_for_each_entry_rcu(c, &h->list, list) { 1487 if (c->type == LE_LINK && c->state == BT_CONNECT && 1488 !test_bit(HCI_CONN_SCANNING, &c->flags)) { 1489 rcu_read_unlock(); 1490 return c; 1491 } 1492 } 1493 1494 rcu_read_unlock(); 1495 1496 return NULL; 1497 } 1498 1499 /* Returns true if an le connection is in the scanning state */ 1500 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) 1501 { 1502 struct hci_conn_hash *h = &hdev->conn_hash; 1503 struct hci_conn *c; 1504 1505 rcu_read_lock(); 1506 1507 list_for_each_entry_rcu(c, &h->list, list) { 1508 if (c->type == LE_LINK && c->state == BT_CONNECT && 1509 test_bit(HCI_CONN_SCANNING, &c->flags)) { 1510 rcu_read_unlock(); 1511 return true; 1512 } 1513 } 1514 1515 rcu_read_unlock(); 1516 1517 return false; 1518 } 1519 1520 int hci_disconnect(struct hci_conn *conn, __u8 reason); 1521 bool hci_setup_sync(struct hci_conn *conn, __u16 handle); 1522 void hci_sco_setup(struct hci_conn *conn, __u8 status); 1523 bool hci_iso_setup_path(struct hci_conn *conn); 1524 int hci_le_create_cis_pending(struct hci_dev *hdev); 1525 int hci_conn_check_create_cis(struct hci_conn *conn); 1526 1527 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, 1528 u8 role, u16 handle); 1529 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, 1530 bdaddr_t *dst, u8 role); 1531 void hci_conn_del(struct hci_conn *conn); 1532 void hci_conn_hash_flush(struct hci_dev *hdev); 1533 1534 struct hci_chan 
*hci_chan_create(struct hci_conn *conn); 1535 void hci_chan_del(struct hci_chan *chan); 1536 void hci_chan_list_flush(struct hci_conn *conn); 1537 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); 1538 1539 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, 1540 u8 dst_type, u8 sec_level, 1541 u16 conn_timeout, 1542 enum conn_reasons conn_reason); 1543 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 1544 u8 dst_type, bool dst_resolved, u8 sec_level, 1545 u16 conn_timeout, u8 role, u8 phy, u8 sec_phy); 1546 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status); 1547 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, 1548 u8 sec_level, u8 auth_type, 1549 enum conn_reasons conn_reason, u16 timeout); 1550 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, 1551 __u16 setting, struct bt_codec *codec, 1552 u16 timeout); 1553 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, 1554 __u8 dst_type, struct bt_iso_qos *qos); 1555 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid, 1556 struct bt_iso_qos *qos, 1557 __u8 base_len, __u8 *base); 1558 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, 1559 __u8 dst_type, struct bt_iso_qos *qos); 1560 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, 1561 __u8 dst_type, __u8 sid, 1562 struct bt_iso_qos *qos, 1563 __u8 data_len, __u8 *data); 1564 struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, 1565 __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); 1566 int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, 1567 struct bt_iso_qos *qos, __u16 sync_handle, 1568 __u8 num_bis, __u8 bis[]); 1569 int hci_conn_check_link_mode(struct hci_conn *conn); 1570 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); 1571 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, 1572 bool initiator); 1573 int hci_conn_switch_role(struct hci_conn *conn, __u8 role); 1574 1575 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); 1576 1577 void hci_conn_failed(struct hci_conn *conn, u8 status); 1578 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle); 1579 1580 void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb); 1581 void hci_conn_tx_dequeue(struct hci_conn *conn); 1582 void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset, 1583 const struct sockcm_cookie *sockc); 1584 1585 static inline void hci_sockcm_init(struct sockcm_cookie *sockc, struct sock *sk) 1586 { 1587 *sockc = (struct sockcm_cookie) { 1588 .tsflags = READ_ONCE(sk->sk_tsflags), 1589 }; 1590 } 1591 1592 /* 1593 * hci_conn_get() and hci_conn_put() are used to control the life-time of an 1594 * "hci_conn" object. They do not guarantee that the hci_conn object is running, 1595 * working or anything else. They just guarantee that the object is available 1596 * and can be dereferenced. So you can use its locks, local variables and any 1597 * other constant data. 1598 * Before accessing runtime data, you _must_ lock the object and then check that 1599 * it is still running. As soon as you release the locks, the connection might 1600 * get dropped, though. 1601 * 1602 * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control 1603 * how long the underlying connection is held. 
So every channel that runs on the 1604 * hci_conn object calls this to prevent the connection from disappearing. As 1605 * long as you hold a device, you must also guarantee that you have a valid 1606 * reference to the device via hci_conn_get() (or the initial reference from 1607 * hci_conn_add()). 1608 * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't 1609 * break because nobody cares for that. But this means, we cannot use 1610 * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). 1611 */ 1612 1613 static inline struct hci_conn *hci_conn_get(struct hci_conn *conn) 1614 { 1615 get_device(&conn->dev); 1616 return conn; 1617 } 1618 1619 static inline void hci_conn_put(struct hci_conn *conn) 1620 { 1621 put_device(&conn->dev); 1622 } 1623 1624 static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn) 1625 { 1626 BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); 1627 1628 atomic_inc(&conn->refcnt); 1629 cancel_delayed_work(&conn->disc_work); 1630 1631 return conn; 1632 } 1633 1634 static inline void hci_conn_drop(struct hci_conn *conn) 1635 { 1636 BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); 1637 1638 if (atomic_dec_and_test(&conn->refcnt)) { 1639 unsigned long timeo; 1640 1641 switch (conn->type) { 1642 case ACL_LINK: 1643 case LE_LINK: 1644 cancel_delayed_work(&conn->idle_work); 1645 if (conn->state == BT_CONNECTED) { 1646 timeo = conn->disc_timeout; 1647 if (!conn->out) 1648 timeo *= 2; 1649 } else { 1650 timeo = 0; 1651 } 1652 break; 1653 1654 default: 1655 timeo = 0; 1656 break; 1657 } 1658 1659 cancel_delayed_work(&conn->disc_work); 1660 queue_delayed_work(conn->hdev->workqueue, 1661 &conn->disc_work, timeo); 1662 } 1663 } 1664 1665 /* ----- HCI Devices ----- */ 1666 static inline void hci_dev_put(struct hci_dev *d) 1667 { 1668 BT_DBG("%s orig refcnt %d", d->name, 1669 kref_read(&d->dev.kobj.kref)); 1670 1671 put_device(&d->dev); 1672 } 1673 1674 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) 1675 { 1676 BT_DBG("%s orig refcnt %d", d->name, 1677 kref_read(&d->dev.kobj.kref)); 1678 1679 get_device(&d->dev); 1680 return d; 1681 } 1682 1683 #define hci_dev_lock(d) mutex_lock(&d->lock) 1684 #define hci_dev_unlock(d) mutex_unlock(&d->lock) 1685 1686 #define to_hci_dev(d) container_of(d, struct hci_dev, dev) 1687 #define to_hci_conn(c) container_of(c, struct hci_conn, dev) 1688 1689 static inline void *hci_get_drvdata(struct hci_dev *hdev) 1690 { 1691 return dev_get_drvdata(&hdev->dev); 1692 } 1693 1694 static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) 1695 { 1696 dev_set_drvdata(&hdev->dev, data); 1697 } 1698 1699 static inline void *hci_get_priv(struct hci_dev *hdev) 1700 { 1701 return (char *)hdev + sizeof(*hdev); 1702 } 1703 1704 struct hci_dev *hci_dev_get(int index); 1705 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); 1706 1707 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv); 1708 1709 static inline struct hci_dev *hci_alloc_dev(void) 1710 { 1711 return hci_alloc_dev_priv(0); 1712 } 1713 1714 void hci_free_dev(struct hci_dev *hdev); 1715 int hci_register_dev(struct hci_dev *hdev); 1716 void hci_unregister_dev(struct hci_dev *hdev); 1717 void hci_release_dev(struct hci_dev *hdev); 1718 int hci_register_suspend_notifier(struct hci_dev *hdev); 1719 int hci_unregister_suspend_notifier(struct hci_dev *hdev); 1720 int hci_suspend_dev(struct hci_dev *hdev); 1721 int hci_resume_dev(struct hci_dev *hdev); 1722 int 
hci_reset_dev(struct hci_dev *hdev); 1723 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); 1724 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); 1725 __printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...); 1726 __printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...); 1727 1728 static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode) 1729 { 1730 #if IS_ENABLED(CONFIG_BT_MSFTEXT) 1731 hdev->msft_opcode = opcode; 1732 #endif 1733 } 1734 1735 static inline void hci_set_aosp_capable(struct hci_dev *hdev) 1736 { 1737 #if IS_ENABLED(CONFIG_BT_AOSPEXT) 1738 hdev->aosp_capable = true; 1739 #endif 1740 } 1741 1742 static inline void hci_devcd_setup(struct hci_dev *hdev) 1743 { 1744 #ifdef CONFIG_DEV_COREDUMP 1745 INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx); 1746 INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout); 1747 skb_queue_head_init(&hdev->dump.dump_q); 1748 #endif 1749 } 1750 1751 int hci_dev_open(__u16 dev); 1752 int hci_dev_close(__u16 dev); 1753 int hci_dev_do_close(struct hci_dev *hdev); 1754 int hci_dev_reset(__u16 dev); 1755 int hci_dev_reset_stat(__u16 dev); 1756 int hci_dev_cmd(unsigned int cmd, void __user *arg); 1757 int hci_get_dev_list(void __user *arg); 1758 int hci_get_dev_info(void __user *arg); 1759 int hci_get_conn_list(void __user *arg); 1760 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); 1761 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); 1762 int hci_inquiry(void __user *arg); 1763 1764 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, 1765 bdaddr_t *bdaddr, u8 type); 1766 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( 1767 struct list_head *list, bdaddr_t *bdaddr, 1768 u8 type); 1769 struct bdaddr_list_with_flags * 1770 hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr, 1771 u8 type); 1772 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); 1773 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, 1774 u8 type, u8 *peer_irk, u8 *local_irk); 1775 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, 1776 u8 type, u32 flags); 1777 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); 1778 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, 1779 u8 type); 1780 void hci_bdaddr_list_clear(struct list_head *list); 1781 1782 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, 1783 bdaddr_t *addr, u8 addr_type); 1784 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, 1785 bdaddr_t *addr, u8 addr_type); 1786 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); 1787 void hci_conn_params_clear_disabled(struct hci_dev *hdev); 1788 void hci_conn_params_free(struct hci_conn_params *param); 1789 1790 void hci_pend_le_list_del_init(struct hci_conn_params *param); 1791 void hci_pend_le_list_add(struct hci_conn_params *param, 1792 struct list_head *list); 1793 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, 1794 bdaddr_t *addr, 1795 u8 addr_type); 1796 1797 void hci_uuids_clear(struct hci_dev *hdev); 1798 1799 void hci_link_keys_clear(struct hci_dev *hdev); 1800 u8 *hci_conn_key_enc_size(struct hci_conn *conn); 1801 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); 1802 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, 1803 bdaddr_t *bdaddr, u8 *val, 
u8 type, 1804 u8 pin_len, bool *persistent); 1805 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1806 u8 addr_type, u8 type, u8 authenticated, 1807 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); 1808 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1809 u8 addr_type, u8 role); 1810 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); 1811 void hci_smp_ltks_clear(struct hci_dev *hdev); 1812 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); 1813 1814 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa); 1815 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1816 u8 addr_type); 1817 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1818 u8 addr_type, u8 val[16], bdaddr_t *rpa); 1819 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); 1820 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]); 1821 void hci_blocked_keys_clear(struct hci_dev *hdev); 1822 void hci_smp_irks_clear(struct hci_dev *hdev); 1823 1824 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 1825 1826 void hci_remote_oob_data_clear(struct hci_dev *hdev); 1827 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 1828 bdaddr_t *bdaddr, u8 bdaddr_type); 1829 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 1830 u8 bdaddr_type, u8 *hash192, u8 *rand192, 1831 u8 *hash256, u8 *rand256); 1832 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 1833 u8 bdaddr_type); 1834 1835 void hci_adv_instances_clear(struct hci_dev *hdev); 1836 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance); 1837 struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid); 1838 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance); 1839 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, 1840 u32 flags, u16 adv_data_len, u8 *adv_data, 1841 u16 scan_rsp_len, u8 *scan_rsp_data, 1842 u16 timeout, u16 duration, s8 tx_power, 1843 u32 min_interval, u32 max_interval, 1844 u8 mesh_handle); 1845 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid, 1846 u32 flags, u8 data_len, u8 *data, 1847 u32 min_interval, u32 max_interval); 1848 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, 1849 u16 adv_data_len, u8 *adv_data, 1850 u16 scan_rsp_len, u8 *scan_rsp_data); 1851 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); 1852 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); 1853 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance); 1854 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance); 1855 1856 void hci_adv_monitors_clear(struct hci_dev *hdev); 1857 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); 1858 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); 1859 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle); 1860 int hci_remove_all_adv_monitor(struct hci_dev *hdev); 1861 bool hci_is_adv_monitoring(struct hci_dev *hdev); 1862 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev); 1863 1864 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 1865 1866 void hci_init_sysfs(struct hci_dev *hdev); 1867 void hci_conn_init_sysfs(struct hci_conn *conn); 1868 void hci_conn_add_sysfs(struct hci_conn *conn); 1869 void hci_conn_del_sysfs(struct hci_conn *conn); 
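/*
 * Illustrative sketch of the reference counting scheme described in the
 * comment above hci_conn_get(): hci_conn_hold() and hci_conn_drop() keep
 * the underlying link from being disconnected (the final drop re-arms
 * disc_work), while hci_conn_get() and hci_conn_put() only keep the
 * hci_conn object itself dereferenceable. The my_chan_attach() and
 * my_chan_detach() helpers below are hypothetical and are shown only as
 * an example of a channel pinning a connection for as long as it uses it:
 *
 *	static void my_chan_attach(struct hci_conn *conn)
 *	{
 *		hci_conn_hold(conn);
 *	}
 *
 *	static void my_chan_detach(struct hci_conn *conn)
 *	{
 *		hci_conn_drop(conn);
 *	}
 *
 * Code that merely needs conn to stay valid later (e.g. from deferred
 * work) but does not need the link to stay up would instead pair
 * hci_conn_get() with hci_conn_put().
 */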
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
#define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent)

/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev)   ((dev)->features[0][0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev)   ((dev)->features[0][0] & LMP_RSWITCH)
#define lmp_hold_capable(dev)      ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev)     ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev)      ((dev)->features[0][1] & LMP_PARK)
#define lmp_sco_capable(dev)       ((dev)->features[0][1] & LMP_SCO)
#define lmp_inq_rssi_capable(dev)  ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev)      ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev)     (!((dev)->features[0][4] & LMP_NO_BREDR))
#define lmp_le_capable(dev)        ((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
#define lmp_esco_2m_capable(dev)   ((dev)->features[0][5] & LMP_EDR_ESCO_2M)
#define lmp_ext_inq_capable(dev)   ((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev)     (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev)       ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)  ((dev)->features[0][6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev)      ((dev)->features[0][7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev)  ((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev)    ((dev)->features[0][2] & LMP_TRANSPARENT)
#define lmp_edr_2m_capable(dev)    ((dev)->features[0][3] & LMP_EDR_2M)
#define lmp_edr_3m_capable(dev)    ((dev)->features[0][3] & LMP_EDR_3M)
#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)

/* ----- Extended LMP capabilities ----- */
#define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL)
#define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL)
#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
#define lmp_sync_scan_capable(dev)  ((dev)->features[2][0] & LMP_SYNC_SCAN)
#define lmp_sc_capable(dev)         ((dev)->features[2][1] & LMP_SC)
#define lmp_ping_capable(dev)       ((dev)->features[2][1] & LMP_PING)

/* ----- Host capabilities ----- */
#define lmp_host_ssp_capable(dev)   ((dev)->features[1][0] & LMP_HOST_SSP)
#define lmp_host_sc_capable(dev)    ((dev)->features[1][0] & LMP_HOST_SC)
#define lmp_host_le_capable(dev)    (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))

#define hdev_is_powered(dev)   (test_bit(HCI_UP, &(dev)->flags) && \
				!hci_dev_test_flag(dev, HCI_AUTO_OFF))
#define bredr_sc_enabled(dev)  (lmp_sc_capable(dev) && \
				hci_dev_test_flag(dev, HCI_SC_ENABLED))
#define rpa_valid(dev)         (bacmp(&dev->rpa, BDADDR_ANY) && \
				!hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
#define adv_rpa_valid(adv)     (bacmp(&adv->random_addr, BDADDR_ANY) && \
				!adv->rpa_expired)

#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
		      ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))

#define
le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M)) 1928 1929 #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ 1930 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) 1931 1932 #define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \ 1933 !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \ 1934 &(dev)->quirks)) 1935 1936 #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ 1937 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) 1938 1939 #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) 1940 1941 #define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \ 1942 (hdev->commands[39] & 0x04)) 1943 1944 #define read_key_size_capable(dev) \ 1945 ((dev)->commands[20] & 0x10 && \ 1946 !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) 1947 1948 #define read_voice_setting_capable(dev) \ 1949 ((dev)->commands[9] & 0x04 && \ 1950 !test_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &(dev)->quirks)) 1951 1952 /* Use enhanced synchronous connection if command is supported and its quirk 1953 * has not been set. 1954 */ 1955 #define enhanced_sync_conn_capable(dev) \ 1956 (((dev)->commands[29] & 0x08) && \ 1957 !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks)) 1958 1959 /* Use ext scanning if set ext scan param and ext scan enable is supported */ 1960 #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ 1961 ((dev)->commands[37] & 0x40) && \ 1962 !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks)) 1963 1964 /* Use ext create connection if command is supported */ 1965 #define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \ 1966 !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks)) 1967 /* Extended advertising support */ 1968 #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) 1969 1970 /* Maximum advertising length */ 1971 #define max_adv_len(dev) \ 1972 (ext_adv_capable(dev) ? 
/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
 *
 * C24: Mandatory if the LE Controller supports Connection State and either
 * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
 */
#define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \
					 ext_adv_capable(dev)) && \
					 !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \
						   &(dev)->quirks))

/* Periodic advertising support */
#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))

/* CIS Master/Slave and BIS support */
#define iso_capable(dev) (cis_capable(dev) || bis_capable(dev))
#define cis_capable(dev) \
	(cis_central_capable(dev) || cis_peripheral_capable(dev))
#define cis_central_capable(dev) \
	((dev)->le_features[3] & HCI_LE_CIS_CENTRAL)
#define cis_peripheral_capable(dev) \
	((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
#define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)

#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
	(!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))

/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER             0x01

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
					__u8 type, __u8 *flags)
{
	switch (type) {
	case ACL_LINK:
		return l2cap_connect_ind(hdev, bdaddr);

	case SCO_LINK:
	case ESCO_LINK:
		return sco_connect_ind(hdev, bdaddr, flags);

	case CIS_LINK:
	case BIS_LINK:
		return iso_connect_ind(hdev, bdaddr, flags);

	default:
		BT_ERR("unknown link type %d", type);
		return -EINVAL;
	}
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return HCI_ERROR_REMOTE_USER_TERM;

	return l2cap_disconn_ind(conn);
}

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	void (*disconn_cfm)	(struct hci_conn *conn, __u8 status);
	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
				 __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->connect_cfm)
			cb->connect_cfm(conn, status);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->connect_cfm_cb)
		conn->connect_cfm_cb(conn, status);
}

static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->disconn_cfm)
			cb->disconn_cfm(conn, reason);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->disconn_cfm_cb)
		conn->disconn_cfm_cb(conn, reason);
}
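/*
 * Illustrative sketch (not part of the upstream header): upper protocols
 * (L2CAP, SCO, ISO) receive these confirmations by registering a struct
 * hci_cb with hci_register_cb(), declared later in this header. The sample_*
 * names below are hypothetical:
 *
 *	static void sample_connect_cfm(struct hci_conn *conn, __u8 status)
 *	{
 *		if (!status)
 *			pr_debug("connection established\n");
 *	}
 *
 *	static struct hci_cb sample_cb = {
 *		.name		= "sample",
 *		.connect_cfm	= sample_connect_cfm,
 *	};
 *
 *	hci_register_cb(&sample_cb);	// module init
 *	hci_unregister_cb(&sample_cb);	// module exit
 *
 * The callbacks run with hci_cb_list_lock held, so they must not call
 * hci_register_cb()/hci_unregister_cb() themselves.
 */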
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return;

	encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;
	__u8 encrypt;

	if (conn->state == BT_CONFIG) {
		if (!status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
		return;
	}

	/* encrypt: 0x00 = not encrypted, 0x01 = encrypted,
	 * 0x02 = encrypted with AES-CCM
	 */
	if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		encrypt = 0x00;
	else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
		encrypt = 0x02;
	else
		encrypt = 0x01;

	if (!status) {
		if (conn->sec_level == BT_SECURITY_SDP)
			conn->sec_level = BT_SECURITY_LOW;

		if (conn->pending_sec_level > conn->sec_level)
			conn->sec_level = conn->pending_sec_level;
	}

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	mutex_unlock(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
				       __u8 role)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	mutex_unlock(&hci_cb_list_lock);
}

static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
	if (addr_type != ADDR_LE_DEV_RANDOM)
		return false;

	if ((bdaddr->b[5] & 0xc0) == 0x40)
		return true;

	return false;
}

static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 addr_type)
{
	if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
		return NULL;

	return hci_find_irk_by_rpa(hdev, bdaddr);
}
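/*
 * Illustrative note (not part of the upstream header): an LE random address
 * is classified by the two most significant bits of its most significant
 * byte (b[5], since bdaddr_t is stored little endian):
 *
 *	01xxxxxx  resolvable private address (RPA)   -> hci_bdaddr_is_rpa()
 *	00xxxxxx  non-resolvable private address
 *	11xxxxxx  static random (identity) address   -> hci_is_identity_address()
 *
 * A typical lookup therefore resolves an RPA back to the peer identity with
 * something like (peer_addr is a hypothetical local):
 *
 *	struct smp_irk *irk = hci_get_irk(hdev, &peer_addr, ADDR_LE_DEV_RANDOM);
 *
 *	if (irk)
 *		bacpy(&peer_addr, &irk->bdaddr);
 */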
/* Validate a proposed LE connection parameter set. min/max are the
 * connection interval bounds in 1.25 ms units (6..3200, i.e. 7.5 ms..4 s),
 * latency is the peripheral latency in connection events (at most 499) and
 * to_multiplier is the supervision timeout in 10 ms units (10..3200). The
 * last two checks enforce that the supervision timeout is longer than the
 * maximum interval and at least twice the effective connection interval
 * (1 + latency) * max.
 */
static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
					u16 to_multiplier)
{
	u16 max_latency;

	if (min > max) {
		BT_WARN("min %d > max %d", min, max);
		return -EINVAL;
	}

	if (min < 6) {
		BT_WARN("min %d < 6", min);
		return -EINVAL;
	}

	if (max > 3200) {
		BT_WARN("max %d > 3200", max);
		return -EINVAL;
	}

	if (to_multiplier < 10) {
		BT_WARN("to_multiplier %d < 10", to_multiplier);
		return -EINVAL;
	}

	if (to_multiplier > 3200) {
		BT_WARN("to_multiplier %d > 3200", to_multiplier);
		return -EINVAL;
	}

	if (max >= to_multiplier * 8) {
		BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
		return -EINVAL;
	}

	max_latency = (to_multiplier * 4 / max) - 1;
	if (latency > 499) {
		BT_WARN("latency %d > 499", latency);
		return -EINVAL;
	}

	if (latency > max_latency) {
		BT_WARN("latency %d > max_latency %d", latency, max_latency);
		return -EINVAL;
	}

	return 0;
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event);

u32 hci_conn_get_phy(struct hci_conn *conn);
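/*
 * Illustrative sketch (not part of the upstream header): hci_send_cmd()
 * queues an HCI command for the controller; the result arrives later through
 * the event path rather than being returned here. A typical call fills one
 * of the parameter structures from hci.h and passes its size, e.g.:
 *
 *	struct hci_cp_write_le_host_supported cp;
 *
 *	cp.le = 0x01;
 *	cp.simul = 0x00;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
 *
 * Synchronous request flows go through the hci_sync.h helpers instead.
 */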
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk);

void hci_sock_dev_event(struct hci_dev *hdev, int event);

#define HCI_MGMT_VAR_LEN	BIT(0)
#define HCI_MGMT_NO_HDEV	BIT(1)
#define HCI_MGMT_UNTRUSTED	BIT(2)
#define HCI_MGMT_UNCONFIGURED	BIT(3)
#define HCI_MGMT_HDEV_OPTIONAL	BIT(4)

struct hci_mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	size_t data_len;
	unsigned long flags;
};

struct hci_mgmt_chan {
	struct list_head list;
	unsigned short channel;
	size_t handler_count;
	const struct hci_mgmt_handler *handlers;
	void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
};

int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);

/* Management interface */
#define DISCOV_TYPE_BREDR		(BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE			(BIT(BDADDR_LE_PUBLIC) | \
					 BIT(BDADDR_LE_RANDOM))
#define DISCOV_TYPE_INTERLEAVED		(BIT(BDADDR_BREDR) | \
					 BIT(BDADDR_LE_PUBLIC) | \
					 BIT(BDADDR_LE_RANDOM))

/* These LE scan and inquiry parameters were chosen according to LE General
 * Discovery Procedure specification.
 */
#define DISCOV_LE_SCAN_WIN		0x0012	/* 11.25 msec */
#define DISCOV_LE_SCAN_INT		0x0012	/* 11.25 msec */
#define DISCOV_LE_SCAN_INT_FAST		0x0060	/* 60 msec */
#define DISCOV_LE_SCAN_WIN_FAST		0x0030	/* 30 msec */
#define DISCOV_LE_SCAN_INT_CONN		0x0060	/* 60 msec */
#define DISCOV_LE_SCAN_WIN_CONN		0x0060	/* 60 msec */
#define DISCOV_LE_SCAN_INT_SLOW1	0x0800	/* 1.28 sec */
#define DISCOV_LE_SCAN_WIN_SLOW1	0x0012	/* 11.25 msec */
#define DISCOV_LE_SCAN_INT_SLOW2	0x1000	/* 2.56 sec */
#define DISCOV_LE_SCAN_WIN_SLOW2	0x0024	/* 22.5 msec */
#define DISCOV_CODED_SCAN_INT_FAST	0x0120	/* 180 msec */
#define DISCOV_CODED_SCAN_WIN_FAST	0x0090	/* 90 msec */
#define DISCOV_CODED_SCAN_INT_SLOW1	0x1800	/* 3.84 sec */
#define DISCOV_CODED_SCAN_WIN_SLOW1	0x0036	/* 33.75 msec */
#define DISCOV_CODED_SCAN_INT_SLOW2	0x3000	/* 7.68 sec */
#define DISCOV_CODED_SCAN_WIN_SLOW2	0x006c	/* 67.5 msec */
#define DISCOV_LE_TIMEOUT		10240	/* msec */
#define DISCOV_INTERLEAVED_TIMEOUT	5120	/* msec */
#define DISCOV_INTERLEAVED_INQUIRY_LEN	0x04
#define DISCOV_BREDR_INQUIRY_LEN	0x08
#define DISCOV_LE_RESTART_DELAY		msecs_to_jiffies(200)	/* msec */
#define DISCOV_LE_FAST_ADV_INT_MIN	0x00A0	/* 100 msec */
#define DISCOV_LE_FAST_ADV_INT_MAX	0x00F0	/* 150 msec */
#define DISCOV_LE_PER_ADV_INT_MIN	0x00A0	/* 200 msec */
#define DISCOV_LE_PER_ADV_INT_MAX	0x00A0	/* 200 msec */
#define DISCOV_LE_ADV_MESH_MIN		0x00A0	/* 100 msec */
#define DISCOV_LE_ADV_MESH_MAX		0x00A0	/* 100 msec */
#define INTERVAL_TO_MS(x)		(((x) * 10) / 0x10)

#define NAME_RESOLVE_DURATION		msecs_to_jiffies(10240)	/* 10.24 sec */
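/*
 * Illustrative note (not part of the upstream header): the scan interval and
 * window values above are in 0.625 ms units, which is exactly what
 * INTERVAL_TO_MS() converts ((x) * 10 / 0x10 == x * 0.625), e.g.:
 *
 *	INTERVAL_TO_MS(DISCOV_LE_SCAN_INT_FAST)  == 60	 (0x0060 units)
 *	INTERVAL_TO_MS(DISCOV_LE_SCAN_INT_SLOW1) == 1280 (0x0800 units)
 *
 * The periodic advertising interval (DISCOV_LE_PER_ADV_INT_*) uses 1.25 ms
 * units, which is why 0x00A0 is annotated as 200 msec there but as 100 msec
 * for the ordinary advertising interval defines.
 */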
void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
void mgmt_power_on(struct hci_dev *hdev, int err);
void __mgmt_power_off(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected);
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status);
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
			 u8 status);
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status);
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint);
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 link_type,
					 u8 addr_type, u8 status);
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type);
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 link_type,
					 u8 addr_type, u8 status);
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
void mgmt_suspending(struct hci_dev *hdev, u8 state);
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent);
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
			    u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type);

int hci_abort_conn(struct hci_conn *conn, u8 reason);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size);

void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type);

#define SCO_AIRMODE_MASK	0x0003
#define SCO_AIRMODE_CVSD	0x0000
#define SCO_AIRMODE_TRANSP	0x0003

#define LOCAL_CODEC_ACL_MASK	BIT(0)
#define LOCAL_CODEC_SCO_MASK	BIT(1)

#define TRANSPORT_TYPE_MAX	0x04

#endif /* __HCI_CORE_H */