1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023-2024 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 #ifndef __HCI_CORE_H
27 #define __HCI_CORE_H
28 
29 #include <linux/idr.h>
30 #include <linux/leds.h>
31 #include <linux/rculist.h>
32 
33 #include <net/bluetooth/hci.h>
34 #include <net/bluetooth/hci_sync.h>
35 #include <net/bluetooth/hci_sock.h>
36 #include <net/bluetooth/coredump.h>
37 
38 /* HCI priority */
39 #define HCI_PRIO_MAX	7
40 
41 /* HCI maximum id value */
42 #define HCI_MAX_ID 10000
43 
44 /* HCI Core structures */
45 struct inquiry_data {
46 	bdaddr_t	bdaddr;
47 	__u8		pscan_rep_mode;
48 	__u8		pscan_period_mode;
49 	__u8		pscan_mode;
50 	__u8		dev_class[3];
51 	__le16		clock_offset;
52 	__s8		rssi;
53 	__u8		ssp_mode;
54 };
55 
56 struct inquiry_entry {
57 	struct list_head	all;		/* inq_cache.all */
58 	struct list_head	list;		/* unknown or resolve */
59 	enum {
60 		NAME_NOT_KNOWN,
61 		NAME_NEEDED,
62 		NAME_PENDING,
63 		NAME_KNOWN,
64 	} name_state;
65 	__u32			timestamp;
66 	struct inquiry_data	data;
67 };
68 
69 struct discovery_state {
70 	int			type;
71 	enum {
72 		DISCOVERY_STOPPED,
73 		DISCOVERY_STARTING,
74 		DISCOVERY_FINDING,
75 		DISCOVERY_RESOLVING,
76 		DISCOVERY_STOPPING,
77 	} state;
78 	struct list_head	all;	/* All devices found during inquiry */
79 	struct list_head	unknown;	/* Name state not known */
80 	struct list_head	resolve;	/* Name needs to be resolved */
81 	__u32			timestamp;
82 	bdaddr_t		last_adv_addr;
83 	u8			last_adv_addr_type;
84 	s8			last_adv_rssi;
85 	u32			last_adv_flags;
86 	u8			last_adv_data[HCI_MAX_EXT_AD_LENGTH];
87 	u8			last_adv_data_len;
88 	bool			report_invalid_rssi;
89 	bool			result_filtering;
90 	bool			limited;
91 	s8			rssi;
92 	u16			uuid_count;
93 	u8			(*uuids)[16];
94 	unsigned long		name_resolve_timeout;
95 };
96 
97 #define SUSPEND_NOTIFIER_TIMEOUT	msecs_to_jiffies(2000) /* 2 seconds */
98 
99 enum suspend_tasks {
100 	SUSPEND_PAUSE_DISCOVERY,
101 	SUSPEND_UNPAUSE_DISCOVERY,
102 
103 	SUSPEND_PAUSE_ADVERTISING,
104 	SUSPEND_UNPAUSE_ADVERTISING,
105 
106 	SUSPEND_SCAN_DISABLE,
107 	SUSPEND_SCAN_ENABLE,
108 	SUSPEND_DISCONNECTING,
109 
110 	SUSPEND_POWERING_DOWN,
111 
112 	SUSPEND_PREPARE_NOTIFIER,
113 
114 	SUSPEND_SET_ADV_FILTER,
115 	__SUSPEND_NUM_TASKS
116 };
117 
118 enum suspended_state {
119 	BT_RUNNING = 0,
120 	BT_SUSPEND_DISCONNECT,
121 	BT_SUSPEND_CONFIGURE_WAKE,
122 };
123 
124 struct hci_conn_hash {
125 	struct list_head list;
126 	unsigned int     acl_num;
127 	unsigned int     sco_num;
128 	unsigned int     iso_num;
129 	unsigned int     le_num;
130 	unsigned int     le_num_peripheral;
131 };
132 
133 struct bdaddr_list {
134 	struct list_head list;
135 	bdaddr_t bdaddr;
136 	u8 bdaddr_type;
137 };
138 
139 struct codec_list {
140 	struct list_head list;
141 	u8	id;
142 	__u16	cid;
143 	__u16	vid;
144 	u8	transport;
145 	u8	num_caps;
146 	u32	len;
147 	struct hci_codec_caps caps[];
148 };
149 
150 struct bdaddr_list_with_irk {
151 	struct list_head list;
152 	bdaddr_t bdaddr;
153 	u8 bdaddr_type;
154 	u8 peer_irk[16];
155 	u8 local_irk[16];
156 };
157 
158 /* Bitmask of connection flags */
159 enum hci_conn_flags {
160 	HCI_CONN_FLAG_REMOTE_WAKEUP = 1,
161 	HCI_CONN_FLAG_DEVICE_PRIVACY = 2,
162 };
163 typedef u8 hci_conn_flags_t;
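
/* Annotation (not in the original header): these flags are OR-ed together in
 * a hci_conn_flags_t bitmask, e.g.
 *	params->flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
 */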
164 
165 struct bdaddr_list_with_flags {
166 	struct list_head list;
167 	bdaddr_t bdaddr;
168 	u8 bdaddr_type;
169 	hci_conn_flags_t flags;
170 };
171 
172 struct bt_uuid {
173 	struct list_head list;
174 	u8 uuid[16];
175 	u8 size;
176 	u8 svc_hint;
177 };
178 
179 struct blocked_key {
180 	struct list_head list;
181 	struct rcu_head rcu;
182 	u8 type;
183 	u8 val[16];
184 };
185 
186 struct smp_csrk {
187 	bdaddr_t bdaddr;
188 	u8 bdaddr_type;
189 	u8 type;
190 	u8 val[16];
191 };
192 
193 struct smp_ltk {
194 	struct list_head list;
195 	struct rcu_head rcu;
196 	bdaddr_t bdaddr;
197 	u8 bdaddr_type;
198 	u8 authenticated;
199 	u8 type;
200 	u8 enc_size;
201 	__le16 ediv;
202 	__le64 rand;
203 	u8 val[16];
204 };
205 
206 struct smp_irk {
207 	struct list_head list;
208 	struct rcu_head rcu;
209 	bdaddr_t rpa;
210 	bdaddr_t bdaddr;
211 	u8 addr_type;
212 	u8 val[16];
213 };
214 
215 struct link_key {
216 	struct list_head list;
217 	struct rcu_head rcu;
218 	bdaddr_t bdaddr;
219 	u8 type;
220 	u8 val[HCI_LINK_KEY_SIZE];
221 	u8 pin_len;
222 };
223 
224 struct oob_data {
225 	struct list_head list;
226 	bdaddr_t bdaddr;
227 	u8 bdaddr_type;
228 	u8 present;
229 	u8 hash192[16];
230 	u8 rand192[16];
231 	u8 hash256[16];
232 	u8 rand256[16];
233 };
234 
235 struct adv_info {
236 	struct list_head list;
237 	bool	enabled;
238 	bool	pending;
239 	bool	periodic;
240 	__u8	mesh;
241 	__u8	instance;
242 	__u8	handle;
243 	__u32	flags;
244 	__u16	timeout;
245 	__u16	remaining_time;
246 	__u16	duration;
247 	__u16	adv_data_len;
248 	__u8	adv_data[HCI_MAX_EXT_AD_LENGTH];
249 	bool	adv_data_changed;
250 	__u16	scan_rsp_len;
251 	__u8	scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
252 	bool	scan_rsp_changed;
253 	__u16	per_adv_data_len;
254 	__u8	per_adv_data[HCI_MAX_PER_AD_LENGTH];
255 	__s8	tx_power;
256 	__u32   min_interval;
257 	__u32   max_interval;
258 	bdaddr_t	random_addr;
259 	bool		rpa_expired;
260 	struct delayed_work	rpa_expired_cb;
261 };
262 
263 #define HCI_MAX_ADV_INSTANCES		5
264 #define HCI_DEFAULT_ADV_DURATION	2
265 
266 #define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F
267 
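/* Annotation (not in the original header): DATA_CMP() orders two buffers the
 * way memcmp() would, but only when their lengths match; otherwise the signed
 * length difference decides the result.  A return value of 0 therefore means
 * "identical data of identical length".
 */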
268 #define DATA_CMP(_d1, _l1, _d2, _l2) \
269 	(_l1 == _l2 ? memcmp(_d1, _d2, _l1) : _l1 - _l2)
270 
271 #define ADV_DATA_CMP(_adv, _data, _len) \
272 	DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len)
273 
274 #define SCAN_RSP_CMP(_adv, _data, _len) \
275 	DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len)
276 
277 struct monitored_device {
278 	struct list_head list;
279 
280 	bdaddr_t bdaddr;
281 	__u8     addr_type;
282 	__u16    handle;
283 	bool     notified;
284 };
285 
286 struct adv_pattern {
287 	struct list_head list;
288 	__u8 ad_type;
289 	__u8 offset;
290 	__u8 length;
291 	__u8 value[HCI_MAX_EXT_AD_LENGTH];
292 };
293 
294 struct adv_rssi_thresholds {
295 	__s8 low_threshold;
296 	__s8 high_threshold;
297 	__u16 low_threshold_timeout;
298 	__u16 high_threshold_timeout;
299 	__u8 sampling_period;
300 };
301 
302 struct adv_monitor {
303 	struct list_head patterns;
304 	struct adv_rssi_thresholds rssi;
305 	__u16		handle;
306 
307 	enum {
308 		ADV_MONITOR_STATE_NOT_REGISTERED,
309 		ADV_MONITOR_STATE_REGISTERED,
310 		ADV_MONITOR_STATE_OFFLOADED
311 	} state;
312 };
313 
314 #define HCI_MIN_ADV_MONITOR_HANDLE		1
315 #define HCI_MAX_ADV_MONITOR_NUM_HANDLES		32
316 #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS	16
317 #define HCI_ADV_MONITOR_EXT_NONE		1
318 #define HCI_ADV_MONITOR_EXT_MSFT		2
319 
320 #define HCI_MAX_SHORT_NAME_LENGTH	10
321 
322 #define HCI_CONN_HANDLE_MAX		0x0eff
323 #define HCI_CONN_HANDLE_UNSET(_handle)	((_handle) > HCI_CONN_HANDLE_MAX)
324 
325 /* Min encryption key size to match with SMP */
326 #define HCI_MIN_ENC_KEY_SIZE		7
327 
328 /* Default LE RPA expiry time, 15 minutes */
329 #define HCI_DEFAULT_RPA_TIMEOUT		(15 * 60)
330 
331 /* Default min/max age of connection information (1s/3s) */
332 #define DEFAULT_CONN_INFO_MIN_AGE	1000
333 #define DEFAULT_CONN_INFO_MAX_AGE	3000
334 /* Default authenticated payload timeout: 30 s (0x0bb8 = 3000 * 10 ms) */
335 #define DEFAULT_AUTH_PAYLOAD_TIMEOUT   0x0bb8
336 
337 #define HCI_MAX_PAGES	3
338 
339 struct hci_dev {
340 	struct list_head list;
341 	struct mutex	lock;
342 
343 	struct ida	unset_handle_ida;
344 
345 	const char	*name;
346 	unsigned long	flags;
347 	__u16		id;
348 	__u8		bus;
349 	bdaddr_t	bdaddr;
350 	bdaddr_t	setup_addr;
351 	bdaddr_t	public_addr;
352 	bdaddr_t	random_addr;
353 	bdaddr_t	static_addr;
354 	__u8		adv_addr_type;
355 	__u8		dev_name[HCI_MAX_NAME_LENGTH];
356 	__u8		short_name[HCI_MAX_SHORT_NAME_LENGTH];
357 	__u8		eir[HCI_MAX_EIR_LENGTH];
358 	__u16		appearance;
359 	__u8		dev_class[3];
360 	__u8		major_class;
361 	__u8		minor_class;
362 	__u8		max_page;
363 	__u8		features[HCI_MAX_PAGES][8];
364 	__u8		le_features[8];
365 	__u8		le_accept_list_size;
366 	__u8		le_resolv_list_size;
367 	__u8		le_num_of_adv_sets;
368 	__u8		le_states[8];
369 	__u8		mesh_ad_types[16];
370 	__u8		mesh_send_ref;
371 	__u8		commands[64];
372 	__u8		hci_ver;
373 	__u16		hci_rev;
374 	__u8		lmp_ver;
375 	__u16		manufacturer;
376 	__u16		lmp_subver;
377 	__u16		voice_setting;
378 	__u8		num_iac;
379 	__u16		stored_max_keys;
380 	__u16		stored_num_keys;
381 	__u8		io_capability;
382 	__s8		inq_tx_power;
383 	__u8		err_data_reporting;
384 	__u16		page_scan_interval;
385 	__u16		page_scan_window;
386 	__u8		page_scan_type;
387 	__u8		le_adv_channel_map;
388 	__u16		le_adv_min_interval;
389 	__u16		le_adv_max_interval;
390 	__u8		le_scan_type;
391 	__u16		le_scan_interval;
392 	__u16		le_scan_window;
393 	__u16		le_scan_int_suspend;
394 	__u16		le_scan_window_suspend;
395 	__u16		le_scan_int_discovery;
396 	__u16		le_scan_window_discovery;
397 	__u16		le_scan_int_adv_monitor;
398 	__u16		le_scan_window_adv_monitor;
399 	__u16		le_scan_int_connect;
400 	__u16		le_scan_window_connect;
401 	__u16		le_conn_min_interval;
402 	__u16		le_conn_max_interval;
403 	__u16		le_conn_latency;
404 	__u16		le_supv_timeout;
405 	__u16		le_def_tx_len;
406 	__u16		le_def_tx_time;
407 	__u16		le_max_tx_len;
408 	__u16		le_max_tx_time;
409 	__u16		le_max_rx_len;
410 	__u16		le_max_rx_time;
411 	__u8		le_max_key_size;
412 	__u8		le_min_key_size;
413 	__u16		discov_interleaved_timeout;
414 	__u16		conn_info_min_age;
415 	__u16		conn_info_max_age;
416 	__u16		auth_payload_timeout;
417 	__u8		min_enc_key_size;
418 	__u8		max_enc_key_size;
419 	__u8		pairing_opts;
420 	__u8		ssp_debug_mode;
421 	__u8		hw_error_code;
422 	__u32		clock;
423 	__u16		advmon_allowlist_duration;
424 	__u16		advmon_no_filter_duration;
425 	__u8		enable_advmon_interleave_scan;
426 
427 	__u16		devid_source;
428 	__u16		devid_vendor;
429 	__u16		devid_product;
430 	__u16		devid_version;
431 
432 	__u8		def_page_scan_type;
433 	__u16		def_page_scan_int;
434 	__u16		def_page_scan_window;
435 	__u8		def_inq_scan_type;
436 	__u16		def_inq_scan_int;
437 	__u16		def_inq_scan_window;
438 	__u16		def_br_lsto;
439 	__u16		def_page_timeout;
440 	__u16		def_multi_adv_rotation_duration;
441 	__u16		def_le_autoconnect_timeout;
442 	__s8		min_le_tx_power;
443 	__s8		max_le_tx_power;
444 
445 	__u16		pkt_type;
446 	__u16		esco_type;
447 	__u16		link_policy;
448 	__u16		link_mode;
449 
450 	__u32		idle_timeout;
451 	__u16		sniff_min_interval;
452 	__u16		sniff_max_interval;
453 
454 	unsigned int	auto_accept_delay;
455 
456 	unsigned long	quirks;
457 
458 	atomic_t	cmd_cnt;
459 	unsigned int	acl_cnt;
460 	unsigned int	sco_cnt;
461 	unsigned int	le_cnt;
462 	unsigned int	iso_cnt;
463 
464 	unsigned int	acl_mtu;
465 	unsigned int	sco_mtu;
466 	unsigned int	le_mtu;
467 	unsigned int	iso_mtu;
468 	unsigned int	acl_pkts;
469 	unsigned int	sco_pkts;
470 	unsigned int	le_pkts;
471 	unsigned int	iso_pkts;
472 
473 	unsigned long	acl_last_tx;
474 	unsigned long	le_last_tx;
475 
476 	__u8		le_tx_def_phys;
477 	__u8		le_rx_def_phys;
478 
479 	struct workqueue_struct	*workqueue;
480 	struct workqueue_struct	*req_workqueue;
481 
482 	struct work_struct	power_on;
483 	struct delayed_work	power_off;
484 	struct work_struct	error_reset;
485 	struct work_struct	cmd_sync_work;
486 	struct list_head	cmd_sync_work_list;
487 	struct mutex		cmd_sync_work_lock;
488 	struct mutex		unregister_lock;
489 	struct work_struct	cmd_sync_cancel_work;
490 	struct work_struct	reenable_adv_work;
491 
492 	__u16			discov_timeout;
493 	struct delayed_work	discov_off;
494 
495 	struct delayed_work	service_cache;
496 
497 	struct delayed_work	cmd_timer;
498 	struct delayed_work	ncmd_timer;
499 
500 	struct work_struct	rx_work;
501 	struct work_struct	cmd_work;
502 	struct work_struct	tx_work;
503 
504 	struct delayed_work	le_scan_disable;
505 
506 	struct sk_buff_head	rx_q;
507 	struct sk_buff_head	raw_q;
508 	struct sk_buff_head	cmd_q;
509 
510 	struct sk_buff		*sent_cmd;
511 	struct sk_buff		*recv_event;
512 
513 	struct mutex		req_lock;
514 	wait_queue_head_t	req_wait_q;
515 	__u32			req_status;
516 	__u32			req_result;
517 	struct sk_buff		*req_skb;
518 	struct sk_buff		*req_rsp;
519 
520 	void			*smp_data;
521 	void			*smp_bredr_data;
522 
523 	struct discovery_state	discovery;
524 
525 	bool			discovery_paused;
526 	int			advertising_old_state;
527 	bool			advertising_paused;
528 
529 	struct notifier_block	suspend_notifier;
530 	enum suspended_state	suspend_state_next;
531 	enum suspended_state	suspend_state;
532 	bool			scanning_paused;
533 	bool			suspended;
534 	u8			wake_reason;
535 	bdaddr_t		wake_addr;
536 	u8			wake_addr_type;
537 
538 	struct hci_conn_hash	conn_hash;
539 
540 	struct list_head	mesh_pending;
541 	struct list_head	mgmt_pending;
542 	struct list_head	reject_list;
543 	struct list_head	accept_list;
544 	struct list_head	uuids;
545 	struct list_head	link_keys;
546 	struct list_head	long_term_keys;
547 	struct list_head	identity_resolving_keys;
548 	struct list_head	remote_oob_data;
549 	struct list_head	le_accept_list;
550 	struct list_head	le_resolv_list;
551 	struct list_head	le_conn_params;
552 	struct list_head	pend_le_conns;
553 	struct list_head	pend_le_reports;
554 	struct list_head	blocked_keys;
555 	struct list_head	local_codecs;
556 
557 	struct hci_dev_stats	stat;
558 
559 	atomic_t		promisc;
560 
561 	const char		*hw_info;
562 	const char		*fw_info;
563 	struct dentry		*debugfs;
564 
565 	struct hci_devcoredump	dump;
566 
567 	struct device		dev;
568 
569 	struct rfkill		*rfkill;
570 
571 	DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
572 	hci_conn_flags_t	conn_flags;
573 
574 	__s8			adv_tx_power;
575 	__u8			adv_data[HCI_MAX_EXT_AD_LENGTH];
576 	__u8			adv_data_len;
577 	__u8			scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
578 	__u8			scan_rsp_data_len;
579 	__u8			per_adv_data[HCI_MAX_PER_AD_LENGTH];
580 	__u8			per_adv_data_len;
581 
582 	struct list_head	adv_instances;
583 	unsigned int		adv_instance_cnt;
584 	__u8			cur_adv_instance;
585 	__u16			adv_instance_timeout;
586 	struct delayed_work	adv_instance_expire;
587 
588 	struct idr		adv_monitors_idr;
589 	unsigned int		adv_monitors_cnt;
590 
591 	__u8			irk[16];
592 	__u32			rpa_timeout;
593 	struct delayed_work	rpa_expired;
594 	bdaddr_t		rpa;
595 
596 	struct delayed_work	mesh_send_done;
597 
598 	enum {
599 		INTERLEAVE_SCAN_NONE,
600 		INTERLEAVE_SCAN_NO_FILTER,
601 		INTERLEAVE_SCAN_ALLOWLIST
602 	} interleave_scan_state;
603 
604 	struct delayed_work	interleave_scan;
605 
606 	struct list_head	monitored_devices;
607 	bool			advmon_pend_notify;
608 
609 #if IS_ENABLED(CONFIG_BT_LEDS)
610 	struct led_trigger	*power_led;
611 #endif
612 
613 #if IS_ENABLED(CONFIG_BT_MSFTEXT)
614 	__u16			msft_opcode;
615 	void			*msft_data;
616 	bool			msft_curve_validity;
617 #endif
618 
619 #if IS_ENABLED(CONFIG_BT_AOSPEXT)
620 	bool			aosp_capable;
621 	bool			aosp_quality_report;
622 #endif
623 
624 	int (*open)(struct hci_dev *hdev);
625 	int (*close)(struct hci_dev *hdev);
626 	int (*flush)(struct hci_dev *hdev);
627 	int (*setup)(struct hci_dev *hdev);
628 	int (*shutdown)(struct hci_dev *hdev);
629 	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
630 	void (*notify)(struct hci_dev *hdev, unsigned int evt);
631 	void (*hw_error)(struct hci_dev *hdev, u8 code);
632 	int (*post_init)(struct hci_dev *hdev);
633 	int (*set_diag)(struct hci_dev *hdev, bool enable);
634 	int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
635 	void (*cmd_timeout)(struct hci_dev *hdev);
636 	void (*reset)(struct hci_dev *hdev);
637 	bool (*wakeup)(struct hci_dev *hdev);
638 	int (*set_quality_report)(struct hci_dev *hdev, bool enable);
639 	int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
640 	int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
641 				     struct bt_codec *codec, __u8 *vnd_len,
642 				     __u8 **vnd_data);
643 	u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb);
644 };
645 
646 #define HCI_PHY_HANDLE(handle)	(handle & 0xff)
647 
648 enum conn_reasons {
649 	CONN_REASON_PAIR_DEVICE,
650 	CONN_REASON_L2CAP_CHAN,
651 	CONN_REASON_SCO_CONNECT,
652 	CONN_REASON_ISO_CONNECT,
653 };
654 
655 struct hci_conn {
656 	struct list_head list;
657 
658 	atomic_t	refcnt;
659 
660 	bdaddr_t	dst;
661 	__u8		dst_type;
662 	bdaddr_t	src;
663 	__u8		src_type;
664 	bdaddr_t	init_addr;
665 	__u8		init_addr_type;
666 	bdaddr_t	resp_addr;
667 	__u8		resp_addr_type;
668 	__u8		adv_instance;
669 	__u16		handle;
670 	__u16		sync_handle;
671 	__u16		state;
672 	__u16		mtu;
673 	__u8		mode;
674 	__u8		type;
675 	__u8		role;
676 	bool		out;
677 	__u8		attempt;
678 	__u8		dev_class[3];
679 	__u8		features[HCI_MAX_PAGES][8];
680 	__u16		pkt_type;
681 	__u16		link_policy;
682 	__u8		key_type;
683 	__u8		auth_type;
684 	__u8		sec_level;
685 	__u8		pending_sec_level;
686 	__u8		pin_length;
687 	__u8		enc_key_size;
688 	__u8		io_capability;
689 	__u32		passkey_notify;
690 	__u8		passkey_entered;
691 	__u16		disc_timeout;
692 	__u16		conn_timeout;
693 	__u16		setting;
694 	__u16		auth_payload_timeout;
695 	__u16		le_conn_min_interval;
696 	__u16		le_conn_max_interval;
697 	__u16		le_conn_interval;
698 	__u16		le_conn_latency;
699 	__u16		le_supv_timeout;
700 	__u8		le_adv_data[HCI_MAX_EXT_AD_LENGTH];
701 	__u8		le_adv_data_len;
702 	__u8		le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
703 	__u16		le_per_adv_data_len;
704 	__u16		le_per_adv_data_offset;
705 	__u8		le_adv_phy;
706 	__u8		le_adv_sec_phy;
707 	__u8		le_tx_phy;
708 	__u8		le_rx_phy;
709 	__s8		rssi;
710 	__s8		tx_power;
711 	__s8		max_tx_power;
712 	struct bt_iso_qos iso_qos;
713 	unsigned long	flags;
714 
715 	enum conn_reasons conn_reason;
716 	__u8		abort_reason;
717 
718 	__u32		clock;
719 	__u16		clock_accuracy;
720 
721 	unsigned long	conn_info_timestamp;
722 
723 	__u8		remote_cap;
724 	__u8		remote_auth;
725 	__u8		remote_id;
726 
727 	unsigned int	sent;
728 
729 	struct sk_buff_head data_q;
730 	struct list_head chan_list;
731 
732 	struct delayed_work disc_work;
733 	struct delayed_work auto_accept_work;
734 	struct delayed_work idle_work;
735 	struct delayed_work le_conn_timeout;
736 
737 	struct device	dev;
738 	struct dentry	*debugfs;
739 
740 	struct hci_dev	*hdev;
741 	void		*l2cap_data;
742 	void		*sco_data;
743 	void		*iso_data;
744 
745 	struct list_head link_list;
746 	struct hci_conn	*parent;
747 	struct hci_link *link;
748 
749 	struct bt_codec codec;
750 
751 	void (*connect_cfm_cb)	(struct hci_conn *conn, u8 status);
752 	void (*security_cfm_cb)	(struct hci_conn *conn, u8 status);
753 	void (*disconn_cfm_cb)	(struct hci_conn *conn, u8 reason);
754 
755 	void (*cleanup)(struct hci_conn *conn);
756 };
757 
758 struct hci_link {
759 	struct list_head list;
760 	struct hci_conn *conn;
761 };
762 
763 struct hci_chan {
764 	struct list_head list;
765 	__u16 handle;
766 	struct hci_conn *conn;
767 	struct sk_buff_head data_q;
768 	unsigned int	sent;
769 	__u8		state;
770 };
771 
772 struct hci_conn_params {
773 	struct list_head list;
774 	struct list_head action;
775 
776 	bdaddr_t addr;
777 	u8 addr_type;
778 
779 	u16 conn_min_interval;
780 	u16 conn_max_interval;
781 	u16 conn_latency;
782 	u16 supervision_timeout;
783 
784 	enum {
785 		HCI_AUTO_CONN_DISABLED,
786 		HCI_AUTO_CONN_REPORT,
787 		HCI_AUTO_CONN_DIRECT,
788 		HCI_AUTO_CONN_ALWAYS,
789 		HCI_AUTO_CONN_LINK_LOSS,
790 		HCI_AUTO_CONN_EXPLICIT,
791 	} auto_connect;
792 
793 	struct hci_conn *conn;
794 	bool explicit_connect;
795 	/* Accessed without hdev->lock: */
796 	hci_conn_flags_t flags;
797 	u8  privacy_mode;
798 };
799 
800 extern struct list_head hci_dev_list;
801 extern struct list_head hci_cb_list;
802 extern rwlock_t hci_dev_list_lock;
803 extern struct mutex hci_cb_list_lock;
804 
805 #define hci_dev_set_flag(hdev, nr)             set_bit((nr), (hdev)->dev_flags)
806 #define hci_dev_clear_flag(hdev, nr)           clear_bit((nr), (hdev)->dev_flags)
807 #define hci_dev_change_flag(hdev, nr)          change_bit((nr), (hdev)->dev_flags)
808 #define hci_dev_test_flag(hdev, nr)            test_bit((nr), (hdev)->dev_flags)
809 #define hci_dev_test_and_set_flag(hdev, nr)    test_and_set_bit((nr), (hdev)->dev_flags)
810 #define hci_dev_test_and_clear_flag(hdev, nr)  test_and_clear_bit((nr), (hdev)->dev_flags)
811 #define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
812 
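/* Illustrative sketch (not in the original header): typical use of the
 * dev_flags helpers above.  example_le_enabled() is a hypothetical name;
 * HCI_LE_ENABLED is defined in <net/bluetooth/hci.h>.
 */
static inline bool example_le_enabled(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_LE_ENABLED);
}
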
813 #define hci_dev_clear_volatile_flags(hdev)			\
814 	do {							\
815 		hci_dev_clear_flag(hdev, HCI_LE_SCAN);		\
816 		hci_dev_clear_flag(hdev, HCI_LE_ADV);		\
817 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
818 		hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);	\
819 		hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);	\
820 	} while (0)
821 
822 #define hci_dev_le_state_simultaneous(hdev) \
823 	(!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \
824 	 (hdev->le_states[4] & 0x08) &&	/* Central */ \
825 	 (hdev->le_states[4] & 0x40) &&	/* Peripheral */ \
826 	 (hdev->le_states[3] & 0x10))	/* Simultaneous */
827 
828 /* ----- HCI interface to upper protocols ----- */
829 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
830 int l2cap_disconn_ind(struct hci_conn *hcon);
831 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
832 
833 #if IS_ENABLED(CONFIG_BT_BREDR)
834 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
835 void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
836 #else
837 static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
838 				  __u8 *flags)
839 {
840 	return 0;
841 }
842 
843 static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
844 {
845 }
846 #endif
847 
848 #if IS_ENABLED(CONFIG_BT_LE)
849 int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
850 void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
851 #else
852 static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
853 				  __u8 *flags)
854 {
855 	return 0;
856 }
857 static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
858 			    u16 flags)
859 {
860 }
861 #endif
862 
863 /* ----- Inquiry cache ----- */
864 #define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
865 #define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */
866 
867 static inline void discovery_init(struct hci_dev *hdev)
868 {
869 	hdev->discovery.state = DISCOVERY_STOPPED;
870 	INIT_LIST_HEAD(&hdev->discovery.all);
871 	INIT_LIST_HEAD(&hdev->discovery.unknown);
872 	INIT_LIST_HEAD(&hdev->discovery.resolve);
873 	hdev->discovery.report_invalid_rssi = true;
874 	hdev->discovery.rssi = HCI_RSSI_INVALID;
875 }
876 
877 static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
878 {
879 	hdev->discovery.result_filtering = false;
880 	hdev->discovery.report_invalid_rssi = true;
881 	hdev->discovery.rssi = HCI_RSSI_INVALID;
882 	hdev->discovery.uuid_count = 0;
883 	kfree(hdev->discovery.uuids);
884 	hdev->discovery.uuids = NULL;
885 }
886 
887 bool hci_discovery_active(struct hci_dev *hdev);
888 
889 void hci_discovery_set_state(struct hci_dev *hdev, int state);
890 
891 static inline int inquiry_cache_empty(struct hci_dev *hdev)
892 {
893 	return list_empty(&hdev->discovery.all);
894 }
895 
896 static inline long inquiry_cache_age(struct hci_dev *hdev)
897 {
898 	struct discovery_state *c = &hdev->discovery;
899 	return jiffies - c->timestamp;
900 }
901 
902 static inline long inquiry_entry_age(struct inquiry_entry *e)
903 {
904 	return jiffies - e->timestamp;
905 }
906 
907 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
908 					       bdaddr_t *bdaddr);
909 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
910 						       bdaddr_t *bdaddr);
911 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
912 						       bdaddr_t *bdaddr,
913 						       int state);
914 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
915 				      struct inquiry_entry *ie);
916 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
917 			     bool name_known);
918 void hci_inquiry_cache_flush(struct hci_dev *hdev);
919 
920 /* ----- HCI Connections ----- */
921 enum {
922 	HCI_CONN_AUTH_PEND,
923 	HCI_CONN_ENCRYPT_PEND,
924 	HCI_CONN_RSWITCH_PEND,
925 	HCI_CONN_MODE_CHANGE_PEND,
926 	HCI_CONN_SCO_SETUP_PEND,
927 	HCI_CONN_MGMT_CONNECTED,
928 	HCI_CONN_SSP_ENABLED,
929 	HCI_CONN_SC_ENABLED,
930 	HCI_CONN_AES_CCM,
931 	HCI_CONN_POWER_SAVE,
932 	HCI_CONN_FLUSH_KEY,
933 	HCI_CONN_ENCRYPT,
934 	HCI_CONN_AUTH,
935 	HCI_CONN_SECURE,
936 	HCI_CONN_FIPS,
937 	HCI_CONN_STK_ENCRYPT,
938 	HCI_CONN_AUTH_INITIATOR,
939 	HCI_CONN_DROP,
940 	HCI_CONN_CANCEL,
941 	HCI_CONN_PARAM_REMOVAL_PEND,
942 	HCI_CONN_NEW_LINK_KEY,
943 	HCI_CONN_SCANNING,
944 	HCI_CONN_AUTH_FAILURE,
945 	HCI_CONN_PER_ADV,
946 	HCI_CONN_BIG_CREATED,
947 	HCI_CONN_CREATE_CIS,
948 	HCI_CONN_BIG_SYNC,
949 	HCI_CONN_BIG_SYNC_FAILED,
950 	HCI_CONN_PA_SYNC,
951 	HCI_CONN_PA_SYNC_FAILED,
952 };
953 
954 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
955 {
956 	struct hci_dev *hdev = conn->hdev;
957 	return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
958 	       test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
959 }
960 
961 static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
962 {
963 	struct hci_dev *hdev = conn->hdev;
964 	return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
965 	       test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
966 }
967 
968 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
969 {
970 	struct hci_conn_hash *h = &hdev->conn_hash;
971 	list_add_tail_rcu(&c->list, &h->list);
972 	switch (c->type) {
973 	case ACL_LINK:
974 		h->acl_num++;
975 		break;
976 	case LE_LINK:
977 		h->le_num++;
978 		if (c->role == HCI_ROLE_SLAVE)
979 			h->le_num_peripheral++;
980 		break;
981 	case SCO_LINK:
982 	case ESCO_LINK:
983 		h->sco_num++;
984 		break;
985 	case ISO_LINK:
986 		h->iso_num++;
987 		break;
988 	}
989 }
990 
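/* Annotation (not in the original header): hci_conn_hash_del() below calls
 * synchronize_rcu() and may sleep, so it must not be used in atomic context.
 */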
991 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
992 {
993 	struct hci_conn_hash *h = &hdev->conn_hash;
994 
995 	list_del_rcu(&c->list);
996 	synchronize_rcu();
997 
998 	switch (c->type) {
999 	case ACL_LINK:
1000 		h->acl_num--;
1001 		break;
1002 	case LE_LINK:
1003 		h->le_num--;
1004 		if (c->role == HCI_ROLE_SLAVE)
1005 			h->le_num_peripheral--;
1006 		break;
1007 	case SCO_LINK:
1008 	case ESCO_LINK:
1009 		h->sco_num--;
1010 		break;
1011 	case ISO_LINK:
1012 		h->iso_num--;
1013 		break;
1014 	}
1015 }
1016 
1017 static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
1018 {
1019 	struct hci_conn_hash *h = &hdev->conn_hash;
1020 	switch (type) {
1021 	case ACL_LINK:
1022 		return h->acl_num;
1023 	case LE_LINK:
1024 		return h->le_num;
1025 	case SCO_LINK:
1026 	case ESCO_LINK:
1027 		return h->sco_num;
1028 	case ISO_LINK:
1029 		return h->iso_num;
1030 	default:
1031 		return 0;
1032 	}
1033 }
1034 
1035 static inline unsigned int hci_conn_count(struct hci_dev *hdev)
1036 {
1037 	struct hci_conn_hash *c = &hdev->conn_hash;
1038 
1039 	return c->acl_num + c->sco_num + c->le_num + c->iso_num;
1040 }
1041 
1042 static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
1043 {
1044 	struct hci_conn_hash *h = &hdev->conn_hash;
1045 	struct hci_conn  *c;
1046 
1047 	rcu_read_lock();
1048 
1049 	list_for_each_entry_rcu(c, &h->list, list) {
1050 		if (c == conn) {
1051 			rcu_read_unlock();
1052 			return true;
1053 		}
1054 	}
1055 	rcu_read_unlock();
1056 
1057 	return false;
1058 }
1059 
1060 static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
1061 {
1062 	struct hci_conn_hash *h = &hdev->conn_hash;
1063 	struct hci_conn *c;
1064 	__u8 type = INVALID_LINK;
1065 
1066 	rcu_read_lock();
1067 
1068 	list_for_each_entry_rcu(c, &h->list, list) {
1069 		if (c->handle == handle) {
1070 			type = c->type;
1071 			break;
1072 		}
1073 	}
1074 
1075 	rcu_read_unlock();
1076 
1077 	return type;
1078 }
1079 
1080 static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
1081 							bdaddr_t *ba, __u8 bis)
1082 {
1083 	struct hci_conn_hash *h = &hdev->conn_hash;
1084 	struct hci_conn  *c;
1085 
1086 	rcu_read_lock();
1087 
1088 	list_for_each_entry_rcu(c, &h->list, list) {
1089 		if (bacmp(&c->dst, ba) || c->type != ISO_LINK)
1090 			continue;
1091 
1092 		if (c->iso_qos.bcast.bis == bis) {
1093 			rcu_read_unlock();
1094 			return c;
1095 		}
1096 	}
1097 	rcu_read_unlock();
1098 
1099 	return NULL;
1100 }
1101 
1102 static inline struct hci_conn *
1103 hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
1104 				 bdaddr_t *ba,
1105 				 __u8 big, __u8 bis)
1106 {
1107 	struct hci_conn_hash *h = &hdev->conn_hash;
1108 	struct hci_conn  *c;
1109 
1110 	rcu_read_lock();
1111 
1112 	list_for_each_entry_rcu(c, &h->list, list) {
1113 		if (bacmp(&c->dst, ba) || c->type != ISO_LINK ||
1114 			!test_bit(HCI_CONN_PER_ADV, &c->flags))
1115 			continue;
1116 
1117 		if (c->iso_qos.bcast.big == big &&
1118 		    c->iso_qos.bcast.bis == bis) {
1119 			rcu_read_unlock();
1120 			return c;
1121 		}
1122 	}
1123 	rcu_read_unlock();
1124 
1125 	return NULL;
1126 }
1127 
1128 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
1129 								__u16 handle)
1130 {
1131 	struct hci_conn_hash *h = &hdev->conn_hash;
1132 	struct hci_conn  *c;
1133 
1134 	rcu_read_lock();
1135 
1136 	list_for_each_entry_rcu(c, &h->list, list) {
1137 		if (c->handle == handle) {
1138 			rcu_read_unlock();
1139 			return c;
1140 		}
1141 	}
1142 	rcu_read_unlock();
1143 
1144 	return NULL;
1145 }
1146 
1147 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
1148 							__u8 type, bdaddr_t *ba)
1149 {
1150 	struct hci_conn_hash *h = &hdev->conn_hash;
1151 	struct hci_conn  *c;
1152 
1153 	rcu_read_lock();
1154 
1155 	list_for_each_entry_rcu(c, &h->list, list) {
1156 		if (c->type == type && !bacmp(&c->dst, ba)) {
1157 			rcu_read_unlock();
1158 			return c;
1159 		}
1160 	}
1161 
1162 	rcu_read_unlock();
1163 
1164 	return NULL;
1165 }
1166 
1167 static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
1168 						       bdaddr_t *ba,
1169 						       __u8 ba_type)
1170 {
1171 	struct hci_conn_hash *h = &hdev->conn_hash;
1172 	struct hci_conn  *c;
1173 
1174 	rcu_read_lock();
1175 
1176 	list_for_each_entry_rcu(c, &h->list, list) {
1177 		if (c->type != LE_LINK)
1178 			continue;
1179 
1180 		if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
1181 			rcu_read_unlock();
1182 			return c;
1183 		}
1184 	}
1185 
1186 	rcu_read_unlock();
1187 
1188 	return NULL;
1189 }
1190 
1191 static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
1192 							bdaddr_t *ba,
1193 							__u8 ba_type,
1194 							__u8 cig,
1195 							__u8 id)
1196 {
1197 	struct hci_conn_hash *h = &hdev->conn_hash;
1198 	struct hci_conn  *c;
1199 
1200 	rcu_read_lock();
1201 
1202 	list_for_each_entry_rcu(c, &h->list, list) {
1203 		if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
1204 			continue;
1205 
1206 		/* Match CIG ID if set */
1207 		if (cig != c->iso_qos.ucast.cig)
1208 			continue;
1209 
1210 		/* Match CIS ID if set */
1211 		if (id != c->iso_qos.ucast.cis)
1212 			continue;
1213 
1214 		/* Match destination address if set */
1215 		if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) {
1216 			rcu_read_unlock();
1217 			return c;
1218 		}
1219 	}
1220 
1221 	rcu_read_unlock();
1222 
1223 	return NULL;
1224 }
1225 
1226 static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
1227 							__u8 handle)
1228 {
1229 	struct hci_conn_hash *h = &hdev->conn_hash;
1230 	struct hci_conn  *c;
1231 
1232 	rcu_read_lock();
1233 
1234 	list_for_each_entry_rcu(c, &h->list, list) {
1235 		if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
1236 			continue;
1237 
1238 		if (handle == c->iso_qos.ucast.cig) {
1239 			rcu_read_unlock();
1240 			return c;
1241 		}
1242 	}
1243 
1244 	rcu_read_unlock();
1245 
1246 	return NULL;
1247 }
1248 
1249 static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
1250 							__u8 handle)
1251 {
1252 	struct hci_conn_hash *h = &hdev->conn_hash;
1253 	struct hci_conn  *c;
1254 
1255 	rcu_read_lock();
1256 
1257 	list_for_each_entry_rcu(c, &h->list, list) {
1258 		if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK)
1259 			continue;
1260 
1261 		if (handle == c->iso_qos.bcast.big) {
1262 			rcu_read_unlock();
1263 			return c;
1264 		}
1265 	}
1266 
1267 	rcu_read_unlock();
1268 
1269 	return NULL;
1270 }
1271 
1272 static inline struct hci_conn *
1273 hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle,  __u16 state)
1274 {
1275 	struct hci_conn_hash *h = &hdev->conn_hash;
1276 	struct hci_conn  *c;
1277 
1278 	rcu_read_lock();
1279 
1280 	list_for_each_entry_rcu(c, &h->list, list) {
1281 		if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK ||
1282 			c->state != state)
1283 			continue;
1284 
1285 		if (handle == c->iso_qos.bcast.big) {
1286 			rcu_read_unlock();
1287 			return c;
1288 		}
1289 	}
1290 
1291 	rcu_read_unlock();
1292 
1293 	return NULL;
1294 }
1295 
1296 static inline struct hci_conn *
1297 hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
1298 {
1299 	struct hci_conn_hash *h = &hdev->conn_hash;
1300 	struct hci_conn  *c;
1301 
1302 	rcu_read_lock();
1303 
1304 	list_for_each_entry_rcu(c, &h->list, list) {
1305 		if (c->type != ISO_LINK ||
1306 			!test_bit(HCI_CONN_PA_SYNC, &c->flags))
1307 			continue;
1308 
1309 		if (c->iso_qos.bcast.big == big) {
1310 			rcu_read_unlock();
1311 			return c;
1312 		}
1313 	}
1314 	rcu_read_unlock();
1315 
1316 	return NULL;
1317 }
1318 
1319 static inline struct hci_conn *
1320 hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
1321 {
1322 	struct hci_conn_hash *h = &hdev->conn_hash;
1323 	struct hci_conn  *c;
1324 
1325 	rcu_read_lock();
1326 
1327 	list_for_each_entry_rcu(c, &h->list, list) {
1328 		if (c->type != ISO_LINK)
1329 			continue;
1330 
1331 		if (c->sync_handle == sync_handle) {
1332 			rcu_read_unlock();
1333 			return c;
1334 		}
1335 	}
1336 	rcu_read_unlock();
1337 
1338 	return NULL;
1339 }
1340 
1341 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
1342 							__u8 type, __u16 state)
1343 {
1344 	struct hci_conn_hash *h = &hdev->conn_hash;
1345 	struct hci_conn  *c;
1346 
1347 	rcu_read_lock();
1348 
1349 	list_for_each_entry_rcu(c, &h->list, list) {
1350 		if (c->type == type && c->state == state) {
1351 			rcu_read_unlock();
1352 			return c;
1353 		}
1354 	}
1355 
1356 	rcu_read_unlock();
1357 
1358 	return NULL;
1359 }
1360 
1361 typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data);
1362 static inline void hci_conn_hash_list_state(struct hci_dev *hdev,
1363 					    hci_conn_func_t func, __u8 type,
1364 					    __u16 state, void *data)
1365 {
1366 	struct hci_conn_hash *h = &hdev->conn_hash;
1367 	struct hci_conn  *c;
1368 
1369 	if (!func)
1370 		return;
1371 
1372 	rcu_read_lock();
1373 
1374 	list_for_each_entry_rcu(c, &h->list, list) {
1375 		if (c->type == type && c->state == state)
1376 			func(c, data);
1377 	}
1378 
1379 	rcu_read_unlock();
1380 }
1381 
1382 static inline void hci_conn_hash_list_flag(struct hci_dev *hdev,
1383 					    hci_conn_func_t func, __u8 type,
1384 					    __u8 flag, void *data)
1385 {
1386 	struct hci_conn_hash *h = &hdev->conn_hash;
1387 	struct hci_conn  *c;
1388 
1389 	if (!func)
1390 		return;
1391 
1392 	rcu_read_lock();
1393 
1394 	list_for_each_entry_rcu(c, &h->list, list) {
1395 		if (c->type == type && test_bit(flag, &c->flags))
1396 			func(c, data);
1397 	}
1398 
1399 	rcu_read_unlock();
1400 }
1401 
1402 static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
1403 {
1404 	struct hci_conn_hash *h = &hdev->conn_hash;
1405 	struct hci_conn  *c;
1406 
1407 	rcu_read_lock();
1408 
1409 	list_for_each_entry_rcu(c, &h->list, list) {
1410 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
1411 		    !test_bit(HCI_CONN_SCANNING, &c->flags)) {
1412 			rcu_read_unlock();
1413 			return c;
1414 		}
1415 	}
1416 
1417 	rcu_read_unlock();
1418 
1419 	return NULL;
1420 }
1421 
1422 /* Returns true if an LE connection is in the scanning state */
1423 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1424 {
1425 	struct hci_conn_hash *h = &hdev->conn_hash;
1426 	struct hci_conn  *c;
1427 
1428 	rcu_read_lock();
1429 
1430 	list_for_each_entry_rcu(c, &h->list, list) {
1431 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
1432 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
1433 			rcu_read_unlock();
1434 			return true;
1435 		}
1436 	}
1437 
1438 	rcu_read_unlock();
1439 
1440 	return false;
1441 }
1442 
1443 int hci_disconnect(struct hci_conn *conn, __u8 reason);
1444 bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
1445 void hci_sco_setup(struct hci_conn *conn, __u8 status);
1446 bool hci_iso_setup_path(struct hci_conn *conn);
1447 int hci_le_create_cis_pending(struct hci_dev *hdev);
1448 int hci_conn_check_create_cis(struct hci_conn *conn);
1449 
1450 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
1451 			      u8 role, u16 handle);
1452 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1453 				    bdaddr_t *dst, u8 role);
1454 void hci_conn_del(struct hci_conn *conn);
1455 void hci_conn_hash_flush(struct hci_dev *hdev);
1456 
1457 struct hci_chan *hci_chan_create(struct hci_conn *conn);
1458 void hci_chan_del(struct hci_chan *chan);
1459 void hci_chan_list_flush(struct hci_conn *conn);
1460 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
1461 
1462 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1463 				     u8 dst_type, u8 sec_level,
1464 				     u16 conn_timeout,
1465 				     enum conn_reasons conn_reason);
1466 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1467 				u8 dst_type, bool dst_resolved, u8 sec_level,
1468 				u16 conn_timeout, u8 role, u8 phy, u8 sec_phy);
1469 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
1470 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1471 				 u8 sec_level, u8 auth_type,
1472 				 enum conn_reasons conn_reason, u16 timeout);
1473 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1474 				 __u16 setting, struct bt_codec *codec,
1475 				 u16 timeout);
1476 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1477 			      __u8 dst_type, struct bt_iso_qos *qos);
1478 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
1479 			      struct bt_iso_qos *qos,
1480 			      __u8 base_len, __u8 *base);
1481 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
1482 				 __u8 dst_type, struct bt_iso_qos *qos);
1483 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
1484 				 __u8 dst_type, struct bt_iso_qos *qos,
1485 				 __u8 data_len, __u8 *data);
1486 struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
1487 		       __u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
1488 int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
1489 			   struct bt_iso_qos *qos,
1490 			   __u16 sync_handle, __u8 num_bis, __u8 bis[]);
1491 int hci_conn_check_link_mode(struct hci_conn *conn);
1492 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
1493 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1494 		      bool initiator);
1495 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
1496 
1497 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
1498 
1499 void hci_conn_failed(struct hci_conn *conn, u8 status);
1500 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle);
1501 
1502 /*
1503  * hci_conn_get() and hci_conn_put() are used to control the life-time of an
1504  * "hci_conn" object. They do not guarantee that the hci_conn object is running,
1505  * working or anything else. They just guarantee that the object is available
1506  * and can be dereferenced. So you can use its locks, local variables and any
1507  * other constant data.
1508  * Before accessing runtime data, you _must_ lock the object and then check that
1509  * it is still running. As soon as you release the locks, the connection might
1510  * get dropped, though.
1511  *
1512  * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
1513  * how long the underlying connection is held. So every channel that runs on the
1514  * hci_conn object calls this to prevent the connection from disappearing. As
1515  * long as you hold a connection, you must also guarantee that you have a valid
1516  * reference to it via hci_conn_get() (or the initial reference from
1517  * hci_conn_add()).
1518  * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't
1519  * break anything because nobody relies on that. But this means we cannot use
1520  * _get()/_drop() inside hold()/drop(); the caller must have a valid ref (FIXME).
1521  */
1522 
1523 static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
1524 {
1525 	get_device(&conn->dev);
1526 	return conn;
1527 }
1528 
1529 static inline void hci_conn_put(struct hci_conn *conn)
1530 {
1531 	put_device(&conn->dev);
1532 }
1533 
1534 static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn)
1535 {
1536 	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
1537 
1538 	atomic_inc(&conn->refcnt);
1539 	cancel_delayed_work(&conn->disc_work);
1540 
1541 	return conn;
1542 }
1543 
1544 static inline void hci_conn_drop(struct hci_conn *conn)
1545 {
1546 	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
1547 
1548 	if (atomic_dec_and_test(&conn->refcnt)) {
1549 		unsigned long timeo;
1550 
1551 		switch (conn->type) {
1552 		case ACL_LINK:
1553 		case LE_LINK:
1554 			cancel_delayed_work(&conn->idle_work);
1555 			if (conn->state == BT_CONNECTED) {
1556 				timeo = conn->disc_timeout;
1557 				if (!conn->out)
1558 					timeo *= 2;
1559 			} else {
1560 				timeo = 0;
1561 			}
1562 			break;
1563 
1564 		default:
1565 			timeo = 0;
1566 			break;
1567 		}
1568 
1569 		cancel_delayed_work(&conn->disc_work);
1570 		queue_delayed_work(conn->hdev->workqueue,
1571 				   &conn->disc_work, timeo);
1572 	}
1573 }
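
/* Illustrative sketch (not in the original header) of how a caller might
 * combine the two reference schemes described in the comment above.
 * example_use_conn() is a hypothetical helper; locking and error handling
 * are omitted.
 */
static inline void example_use_conn(struct hci_conn *conn)
{
	hci_conn_get(conn);	/* keep the object dereferenceable */
	hci_conn_hold(conn);	/* keep the underlying link alive */

	/* ... use the connection: read conn->hdev, queue work, etc. ... */

	hci_conn_drop(conn);	/* may arm disc_work to disconnect later */
	hci_conn_put(conn);	/* release the object reference */
}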
1574 
1575 /* ----- HCI Devices ----- */
1576 static inline void hci_dev_put(struct hci_dev *d)
1577 {
1578 	BT_DBG("%s orig refcnt %d", d->name,
1579 	       kref_read(&d->dev.kobj.kref));
1580 
1581 	put_device(&d->dev);
1582 }
1583 
1584 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
1585 {
1586 	BT_DBG("%s orig refcnt %d", d->name,
1587 	       kref_read(&d->dev.kobj.kref));
1588 
1589 	get_device(&d->dev);
1590 	return d;
1591 }
1592 
1593 #define hci_dev_lock(d)		mutex_lock(&d->lock)
1594 #define hci_dev_unlock(d)	mutex_unlock(&d->lock)
1595 
1596 #define to_hci_dev(d) container_of(d, struct hci_dev, dev)
1597 #define to_hci_conn(c) container_of(c, struct hci_conn, dev)
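
/* Annotation (not in the original header): driver-model callbacks receive a
 * plain struct device and convert it back with these helpers, e.g.
 *	struct hci_dev *hdev = to_hci_dev(dev);
 */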
1598 
1599 static inline void *hci_get_drvdata(struct hci_dev *hdev)
1600 {
1601 	return dev_get_drvdata(&hdev->dev);
1602 }
1603 
1604 static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
1605 {
1606 	dev_set_drvdata(&hdev->dev, data);
1607 }
1608 
1609 static inline void *hci_get_priv(struct hci_dev *hdev)
1610 {
1611 	return (char *)hdev + sizeof(*hdev);
1612 }
1613 
1614 struct hci_dev *hci_dev_get(int index);
1615 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);
1616 
1617 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv);
1618 
1619 static inline struct hci_dev *hci_alloc_dev(void)
1620 {
1621 	return hci_alloc_dev_priv(0);
1622 }
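
/* Illustrative sketch (not in the original header): a driver can reserve
 * private storage directly behind struct hci_dev with hci_alloc_dev_priv()
 * and reach it later through hci_get_priv().  struct example_priv and
 * example_alloc_hdev() are hypothetical names.
 */
struct example_priv {
	int irq;
};

static inline struct hci_dev *example_alloc_hdev(void)
{
	struct hci_dev *hdev = hci_alloc_dev_priv(sizeof(struct example_priv));
	struct example_priv *priv;

	if (!hdev)
		return NULL;

	priv = hci_get_priv(hdev);
	priv->irq = -1;		/* not wired up yet */

	return hdev;
}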
1623 
1624 void hci_free_dev(struct hci_dev *hdev);
1625 int hci_register_dev(struct hci_dev *hdev);
1626 void hci_unregister_dev(struct hci_dev *hdev);
1627 void hci_release_dev(struct hci_dev *hdev);
1628 int hci_register_suspend_notifier(struct hci_dev *hdev);
1629 int hci_unregister_suspend_notifier(struct hci_dev *hdev);
1630 int hci_suspend_dev(struct hci_dev *hdev);
1631 int hci_resume_dev(struct hci_dev *hdev);
1632 int hci_reset_dev(struct hci_dev *hdev);
1633 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
1634 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
1635 __printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
1636 __printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
1637 
1638 static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode)
1639 {
1640 #if IS_ENABLED(CONFIG_BT_MSFTEXT)
1641 	hdev->msft_opcode = opcode;
1642 #endif
1643 }
1644 
1645 static inline void hci_set_aosp_capable(struct hci_dev *hdev)
1646 {
1647 #if IS_ENABLED(CONFIG_BT_AOSPEXT)
1648 	hdev->aosp_capable = true;
1649 #endif
1650 }
1651 
1652 static inline void hci_devcd_setup(struct hci_dev *hdev)
1653 {
1654 #ifdef CONFIG_DEV_COREDUMP
1655 	INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx);
1656 	INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout);
1657 	skb_queue_head_init(&hdev->dump.dump_q);
1658 #endif
1659 }
1660 
1661 int hci_dev_open(__u16 dev);
1662 int hci_dev_close(__u16 dev);
1663 int hci_dev_do_close(struct hci_dev *hdev);
1664 int hci_dev_reset(__u16 dev);
1665 int hci_dev_reset_stat(__u16 dev);
1666 int hci_dev_cmd(unsigned int cmd, void __user *arg);
1667 int hci_get_dev_list(void __user *arg);
1668 int hci_get_dev_info(void __user *arg);
1669 int hci_get_conn_list(void __user *arg);
1670 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
1671 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
1672 int hci_inquiry(void __user *arg);
1673 
1674 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
1675 					   bdaddr_t *bdaddr, u8 type);
1676 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
1677 				    struct list_head *list, bdaddr_t *bdaddr,
1678 				    u8 type);
1679 struct bdaddr_list_with_flags *
1680 hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr,
1681 				  u8 type);
1682 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
1683 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
1684 				 u8 type, u8 *peer_irk, u8 *local_irk);
1685 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
1686 				   u8 type, u32 flags);
1687 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
1688 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
1689 				 u8 type);
1690 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
1691 				   u8 type);
1692 void hci_bdaddr_list_clear(struct list_head *list);
1693 
1694 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
1695 					       bdaddr_t *addr, u8 addr_type);
1696 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
1697 					    bdaddr_t *addr, u8 addr_type);
1698 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
1699 void hci_conn_params_clear_disabled(struct hci_dev *hdev);
1700 void hci_conn_params_free(struct hci_conn_params *param);
1701 
1702 void hci_pend_le_list_del_init(struct hci_conn_params *param);
1703 void hci_pend_le_list_add(struct hci_conn_params *param,
1704 			  struct list_head *list);
1705 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
1706 						  bdaddr_t *addr,
1707 						  u8 addr_type);
1708 
1709 void hci_uuids_clear(struct hci_dev *hdev);
1710 
1711 void hci_link_keys_clear(struct hci_dev *hdev);
1712 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
1713 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1714 				  bdaddr_t *bdaddr, u8 *val, u8 type,
1715 				  u8 pin_len, bool *persistent);
1716 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1717 			    u8 addr_type, u8 type, u8 authenticated,
1718 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
1719 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1720 			     u8 addr_type, u8 role);
1721 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
1722 void hci_smp_ltks_clear(struct hci_dev *hdev);
1723 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
1724 
1725 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
1726 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1727 				     u8 addr_type);
1728 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1729 			    u8 addr_type, u8 val[16], bdaddr_t *rpa);
1730 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
1731 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]);
1732 void hci_blocked_keys_clear(struct hci_dev *hdev);
1733 void hci_smp_irks_clear(struct hci_dev *hdev);
1734 
1735 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
1736 
1737 void hci_remote_oob_data_clear(struct hci_dev *hdev);
1738 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1739 					  bdaddr_t *bdaddr, u8 bdaddr_type);
1740 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1741 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1742 			    u8 *hash256, u8 *rand256);
1743 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1744 			       u8 bdaddr_type);
1745 
1746 void hci_adv_instances_clear(struct hci_dev *hdev);
1747 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
1748 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
1749 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1750 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1751 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1752 				      u16 timeout, u16 duration, s8 tx_power,
1753 				      u32 min_interval, u32 max_interval,
1754 				      u8 mesh_handle);
1755 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1756 				      u32 flags, u8 data_len, u8 *data,
1757 				      u32 min_interval, u32 max_interval);
1758 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1759 			 u16 adv_data_len, u8 *adv_data,
1760 			 u16 scan_rsp_len, u8 *scan_rsp_data);
1761 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
1762 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
1763 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
1764 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);
1765 
1766 void hci_adv_monitors_clear(struct hci_dev *hdev);
1767 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
1768 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
1769 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle);
1770 int hci_remove_all_adv_monitor(struct hci_dev *hdev);
1771 bool hci_is_adv_monitoring(struct hci_dev *hdev);
1772 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev);
1773 
1774 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
1775 
1776 void hci_init_sysfs(struct hci_dev *hdev);
1777 void hci_conn_init_sysfs(struct hci_conn *conn);
1778 void hci_conn_add_sysfs(struct hci_conn *conn);
1779 void hci_conn_del_sysfs(struct hci_conn *conn);
1780 
1781 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
1782 #define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent)
1783 
1784 /* ----- LMP capabilities ----- */
1785 #define lmp_encrypt_capable(dev)   ((dev)->features[0][0] & LMP_ENCRYPT)
1786 #define lmp_rswitch_capable(dev)   ((dev)->features[0][0] & LMP_RSWITCH)
1787 #define lmp_hold_capable(dev)      ((dev)->features[0][0] & LMP_HOLD)
1788 #define lmp_sniff_capable(dev)     ((dev)->features[0][0] & LMP_SNIFF)
1789 #define lmp_park_capable(dev)      ((dev)->features[0][1] & LMP_PARK)
1790 #define lmp_inq_rssi_capable(dev)  ((dev)->features[0][3] & LMP_RSSI_INQ)
1791 #define lmp_esco_capable(dev)      ((dev)->features[0][3] & LMP_ESCO)
1792 #define lmp_bredr_capable(dev)     (!((dev)->features[0][4] & LMP_NO_BREDR))
1793 #define lmp_le_capable(dev)        ((dev)->features[0][4] & LMP_LE)
1794 #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
1795 #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
1796 #define lmp_esco_2m_capable(dev)   ((dev)->features[0][5] & LMP_EDR_ESCO_2M)
1797 #define lmp_ext_inq_capable(dev)   ((dev)->features[0][6] & LMP_EXT_INQ)
1798 #define lmp_le_br_capable(dev)     (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
1799 #define lmp_ssp_capable(dev)       ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
1800 #define lmp_no_flush_capable(dev)  ((dev)->features[0][6] & LMP_NO_FLUSH)
1801 #define lmp_lsto_capable(dev)      ((dev)->features[0][7] & LMP_LSTO)
1802 #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
1803 #define lmp_ext_feat_capable(dev)  ((dev)->features[0][7] & LMP_EXTFEATURES)
1804 #define lmp_transp_capable(dev)    ((dev)->features[0][2] & LMP_TRANSPARENT)
1805 #define lmp_edr_2m_capable(dev)    ((dev)->features[0][3] & LMP_EDR_2M)
1806 #define lmp_edr_3m_capable(dev)    ((dev)->features[0][3] & LMP_EDR_3M)
1807 #define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
1808 #define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
1809 
1810 /* ----- Extended LMP capabilities ----- */
1811 #define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL)
1812 #define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL)
1813 #define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
1814 #define lmp_sync_scan_capable(dev)  ((dev)->features[2][0] & LMP_SYNC_SCAN)
1815 #define lmp_sc_capable(dev)         ((dev)->features[2][1] & LMP_SC)
1816 #define lmp_ping_capable(dev)       ((dev)->features[2][1] & LMP_PING)
1817 
1818 /* ----- Host capabilities ----- */
1819 #define lmp_host_ssp_capable(dev)  ((dev)->features[1][0] & LMP_HOST_SSP)
1820 #define lmp_host_sc_capable(dev)   ((dev)->features[1][0] & LMP_HOST_SC)
1821 #define lmp_host_le_capable(dev)   (!!((dev)->features[1][0] & LMP_HOST_LE))
1822 #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
1823 
1824 #define hdev_is_powered(dev)   (test_bit(HCI_UP, &(dev)->flags) && \
1825 				!hci_dev_test_flag(dev, HCI_AUTO_OFF))
1826 #define bredr_sc_enabled(dev)  (lmp_sc_capable(dev) && \
1827 				hci_dev_test_flag(dev, HCI_SC_ENABLED))
1828 #define rpa_valid(dev)         (bacmp(&dev->rpa, BDADDR_ANY) && \
1829 				!hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
1830 #define adv_rpa_valid(adv)     (bacmp(&adv->random_addr, BDADDR_ANY) && \
1831 				!adv->rpa_expired)
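/* Usage note (illustrative): the capability and state helpers above are
 * plain boolean tests on cached feature bits and flags, so callers use
 * them to gate optional paths, e.g.:
 *
 *	if (!hdev_is_powered(hdev))
 *		return -ENETDOWN;	// example error choice
 *
 *	if (lmp_ssp_capable(hdev) && lmp_host_ssp_capable(hdev))
 *		// both controller and host support Secure Simple Pairing
 */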
1832 
1833 #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
1834 		      ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
1835 
1836 #define le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M))
1837 
1838 #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
1839 		      ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
1840 
1841 #define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \
1842 			       !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \
1843 					 &(dev)->quirks))
1844 
1845 #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
1846 			 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
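/* Sketch (assumes the caller is building extended scan parameters): the
 * scan_*() helpers test the default TX/RX PHY masks, so roughly one
 * parameter block is emitted per selected PHY:
 *
 *	u8 num_phy = 0;
 *
 *	if (scan_1m(hdev))
 *		num_phy++;	// add 1M PHY scanning parameters
 *	if (scan_coded(hdev))
 *		num_phy++;	// add Coded PHY scanning parameters
 */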
1847 
1848 #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
1849 
1850 /* Use LL Privacy based address resolution if supported */
1851 #define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
1852 			     hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
1853 
1854 #define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
1855 				   ((dev)->commands[39] & 0x04))
1856 
1857 #define read_key_size_capable(dev) \
1858 	((dev)->commands[20] & 0x10 && \
1859 	 !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &(dev)->quirks))
1860 
1861 /* Use the Enhanced Setup Synchronous Connection command if it is supported
1862  * and its broken quirk has not been set.
1863  */
1864 #define enhanced_sync_conn_capable(dev) \
1865 	(((dev)->commands[29] & 0x08) && \
1866 	 !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks))
1867 
1868 /* Use extended scanning if both extended scan parameters and extended scan enable are supported */
1869 #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
1870 			   ((dev)->commands[37] & 0x40) && \
1871 			   !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
1872 
1873 /* Use extended create connection if the command is supported */
1874 #define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
1875 
1876 /* Extended advertising support */
1877 #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
1878 
1879 /* Maximum advertising length */
1880 #define max_adv_len(dev) \
1881 	(ext_adv_capable(dev) ? HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH)
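/* Example (illustrative): callers validating advertising payloads clamp
 * against the feature-dependent maximum:
 *
 *	if (adv_data_len > max_adv_len(hdev))
 *		return -EINVAL;
 */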
1882 
1883 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
1884  *
1885  * C24: Mandatory if the LE Controller supports Connection State and either
1886  * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
1887  */
1888 #define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
1889 					 ext_adv_capable(dev))
1890 
1891 /* Periodic advertising support */
1892 #define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))
1893 
1894 /* CIS Central/Peripheral and BIS support */
1895 #define iso_capable(dev) (cis_capable(dev) || bis_capable(dev))
1896 #define cis_capable(dev) \
1897 	(cis_central_capable(dev) || cis_peripheral_capable(dev))
1898 #define cis_central_capable(dev) \
1899 	((dev)->le_features[3] & HCI_LE_CIS_CENTRAL)
1900 #define cis_peripheral_capable(dev) \
1901 	((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
1902 #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
1903 #define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
1904 
1905 #define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
1906 	(!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))
1907 
1908 /* ----- HCI protocols ----- */
1909 #define HCI_PROTO_DEFER             0x01
1910 
1911 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
1912 					__u8 type, __u8 *flags)
1913 {
1914 	switch (type) {
1915 	case ACL_LINK:
1916 		return l2cap_connect_ind(hdev, bdaddr);
1917 
1918 	case SCO_LINK:
1919 	case ESCO_LINK:
1920 		return sco_connect_ind(hdev, bdaddr, flags);
1921 
1922 	case ISO_LINK:
1923 		return iso_connect_ind(hdev, bdaddr, flags);
1924 
1925 	default:
1926 		BT_ERR("unknown link type %d", type);
1927 		return -EINVAL;
1928 	}
1929 }
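/* Illustrative use (modelled on the connection request event handling path,
 * where "ev" is assumed to be a struct hci_ev_conn_request): the protocol
 * may set HCI_PROTO_DEFER in *flags to postpone accepting the connection.
 *
 *	__u8 flags = 0;
 *
 *	if (hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
 *				  &flags) < 0)
 *		// reject the incoming connection
 *	else if (flags & HCI_PROTO_DEFER)
 *		// defer the accept until explicitly authorized
 */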
1930 
1931 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
1932 {
1933 	if (conn->type != ACL_LINK && conn->type != LE_LINK)
1934 		return HCI_ERROR_REMOTE_USER_TERM;
1935 
1936 	return l2cap_disconn_ind(conn);
1937 }
1938 
1939 /* ----- HCI callbacks ----- */
1940 struct hci_cb {
1941 	struct list_head list;
1942 
1943 	char *name;
1944 
1945 	void (*connect_cfm)	(struct hci_conn *conn, __u8 status);
1946 	void (*disconn_cfm)	(struct hci_conn *conn, __u8 status);
1947 	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
1948 								__u8 encrypt);
1949 	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
1950 	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
1951 };
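/* Hedged sketch of how a protocol hooks these callbacks (mirrors what
 * L2CAP/SCO/ISO do); the "sample" names are placeholders:
 *
 *	static struct hci_cb sample_cb = {
 *		.name		= "sample",
 *		.connect_cfm	= sample_connect_cfm,
 *		.disconn_cfm	= sample_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&sample_cb);	// on module init
 *	hci_unregister_cb(&sample_cb);	// on module exit
 */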
1952 
1953 static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
1954 {
1955 	struct hci_cb *cb;
1956 
1957 	mutex_lock(&hci_cb_list_lock);
1958 	list_for_each_entry(cb, &hci_cb_list, list) {
1959 		if (cb->connect_cfm)
1960 			cb->connect_cfm(conn, status);
1961 	}
1962 	mutex_unlock(&hci_cb_list_lock);
1963 
1964 	if (conn->connect_cfm_cb)
1965 		conn->connect_cfm_cb(conn, status);
1966 }
1967 
1968 static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
1969 {
1970 	struct hci_cb *cb;
1971 
1972 	mutex_lock(&hci_cb_list_lock);
1973 	list_for_each_entry(cb, &hci_cb_list, list) {
1974 		if (cb->disconn_cfm)
1975 			cb->disconn_cfm(conn, reason);
1976 	}
1977 	mutex_unlock(&hci_cb_list_lock);
1978 
1979 	if (conn->disconn_cfm_cb)
1980 		conn->disconn_cfm_cb(conn, reason);
1981 }
1982 
1983 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
1984 {
1985 	struct hci_cb *cb;
1986 	__u8 encrypt;
1987 
1988 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1989 		return;
1990 
1991 	encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
1992 
1993 	mutex_lock(&hci_cb_list_lock);
1994 	list_for_each_entry(cb, &hci_cb_list, list) {
1995 		if (cb->security_cfm)
1996 			cb->security_cfm(conn, status, encrypt);
1997 	}
1998 	mutex_unlock(&hci_cb_list_lock);
1999 
2000 	if (conn->security_cfm_cb)
2001 		conn->security_cfm_cb(conn, status);
2002 }
2003 
2004 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
2005 {
2006 	struct hci_cb *cb;
2007 	__u8 encrypt;
2008 
2009 	if (conn->state == BT_CONFIG) {
2010 		if (!status)
2011 			conn->state = BT_CONNECTED;
2012 
2013 		hci_connect_cfm(conn, status);
2014 		hci_conn_drop(conn);
2015 		return;
2016 	}
2017 
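	/* Derive the encrypt value reported to the security_cfm callbacks:
	 * 0x00 if the link is not encrypted, 0x02 if it is encrypted with
	 * AES-CCM, and 0x01 otherwise.
	 */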
2018 	if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2019 		encrypt = 0x00;
2020 	else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2021 		encrypt = 0x02;
2022 	else
2023 		encrypt = 0x01;
2024 
2025 	if (!status) {
2026 		if (conn->sec_level == BT_SECURITY_SDP)
2027 			conn->sec_level = BT_SECURITY_LOW;
2028 
2029 		if (conn->pending_sec_level > conn->sec_level)
2030 			conn->sec_level = conn->pending_sec_level;
2031 	}
2032 
2033 	mutex_lock(&hci_cb_list_lock);
2034 	list_for_each_entry(cb, &hci_cb_list, list) {
2035 		if (cb->security_cfm)
2036 			cb->security_cfm(conn, status, encrypt);
2037 	}
2038 	mutex_unlock(&hci_cb_list_lock);
2039 
2040 	if (conn->security_cfm_cb)
2041 		conn->security_cfm_cb(conn, status);
2042 }
2043 
2044 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
2045 {
2046 	struct hci_cb *cb;
2047 
2048 	mutex_lock(&hci_cb_list_lock);
2049 	list_for_each_entry(cb, &hci_cb_list, list) {
2050 		if (cb->key_change_cfm)
2051 			cb->key_change_cfm(conn, status);
2052 	}
2053 	mutex_unlock(&hci_cb_list_lock);
2054 }
2055 
2056 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
2057 								__u8 role)
2058 {
2059 	struct hci_cb *cb;
2060 
2061 	mutex_lock(&hci_cb_list_lock);
2062 	list_for_each_entry(cb, &hci_cb_list, list) {
2063 		if (cb->role_switch_cfm)
2064 			cb->role_switch_cfm(conn, status, role);
2065 	}
2066 	mutex_unlock(&hci_cb_list_lock);
2067 }
2068 
2069 static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
2070 {
2071 	if (addr_type != ADDR_LE_DEV_RANDOM)
2072 		return false;
2073 
2074 	if ((bdaddr->b[5] & 0xc0) == 0x40)
2075 		return true;
2076 
2077 	return false;
2078 }
2079 
2080 static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
2081 {
2082 	if (addr_type == ADDR_LE_DEV_PUBLIC)
2083 		return true;
2084 
2085 	/* Check for Random Static address type */
2086 	if ((addr->b[5] & 0xc0) == 0xc0)
2087 		return true;
2088 
2089 	return false;
2090 }
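/* Worked example (informational): for random addresses the two most
 * significant bits of b[5] (the MSB, since bdaddr_t is stored
 * little-endian) select the sub-type: (b[5] & 0xc0) == 0x40 is a
 * resolvable private address, while 0xc0 (e.g. b[5] = 0xc3) is a static
 * random address and therefore a valid identity address.
 */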
2091 
2092 static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
2093 					  bdaddr_t *bdaddr, u8 addr_type)
2094 {
2095 	if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
2096 		return NULL;
2097 
2098 	return hci_find_irk_by_rpa(hdev, bdaddr);
2099 }
2100 
2101 static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
2102 					u16 to_multiplier)
2103 {
2104 	u16 max_latency;
2105 
2106 	if (min > max) {
2107 		BT_WARN("min %d > max %d", min, max);
2108 		return -EINVAL;
2109 	}
2110 
2111 	if (min < 6) {
2112 		BT_WARN("min %d < 6", min);
2113 		return -EINVAL;
2114 	}
2115 
2116 	if (max > 3200) {
2117 		BT_WARN("max %d > 3200", max);
2118 		return -EINVAL;
2119 	}
2120 
2121 	if (to_multiplier < 10) {
2122 		BT_WARN("to_multiplier %d < 10", to_multiplier);
2123 		return -EINVAL;
2124 	}
2125 
2126 	if (to_multiplier > 3200) {
2127 		BT_WARN("to_multiplier %d > 3200", to_multiplier);
2128 		return -EINVAL;
2129 	}
2130 
2131 	if (max >= to_multiplier * 8) {
2132 		BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
2133 		return -EINVAL;
2134 	}
2135 
2136 	max_latency = (to_multiplier * 4 / max) - 1;
2137 	if (latency > 499) {
2138 		BT_WARN("latency %d > 499", latency);
2139 		return -EINVAL;
2140 	}
2141 
2142 	if (latency > max_latency) {
2143 		BT_WARN("latency %d > max_latency %d", latency, max_latency);
2144 		return -EINVAL;
2145 	}
2146 
2147 	return 0;
2148 }
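/* Worked example (informational): with min = max = 36 (45 ms connection
 * interval) and to_multiplier = 42 (420 ms supervision timeout),
 * max_latency = (42 * 4 / 36) - 1 = 3, so any requested latency above 3
 * is rejected.
 */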
2149 
2150 int hci_register_cb(struct hci_cb *hcb);
2151 int hci_unregister_cb(struct hci_cb *hcb);
2152 
2153 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2154 		   const void *param);
2155 
2156 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2157 		 const void *param);
2158 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
2159 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
2160 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb);
2161 
2162 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
2163 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event);
2164 
2165 u32 hci_conn_get_phy(struct hci_conn *conn);
2166 
2167 /* ----- HCI Sockets ----- */
2168 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
2169 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
2170 			 int flag, struct sock *skip_sk);
2171 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
2172 void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
2173 				 void *data, u16 data_len, ktime_t tstamp,
2174 				 int flag, struct sock *skip_sk);
2175 
2176 void hci_sock_dev_event(struct hci_dev *hdev, int event);
2177 
2178 #define HCI_MGMT_VAR_LEN	BIT(0)
2179 #define HCI_MGMT_NO_HDEV	BIT(1)
2180 #define HCI_MGMT_UNTRUSTED	BIT(2)
2181 #define HCI_MGMT_UNCONFIGURED	BIT(3)
2182 #define HCI_MGMT_HDEV_OPTIONAL	BIT(4)
2183 
2184 struct hci_mgmt_handler {
2185 	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2186 		     u16 data_len);
2187 	size_t data_len;
2188 	unsigned long flags;
2189 };
2190 
2191 struct hci_mgmt_chan {
2192 	struct list_head list;
2193 	unsigned short channel;
2194 	size_t handler_count;
2195 	const struct hci_mgmt_handler *handlers;
2196 	void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
2197 };
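/* Hedged sketch of a management channel definition (modelled on how the
 * core wires its command table; the "sample" names are placeholders and
 * sample_read_info() must match the handler prototype above). The handler
 * array is indexed by management opcode:
 *
 *	static const struct hci_mgmt_handler sample_handlers[] = {
 *		[1] = { sample_read_info, 0, HCI_MGMT_UNTRUSTED },
 *	};
 *
 *	static struct hci_mgmt_chan sample_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(sample_handlers),
 *		.handlers	= sample_handlers,
 *	};
 *
 *	err = hci_mgmt_chan_register(&sample_chan);
 */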
2198 
2199 int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
2200 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
2201 
2202 /* Management interface */
2203 #define DISCOV_TYPE_BREDR		(BIT(BDADDR_BREDR))
2204 #define DISCOV_TYPE_LE			(BIT(BDADDR_LE_PUBLIC) | \
2205 					 BIT(BDADDR_LE_RANDOM))
2206 #define DISCOV_TYPE_INTERLEAVED		(BIT(BDADDR_BREDR) | \
2207 					 BIT(BDADDR_LE_PUBLIC) | \
2208 					 BIT(BDADDR_LE_RANDOM))
2209 
2210 /* These LE scan and inquiry parameters were chosen according to the LE
2211  * General Discovery Procedure specification.
2212  */
2213 #define DISCOV_LE_SCAN_WIN		0x0012 /* 11.25 msec */
2214 #define DISCOV_LE_SCAN_INT		0x0012 /* 11.25 msec */
2215 #define DISCOV_LE_SCAN_INT_FAST		0x0060 /* 60 msec */
2216 #define DISCOV_LE_SCAN_WIN_FAST		0x0030 /* 30 msec */
2217 #define DISCOV_LE_SCAN_INT_CONN		0x0060 /* 60 msec */
2218 #define DISCOV_LE_SCAN_WIN_CONN		0x0060 /* 60 msec */
2219 #define DISCOV_LE_SCAN_INT_SLOW1	0x0800 /* 1.28 sec */
2220 #define DISCOV_LE_SCAN_WIN_SLOW1	0x0012 /* 11.25 msec */
2221 #define DISCOV_LE_SCAN_INT_SLOW2	0x1000 /* 2.56 sec */
2222 #define DISCOV_LE_SCAN_WIN_SLOW2	0x0024 /* 22.5 msec */
2223 #define DISCOV_CODED_SCAN_INT_FAST	0x0120 /* 180 msec */
2224 #define DISCOV_CODED_SCAN_WIN_FAST	0x0090 /* 90 msec */
2225 #define DISCOV_CODED_SCAN_INT_SLOW1	0x1800 /* 3.84 sec */
2226 #define DISCOV_CODED_SCAN_WIN_SLOW1	0x0036 /* 33.75 msec */
2227 #define DISCOV_CODED_SCAN_INT_SLOW2	0x3000 /* 7.68 sec */
2228 #define DISCOV_CODED_SCAN_WIN_SLOW2	0x006c /* 67.5 msec */
2229 #define DISCOV_LE_TIMEOUT		10240	/* msec */
2230 #define DISCOV_INTERLEAVED_TIMEOUT	5120	/* msec */
2231 #define DISCOV_INTERLEAVED_INQUIRY_LEN	0x04
2232 #define DISCOV_BREDR_INQUIRY_LEN	0x08
2233 #define DISCOV_LE_RESTART_DELAY		msecs_to_jiffies(200)	/* msec */
2234 #define DISCOV_LE_FAST_ADV_INT_MIN	0x00A0	/* 100 msec */
2235 #define DISCOV_LE_FAST_ADV_INT_MAX	0x00F0	/* 150 msec */
2236 #define DISCOV_LE_PER_ADV_INT_MIN	0x00A0	/* 200 msec */
2237 #define DISCOV_LE_PER_ADV_INT_MAX	0x00A0	/* 200 msec */
2238 #define DISCOV_LE_ADV_MESH_MIN		0x00A0  /* 100 msec */
2239 #define DISCOV_LE_ADV_MESH_MAX		0x00A0  /* 100 msec */
2240 #define INTERVAL_TO_MS(x)		(((x) * 10) / 0x10)
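/* Worked example (informational): scan interval/window values are in
 * 0.625 ms units, which is what INTERVAL_TO_MS() encodes (x * 10 / 16),
 * e.g. INTERVAL_TO_MS(DISCOV_LE_SCAN_INT_SLOW1) = 0x0800 * 10 / 16 =
 * 1280 ms, matching the 1.28 sec comment above.
 */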
2241 
2242 #define NAME_RESOLVE_DURATION		msecs_to_jiffies(10240)	/* 10.24 sec */
2243 
2244 void mgmt_fill_version_info(void *ver);
2245 int mgmt_new_settings(struct hci_dev *hdev);
2246 void mgmt_index_added(struct hci_dev *hdev);
2247 void mgmt_index_removed(struct hci_dev *hdev);
2248 void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
2249 void mgmt_power_on(struct hci_dev *hdev, int err);
2250 void __mgmt_power_off(struct hci_dev *hdev);
2251 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2252 		       bool persistent);
2253 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
2254 			   u8 *name, u8 name_len);
2255 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
2256 			      u8 link_type, u8 addr_type, u8 reason,
2257 			      bool mgmt_connected);
2258 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
2259 			    u8 link_type, u8 addr_type, u8 status);
2260 void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
2261 			 u8 status);
2262 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
2263 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2264 				  u8 status);
2265 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2266 				      u8 status);
2267 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
2268 			      u8 link_type, u8 addr_type, u32 value,
2269 			      u8 confirm_hint);
2270 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2271 				     u8 link_type, u8 addr_type, u8 status);
2272 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2273 					 u8 link_type, u8 addr_type, u8 status);
2274 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
2275 			      u8 link_type, u8 addr_type);
2276 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2277 				     u8 link_type, u8 addr_type, u8 status);
2278 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2279 					 u8 link_type, u8 addr_type, u8 status);
2280 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
2281 			     u8 link_type, u8 addr_type, u32 passkey,
2282 			     u8 entered);
2283 void mgmt_auth_failed(struct hci_conn *conn, u8 status);
2284 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
2285 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
2286 				    u8 status);
2287 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
2288 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
2289 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
2290 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2291 		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
2292 		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
2293 		       u64 instant);
2294 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2295 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
2296 void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
2297 void mgmt_suspending(struct hci_dev *hdev, u8 state);
2298 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
2299 		   u8 addr_type);
2300 bool mgmt_powering_down(struct hci_dev *hdev);
2301 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
2302 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
2303 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
2304 		   bool persistent);
2305 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
2306 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
2307 			 u16 max_interval, u16 latency, u16 timeout);
2308 void mgmt_smp_complete(struct hci_conn *conn, bool complete);
2309 bool mgmt_get_connectable(struct hci_dev *hdev);
2310 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
2311 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
2312 			    u8 instance);
2313 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
2314 			      u8 instance);
2315 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
2316 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
2317 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
2318 				  bdaddr_t *bdaddr, u8 addr_type);
2319 
2320 int hci_abort_conn(struct hci_conn *conn, u8 reason);
2321 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
2322 		      u16 to_multiplier);
2323 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
2324 		      __u8 ltk[16], __u8 key_size);
2325 
2326 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2327 			       u8 *bdaddr_type);
2328 
2329 #define SCO_AIRMODE_MASK       0x0003
2330 #define SCO_AIRMODE_CVSD       0x0000
2331 #define SCO_AIRMODE_TRANSP     0x0003
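/* Illustrative check (assuming a SCO/eSCO hci_conn whose "setting" field
 * has been populated from the socket voice setting):
 *
 *	if ((conn->setting & SCO_AIRMODE_MASK) == SCO_AIRMODE_TRANSP)
 *		// transparent air mode (e.g. mSBC) is in use
 */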
2332 
2333 #define LOCAL_CODEC_ACL_MASK	BIT(0)
2334 #define LOCAL_CODEC_SCO_MASK	BIT(1)
2335 
2336 #define TRANSPORT_TYPE_MAX	0x04
2337 
2338 #endif /* __HCI_CORE_H */
2339