xref: /linux/include/net/bluetooth/hci_core.h (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 #ifndef __HCI_CORE_H
26 #define __HCI_CORE_H
27 
28 #include <net/bluetooth/hci.h>
29 
30 /* HCI priority */
31 #define HCI_PRIO_MAX	7
32 
33 /* HCI Core structures */
34 struct inquiry_data {
35 	bdaddr_t	bdaddr;
36 	__u8		pscan_rep_mode;
37 	__u8		pscan_period_mode;
38 	__u8		pscan_mode;
39 	__u8		dev_class[3];
40 	__le16		clock_offset;
41 	__s8		rssi;
42 	__u8		ssp_mode;
43 };
44 
45 struct inquiry_entry {
46 	struct list_head	all;		/* inq_cache.all */
47 	struct list_head	list;		/* unknown or resolve */
48 	enum {
49 		NAME_NOT_KNOWN,
50 		NAME_NEEDED,
51 		NAME_PENDING,
52 		NAME_KNOWN,
53 	} name_state;
54 	__u32			timestamp;
55 	struct inquiry_data	data;
56 };
57 
58 struct discovery_state {
59 	int			type;
60 	enum {
61 		DISCOVERY_STOPPED,
62 		DISCOVERY_STARTING,
63 		DISCOVERY_FINDING,
64 		DISCOVERY_RESOLVING,
65 		DISCOVERY_STOPPING,
66 	} state;
67 	struct list_head	all;	/* All devices found during inquiry */
68 	struct list_head	unknown;	/* Name state not known */
69 	struct list_head	resolve;	/* Name needs to be resolved */
70 	__u32			timestamp;
71 };
72 
73 struct hci_conn_hash {
74 	struct list_head list;
75 	unsigned int     acl_num;
76 	unsigned int     amp_num;
77 	unsigned int     sco_num;
78 	unsigned int     le_num;
79 };
80 
81 struct bdaddr_list {
82 	struct list_head list;
83 	bdaddr_t bdaddr;
84 	u8 bdaddr_type;
85 };
86 
87 struct bt_uuid {
88 	struct list_head list;
89 	u8 uuid[16];
90 	u8 size;
91 	u8 svc_hint;
92 };
93 
94 struct smp_ltk {
95 	struct list_head list;
96 	bdaddr_t bdaddr;
97 	u8 bdaddr_type;
98 	u8 authenticated;
99 	u8 type;
100 	u8 enc_size;
101 	__le16 ediv;
102 	u8 rand[8];
103 	u8 val[16];
104 } __packed;
105 
106 struct link_key {
107 	struct list_head list;
108 	bdaddr_t bdaddr;
109 	u8 type;
110 	u8 val[HCI_LINK_KEY_SIZE];
111 	u8 pin_len;
112 };
113 
114 struct oob_data {
115 	struct list_head list;
116 	bdaddr_t bdaddr;
117 	u8 hash[16];
118 	u8 randomizer[16];
119 };
120 
121 #define HCI_MAX_SHORT_NAME_LENGTH	10
122 
123 struct amp_assoc {
124 	__u16	len;
125 	__u16	offset;
126 	__u16	rem_len;
127 	__u16	len_so_far;
128 	__u8	data[HCI_MAX_AMP_ASSOC_SIZE];
129 };
130 
131 #define HCI_MAX_PAGES	3
132 
133 #define NUM_REASSEMBLY 4
134 struct hci_dev {
135 	struct list_head list;
136 	struct mutex	lock;
137 
138 	char		name[8];
139 	unsigned long	flags;
140 	__u16		id;
141 	__u8		bus;
142 	__u8		dev_type;
143 	bdaddr_t	bdaddr;
144 	bdaddr_t	static_addr;
145 	__u8		own_addr_type;
146 	__u8		dev_name[HCI_MAX_NAME_LENGTH];
147 	__u8		short_name[HCI_MAX_SHORT_NAME_LENGTH];
148 	__u8		eir[HCI_MAX_EIR_LENGTH];
149 	__u8		dev_class[3];
150 	__u8		major_class;
151 	__u8		minor_class;
152 	__u8		max_page;
153 	__u8		features[HCI_MAX_PAGES][8];
154 	__u8		le_features[8];
155 	__u8		le_white_list_size;
156 	__u8		le_states[8];
157 	__u8		commands[64];
158 	__u8		hci_ver;
159 	__u16		hci_rev;
160 	__u8		lmp_ver;
161 	__u16		manufacturer;
162 	__u16		lmp_subver;
163 	__u16		voice_setting;
164 	__u8		num_iac;
165 	__u8		io_capability;
166 	__s8		inq_tx_power;
167 	__u16		page_scan_interval;
168 	__u16		page_scan_window;
169 	__u8		page_scan_type;
170 	__u16		le_scan_interval;
171 	__u16		le_scan_window;
172 	__u16		le_conn_min_interval;
173 	__u16		le_conn_max_interval;
174 	__u8		ssp_debug_mode;
175 
176 	__u16		devid_source;
177 	__u16		devid_vendor;
178 	__u16		devid_product;
179 	__u16		devid_version;
180 
181 	__u16		pkt_type;
182 	__u16		esco_type;
183 	__u16		link_policy;
184 	__u16		link_mode;
185 
186 	__u32		idle_timeout;
187 	__u16		sniff_min_interval;
188 	__u16		sniff_max_interval;
189 
190 	__u8		amp_status;
191 	__u32		amp_total_bw;
192 	__u32		amp_max_bw;
193 	__u32		amp_min_latency;
194 	__u32		amp_max_pdu;
195 	__u8		amp_type;
196 	__u16		amp_pal_cap;
197 	__u16		amp_assoc_size;
198 	__u32		amp_max_flush_to;
199 	__u32		amp_be_flush_to;
200 
201 	struct amp_assoc	loc_assoc;
202 
203 	__u8		flow_ctl_mode;
204 
205 	unsigned int	auto_accept_delay;
206 
207 	unsigned long	quirks;
208 
209 	atomic_t	cmd_cnt;
210 	unsigned int	acl_cnt;
211 	unsigned int	sco_cnt;
212 	unsigned int	le_cnt;
213 
214 	unsigned int	acl_mtu;
215 	unsigned int	sco_mtu;
216 	unsigned int	le_mtu;
217 	unsigned int	acl_pkts;
218 	unsigned int	sco_pkts;
219 	unsigned int	le_pkts;
220 
221 	__u16		block_len;
222 	__u16		block_mtu;
223 	__u16		num_blocks;
224 	__u16		block_cnt;
225 
226 	unsigned long	acl_last_tx;
227 	unsigned long	sco_last_tx;
228 	unsigned long	le_last_tx;
229 
230 	struct workqueue_struct	*workqueue;
231 	struct workqueue_struct	*req_workqueue;
232 
233 	struct work_struct	power_on;
234 	struct delayed_work	power_off;
235 
236 	__u16			discov_timeout;
237 	struct delayed_work	discov_off;
238 
239 	struct delayed_work	service_cache;
240 
241 	struct timer_list	cmd_timer;
242 
243 	struct work_struct	rx_work;
244 	struct work_struct	cmd_work;
245 	struct work_struct	tx_work;
246 
247 	struct sk_buff_head	rx_q;
248 	struct sk_buff_head	raw_q;
249 	struct sk_buff_head	cmd_q;
250 
251 	struct sk_buff		*recv_evt;
252 	struct sk_buff		*sent_cmd;
253 	struct sk_buff		*reassembly[NUM_REASSEMBLY];
254 
255 	struct mutex		req_lock;
256 	wait_queue_head_t	req_wait_q;
257 	__u32			req_status;
258 	__u32			req_result;
259 
260 	struct list_head	mgmt_pending;
261 
262 	struct discovery_state	discovery;
263 	struct hci_conn_hash	conn_hash;
264 	struct list_head	blacklist;
265 
266 	struct list_head	uuids;
267 
268 	struct list_head	link_keys;
269 
270 	struct list_head	long_term_keys;
271 
272 	struct list_head	remote_oob_data;
273 
274 	struct hci_dev_stats	stat;
275 
276 	atomic_t		promisc;
277 
278 	struct dentry		*debugfs;
279 
280 	struct device		dev;
281 
282 	struct rfkill		*rfkill;
283 
284 	unsigned long		dev_flags;
285 
286 	struct delayed_work	le_scan_disable;
287 
288 	__s8			adv_tx_power;
289 	__u8			adv_data[HCI_MAX_AD_LENGTH];
290 	__u8			adv_data_len;
291 	__u8			scan_rsp_data[HCI_MAX_AD_LENGTH];
292 	__u8			scan_rsp_data_len;
293 
294 	int (*open)(struct hci_dev *hdev);
295 	int (*close)(struct hci_dev *hdev);
296 	int (*flush)(struct hci_dev *hdev);
297 	int (*setup)(struct hci_dev *hdev);
298 	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
299 	void (*notify)(struct hci_dev *hdev, unsigned int evt);
300 };
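
/*
 * Illustrative sketch (not part of the original header): the function pointers
 * at the end of struct hci_dev are what a transport driver fills in before
 * registering the device. The foo_* names below are hypothetical; the packet
 * type of an outgoing frame is available via bt_cb(skb)->pkt_type.
 *
 *	static int foo_open(struct hci_dev *hdev)
 *	{
 *		struct foo_data *data = hci_get_drvdata(hdev);
 *
 *		return foo_hw_power_on(data);
 *	}
 *
 *	static int foo_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 *	{
 *		struct foo_data *data = hci_get_drvdata(hdev);
 *
 *		return foo_hw_queue(data, bt_cb(skb)->pkt_type, skb);
 *	}
 *
 * The registration sequence itself is sketched near hci_register_dev() below.
 */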
301 
302 #define HCI_PHY_HANDLE(handle)	((handle) & 0xff)
303 
304 struct hci_conn {
305 	struct list_head list;
306 
307 	atomic_t	refcnt;
308 
309 	bdaddr_t	dst;
310 	__u8		dst_type;
311 	bdaddr_t	src;
312 	__u8		src_type;
313 	__u16		handle;
314 	__u16		state;
315 	__u8		mode;
316 	__u8		type;
317 	bool		out;
318 	__u8		attempt;
319 	__u8		dev_class[3];
320 	__u8		features[HCI_MAX_PAGES][8];
321 	__u16		pkt_type;
322 	__u16		link_policy;
323 	__u32		link_mode;
324 	__u8		key_type;
325 	__u8		auth_type;
326 	__u8		sec_level;
327 	__u8		pending_sec_level;
328 	__u8		pin_length;
329 	__u8		enc_key_size;
330 	__u8		io_capability;
331 	__u32		passkey_notify;
332 	__u8		passkey_entered;
333 	__u16		disc_timeout;
334 	__u16		setting;
335 	unsigned long	flags;
336 
337 	__u8		remote_cap;
338 	__u8		remote_auth;
339 	__u8		remote_id;
340 	bool		flush_key;
341 
342 	unsigned int	sent;
343 
344 	struct sk_buff_head data_q;
345 	struct list_head chan_list;
346 
347 	struct delayed_work disc_work;
348 	struct delayed_work auto_accept_work;
349 	struct delayed_work idle_work;
350 
351 	struct device	dev;
352 
353 	struct hci_dev	*hdev;
354 	void		*l2cap_data;
355 	void		*sco_data;
356 	void		*smp_conn;
357 	struct amp_mgr	*amp_mgr;
358 
359 	struct hci_conn	*link;
360 
361 	void (*connect_cfm_cb)	(struct hci_conn *conn, u8 status);
362 	void (*security_cfm_cb)	(struct hci_conn *conn, u8 status);
363 	void (*disconn_cfm_cb)	(struct hci_conn *conn, u8 reason);
364 };
365 
366 struct hci_chan {
367 	struct list_head list;
368 	__u16 handle;
369 	struct hci_conn *conn;
370 	struct sk_buff_head data_q;
371 	unsigned int	sent;
372 	__u8		state;
373 };
374 
375 extern struct list_head hci_dev_list;
376 extern struct list_head hci_cb_list;
377 extern rwlock_t hci_dev_list_lock;
378 extern rwlock_t hci_cb_list_lock;
379 
380 /* ----- HCI interface to upper protocols ----- */
381 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
382 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
383 int l2cap_disconn_ind(struct hci_conn *hcon);
384 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
385 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
386 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
387 
388 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
389 void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
390 void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
391 int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
392 
393 /* ----- Inquiry cache ----- */
394 #define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
395 #define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */
396 
397 static inline void discovery_init(struct hci_dev *hdev)
398 {
399 	hdev->discovery.state = DISCOVERY_STOPPED;
400 	INIT_LIST_HEAD(&hdev->discovery.all);
401 	INIT_LIST_HEAD(&hdev->discovery.unknown);
402 	INIT_LIST_HEAD(&hdev->discovery.resolve);
403 }
404 
405 bool hci_discovery_active(struct hci_dev *hdev);
406 
407 void hci_discovery_set_state(struct hci_dev *hdev, int state);
408 
409 static inline int inquiry_cache_empty(struct hci_dev *hdev)
410 {
411 	return list_empty(&hdev->discovery.all);
412 }
413 
414 static inline long inquiry_cache_age(struct hci_dev *hdev)
415 {
416 	struct discovery_state *c = &hdev->discovery;
417 	return jiffies - c->timestamp;
418 }
419 
420 static inline long inquiry_entry_age(struct inquiry_entry *e)
421 {
422 	return jiffies - e->timestamp;
423 }
424 
425 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
426 					       bdaddr_t *bdaddr);
427 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
428 						       bdaddr_t *bdaddr);
429 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
430 						       bdaddr_t *bdaddr,
431 						       int state);
432 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
433 				      struct inquiry_entry *ie);
434 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
435 			      bool name_known, bool *ssp);
436 void hci_inquiry_cache_flush(struct hci_dev *hdev);
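
/*
 * Illustrative sketch (not part of the original header): the inquiry cache is
 * protected by hdev->lock, so the lookup helpers above are normally called
 * with hci_dev_lock() held, e.g.
 *
 *	hci_dev_lock(hdev);
 *	ie = hci_inquiry_cache_lookup(hdev, &bdaddr);
 *	if (ie && ie->name_state == NAME_KNOWN)
 *		memcpy(dev_class, ie->data.dev_class, 3);
 *	hci_dev_unlock(hdev);
 */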
437 
438 /* ----- HCI Connections ----- */
439 enum {
440 	HCI_CONN_AUTH_PEND,
441 	HCI_CONN_REAUTH_PEND,
442 	HCI_CONN_ENCRYPT_PEND,
443 	HCI_CONN_RSWITCH_PEND,
444 	HCI_CONN_MODE_CHANGE_PEND,
445 	HCI_CONN_SCO_SETUP_PEND,
446 	HCI_CONN_LE_SMP_PEND,
447 	HCI_CONN_MGMT_CONNECTED,
448 	HCI_CONN_SSP_ENABLED,
449 	HCI_CONN_POWER_SAVE,
450 	HCI_CONN_REMOTE_OOB,
451 };
452 
453 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
454 {
455 	struct hci_dev *hdev = conn->hdev;
456 	return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
457 	       test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
458 }
459 
460 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
461 {
462 	struct hci_conn_hash *h = &hdev->conn_hash;
463 	list_add_rcu(&c->list, &h->list);
464 	switch (c->type) {
465 	case ACL_LINK:
466 		h->acl_num++;
467 		break;
468 	case AMP_LINK:
469 		h->amp_num++;
470 		break;
471 	case LE_LINK:
472 		h->le_num++;
473 		break;
474 	case SCO_LINK:
475 	case ESCO_LINK:
476 		h->sco_num++;
477 		break;
478 	}
479 }
480 
481 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
482 {
483 	struct hci_conn_hash *h = &hdev->conn_hash;
484 
485 	list_del_rcu(&c->list);
486 	synchronize_rcu();
487 
488 	switch (c->type) {
489 	case ACL_LINK:
490 		h->acl_num--;
491 		break;
492 	case AMP_LINK:
493 		h->amp_num--;
494 		break;
495 	case LE_LINK:
496 		h->le_num--;
497 		break;
498 	case SCO_LINK:
499 	case ESCO_LINK:
500 		h->sco_num--;
501 		break;
502 	}
503 }
504 
505 static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
506 {
507 	struct hci_conn_hash *h = &hdev->conn_hash;
508 	switch (type) {
509 	case ACL_LINK:
510 		return h->acl_num;
511 	case AMP_LINK:
512 		return h->amp_num;
513 	case LE_LINK:
514 		return h->le_num;
515 	case SCO_LINK:
516 	case ESCO_LINK:
517 		return h->sco_num;
518 	default:
519 		return 0;
520 	}
521 }
522 
523 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
524 								__u16 handle)
525 {
526 	struct hci_conn_hash *h = &hdev->conn_hash;
527 	struct hci_conn  *c;
528 
529 	rcu_read_lock();
530 
531 	list_for_each_entry_rcu(c, &h->list, list) {
532 		if (c->handle == handle) {
533 			rcu_read_unlock();
534 			return c;
535 		}
536 	}
537 	rcu_read_unlock();
538 
539 	return NULL;
540 }
541 
542 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
543 							__u8 type, bdaddr_t *ba)
544 {
545 	struct hci_conn_hash *h = &hdev->conn_hash;
546 	struct hci_conn  *c;
547 
548 	rcu_read_lock();
549 
550 	list_for_each_entry_rcu(c, &h->list, list) {
551 		if (c->type == type && !bacmp(&c->dst, ba)) {
552 			rcu_read_unlock();
553 			return c;
554 		}
555 	}
556 
557 	rcu_read_unlock();
558 
559 	return NULL;
560 }
561 
562 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
563 							__u8 type, __u16 state)
564 {
565 	struct hci_conn_hash *h = &hdev->conn_hash;
566 	struct hci_conn  *c;
567 
568 	rcu_read_lock();
569 
570 	list_for_each_entry_rcu(c, &h->list, list) {
571 		if (c->type == type && c->state == state) {
572 			rcu_read_unlock();
573 			return c;
574 		}
575 	}
576 
577 	rcu_read_unlock();
578 
579 	return NULL;
580 }
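
/*
 * Illustrative sketch (not part of the original header): the lookup helpers
 * above return the hci_conn without taking a reference, so code that keeps
 * using the connection afterwards would typically pair the lookup with
 * hci_conn_hold()/hci_conn_drop() while holding hdev->lock, e.g.
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &bdaddr);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock(hdev);
 *
 *	(... use conn ...)
 *
 *	if (conn)
 *		hci_conn_drop(conn);
 */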
581 
582 void hci_disconnect(struct hci_conn *conn, __u8 reason);
583 bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
584 void hci_sco_setup(struct hci_conn *conn, __u8 status);
585 
586 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
587 int hci_conn_del(struct hci_conn *conn);
588 void hci_conn_hash_flush(struct hci_dev *hdev);
589 void hci_conn_check_pending(struct hci_dev *hdev);
590 
591 struct hci_chan *hci_chan_create(struct hci_conn *conn);
592 void hci_chan_del(struct hci_chan *chan);
593 void hci_chan_list_flush(struct hci_conn *conn);
594 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
595 
596 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
597 			     __u8 dst_type, __u8 sec_level, __u8 auth_type);
598 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
599 				 __u16 setting);
600 int hci_conn_check_link_mode(struct hci_conn *conn);
601 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
602 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
603 int hci_conn_change_link_key(struct hci_conn *conn);
604 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
605 
606 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
607 
608 /*
609  * hci_conn_get() and hci_conn_put() are used to control the life-time of an
610  * "hci_conn" object. They do not guarantee that the hci_conn object is running,
611  * working or anything else. They just guarantee that the object is available
612  * and can be dereferenced. So you can use its locks, local variables and any
613  * other constant data.
614  * Before accessing runtime data, you _must_ lock the object and then check that
615  * it is still running. As soon as you release the locks, the connection might
616  * get dropped, though.
617  *
618  * On the other hand, hci_conn_hold() and hci_conn_drop() control how long the
619  * underlying connection is kept alive. Every channel that runs on the hci_conn
620  * object takes a hold to prevent the connection from being torn down. As long
621  * as you hold the connection, you must also guarantee that you have a valid
622  * reference to the object via hci_conn_get() (or the initial reference from
623  * hci_conn_add()).
624  * The hold()/drop() ref-count is known to drop below 0 sometimes; this does no
625  * harm because nothing relies on the exact count. It does mean, however, that
626  * _get()/_drop() cannot be used here; the caller must hold a valid ref (FIXME).
627  */
628 
629 static inline void hci_conn_get(struct hci_conn *conn)
630 {
631 	get_device(&conn->dev);
632 }
633 
634 static inline void hci_conn_put(struct hci_conn *conn)
635 {
636 	put_device(&conn->dev);
637 }
638 
639 static inline void hci_conn_hold(struct hci_conn *conn)
640 {
641 	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
642 
643 	atomic_inc(&conn->refcnt);
644 	cancel_delayed_work(&conn->disc_work);
645 }
646 
647 static inline void hci_conn_drop(struct hci_conn *conn)
648 {
649 	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
650 
651 	if (atomic_dec_and_test(&conn->refcnt)) {
652 		unsigned long timeo;
653 
654 		switch (conn->type) {
655 		case ACL_LINK:
656 		case LE_LINK:
657 			cancel_delayed_work(&conn->idle_work);
658 			if (conn->state == BT_CONNECTED) {
659 				timeo = conn->disc_timeout;
660 				if (!conn->out)
661 					timeo *= 2;
662 			} else {
663 				timeo = msecs_to_jiffies(10);
664 			}
665 			break;
666 
667 		case AMP_LINK:
668 			timeo = conn->disc_timeout;
669 			break;
670 
671 		default:
672 			timeo = msecs_to_jiffies(10);
673 			break;
674 		}
675 
676 		cancel_delayed_work(&conn->disc_work);
677 		queue_delayed_work(conn->hdev->workqueue,
678 				   &conn->disc_work, timeo);
679 	}
680 }
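
/*
 * Illustrative sketch (not part of the original header), tying the comment
 * above hci_conn_get() to code: hold()/drop() keeps the baseband link from
 * being disconnected, while get()/put() only keeps the hci_conn object itself
 * alive.
 *
 *	hci_conn_hold(conn);	(link stays up, disc_work is cancelled)
 *	hci_conn_get(conn);	(object cannot be freed)
 *	(...)
 *	hci_conn_drop(conn);	(disconnect may now be scheduled)
 *	hci_conn_put(conn);	(object may now be released)
 */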
681 
682 /* ----- HCI Devices ----- */
683 static inline void hci_dev_put(struct hci_dev *d)
684 {
685 	BT_DBG("%s orig refcnt %d", d->name,
686 	       atomic_read(&d->dev.kobj.kref.refcount));
687 
688 	put_device(&d->dev);
689 }
690 
691 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
692 {
693 	BT_DBG("%s orig refcnt %d", d->name,
694 	       atomic_read(&d->dev.kobj.kref.refcount));
695 
696 	get_device(&d->dev);
697 	return d;
698 }
699 
700 #define hci_dev_lock(d)		mutex_lock(&d->lock)
701 #define hci_dev_unlock(d)	mutex_unlock(&d->lock)
702 
703 #define to_hci_dev(d) container_of(d, struct hci_dev, dev)
704 #define to_hci_conn(c) container_of(c, struct hci_conn, dev)
705 
706 static inline void *hci_get_drvdata(struct hci_dev *hdev)
707 {
708 	return dev_get_drvdata(&hdev->dev);
709 }
710 
711 static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
712 {
713 	dev_set_drvdata(&hdev->dev, data);
714 }
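
/*
 * Illustrative sketch (not part of the original header): drivers usually
 * attach their private state with hci_set_drvdata() right after
 * hci_alloc_dev() and read it back in their callbacks; struct foo_data and
 * its txq are hypothetical.
 *
 *	hci_set_drvdata(hdev, data);
 *
 *	static int foo_flush(struct hci_dev *hdev)
 *	{
 *		struct foo_data *data = hci_get_drvdata(hdev);
 *
 *		skb_queue_purge(&data->txq);
 *		return 0;
 *	}
 */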
715 
716 struct hci_dev *hci_dev_get(int index);
717 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);
718 
719 struct hci_dev *hci_alloc_dev(void);
720 void hci_free_dev(struct hci_dev *hdev);
721 int hci_register_dev(struct hci_dev *hdev);
722 void hci_unregister_dev(struct hci_dev *hdev);
723 int hci_suspend_dev(struct hci_dev *hdev);
724 int hci_resume_dev(struct hci_dev *hdev);
725 int hci_dev_open(__u16 dev);
726 int hci_dev_close(__u16 dev);
727 int hci_dev_reset(__u16 dev);
728 int hci_dev_reset_stat(__u16 dev);
729 int hci_dev_cmd(unsigned int cmd, void __user *arg);
730 int hci_get_dev_list(void __user *arg);
731 int hci_get_dev_info(void __user *arg);
732 int hci_get_conn_list(void __user *arg);
733 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
734 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
735 int hci_inquiry(void __user *arg);
736 
737 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
738 					 bdaddr_t *bdaddr, u8 type);
739 int hci_blacklist_clear(struct hci_dev *hdev);
740 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
741 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
742 
743 int hci_uuids_clear(struct hci_dev *hdev);
744 
745 int hci_link_keys_clear(struct hci_dev *hdev);
746 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
747 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
748 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
749 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
750 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
751 		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
752 		__le16 ediv, u8 rand[8]);
753 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
754 				     u8 addr_type);
755 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr);
756 int hci_smp_ltks_clear(struct hci_dev *hdev);
757 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
758 
759 int hci_remote_oob_data_clear(struct hci_dev *hdev);
760 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
761 							bdaddr_t *bdaddr);
762 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
763 								u8 *randomizer);
764 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
765 
766 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
767 
768 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
769 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
770 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
771 
772 void hci_init_sysfs(struct hci_dev *hdev);
773 void hci_conn_init_sysfs(struct hci_conn *conn);
774 void hci_conn_add_sysfs(struct hci_conn *conn);
775 void hci_conn_del_sysfs(struct hci_conn *conn);
776 
777 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
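
/*
 * Illustrative sketch (not part of the original header): the usual probe and
 * remove sequence for a transport driver, using the helpers declared above.
 * The foo_* callbacks and intf are hypothetical (e.g. a USB interface).
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send_frame;
 *	hci_set_drvdata(hdev, data);
 *	SET_HCIDEV_DEV(hdev, &intf->dev);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 *
 * and on removal, in reverse order:
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */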
778 
779 /* ----- LMP capabilities ----- */
780 #define lmp_encrypt_capable(dev)   ((dev)->features[0][0] & LMP_ENCRYPT)
781 #define lmp_rswitch_capable(dev)   ((dev)->features[0][0] & LMP_RSWITCH)
782 #define lmp_hold_capable(dev)      ((dev)->features[0][0] & LMP_HOLD)
783 #define lmp_sniff_capable(dev)     ((dev)->features[0][0] & LMP_SNIFF)
784 #define lmp_park_capable(dev)      ((dev)->features[0][1] & LMP_PARK)
785 #define lmp_inq_rssi_capable(dev)  ((dev)->features[0][3] & LMP_RSSI_INQ)
786 #define lmp_esco_capable(dev)      ((dev)->features[0][3] & LMP_ESCO)
787 #define lmp_bredr_capable(dev)     (!((dev)->features[0][4] & LMP_NO_BREDR))
788 #define lmp_le_capable(dev)        ((dev)->features[0][4] & LMP_LE)
789 #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
790 #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
791 #define lmp_ext_inq_capable(dev)   ((dev)->features[0][6] & LMP_EXT_INQ)
792 #define lmp_le_br_capable(dev)     (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
793 #define lmp_ssp_capable(dev)       ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
794 #define lmp_no_flush_capable(dev)  ((dev)->features[0][6] & LMP_NO_FLUSH)
795 #define lmp_lsto_capable(dev)      ((dev)->features[0][7] & LMP_LSTO)
796 #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
797 #define lmp_ext_feat_capable(dev)  ((dev)->features[0][7] & LMP_EXTFEATURES)
798 #define lmp_transp_capable(dev)    ((dev)->features[0][2] & LMP_TRANSPARENT)
799 
800 /* ----- Extended LMP capabilities ----- */
801 #define lmp_host_ssp_capable(dev)  ((dev)->features[1][0] & LMP_HOST_SSP)
802 #define lmp_host_le_capable(dev)   (!!((dev)->features[1][0] & LMP_HOST_LE))
803 #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
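
/*
 * Illustrative sketch (not part of the original header): the capability
 * macros gate optional behaviour on the controller's feature bits, usually
 * together with the corresponding host-side flags, e.g.
 *
 *	if (lmp_ssp_capable(hdev) &&
 *	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
 *		(Secure Simple Pairing can be used on this link)
 *
 *	if (lmp_le_capable(hdev) && lmp_host_le_capable(hdev))
 *		(both controller and host support Low Energy)
 */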
804 
805 /* ----- HCI protocols ----- */
806 #define HCI_PROTO_DEFER             0x01
807 
808 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
809 					__u8 type, __u8 *flags)
810 {
811 	switch (type) {
812 	case ACL_LINK:
813 		return l2cap_connect_ind(hdev, bdaddr);
814 
815 	case SCO_LINK:
816 	case ESCO_LINK:
817 		return sco_connect_ind(hdev, bdaddr, flags);
818 
819 	default:
820 		BT_ERR("unknown link type %d", type);
821 		return -EINVAL;
822 	}
823 }
824 
825 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
826 {
827 	switch (conn->type) {
828 	case ACL_LINK:
829 	case LE_LINK:
830 		l2cap_connect_cfm(conn, status);
831 		break;
832 
833 	case SCO_LINK:
834 	case ESCO_LINK:
835 		sco_connect_cfm(conn, status);
836 		break;
837 
838 	default:
839 		BT_ERR("unknown link type %d", conn->type);
840 		break;
841 	}
842 
843 	if (conn->connect_cfm_cb)
844 		conn->connect_cfm_cb(conn, status);
845 }
846 
847 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
848 {
849 	if (conn->type != ACL_LINK && conn->type != LE_LINK)
850 		return HCI_ERROR_REMOTE_USER_TERM;
851 
852 	return l2cap_disconn_ind(conn);
853 }
854 
855 static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
856 {
857 	switch (conn->type) {
858 	case ACL_LINK:
859 	case LE_LINK:
860 		l2cap_disconn_cfm(conn, reason);
861 		break;
862 
863 	case SCO_LINK:
864 	case ESCO_LINK:
865 		sco_disconn_cfm(conn, reason);
866 		break;
867 
868 	/* For AMP links, L2CAP disconnection is handled via the BR/EDR channel */
869 	case AMP_LINK:
870 		break;
871 
872 	default:
873 		BT_ERR("unknown link type %d", conn->type);
874 		break;
875 	}
876 
877 	if (conn->disconn_cfm_cb)
878 		conn->disconn_cfm_cb(conn, reason);
879 }
880 
881 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
882 {
883 	__u8 encrypt;
884 
885 	if (conn->type != ACL_LINK && conn->type != LE_LINK)
886 		return;
887 
888 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
889 		return;
890 
891 	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
892 	l2cap_security_cfm(conn, status, encrypt);
893 
894 	if (conn->security_cfm_cb)
895 		conn->security_cfm_cb(conn, status);
896 }
897 
898 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
899 								__u8 encrypt)
900 {
901 	if (conn->type != ACL_LINK && conn->type != LE_LINK)
902 		return;
903 
904 	l2cap_security_cfm(conn, status, encrypt);
905 
906 	if (conn->security_cfm_cb)
907 		conn->security_cfm_cb(conn, status);
908 }
909 
910 /* ----- HCI callbacks ----- */
911 struct hci_cb {
912 	struct list_head list;
913 
914 	char *name;
915 
916 	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
917 								__u8 encrypt);
918 	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
919 	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
920 };
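
/*
 * Illustrative sketch (not part of the original header): upper layers receive
 * security and role-switch events by registering an hci_cb through
 * hci_register_cb(), declared further below. The foo_* names are hypothetical.
 *
 *	static void foo_security_cfm(struct hci_conn *conn, __u8 status,
 *				     __u8 encrypt)
 *	{
 *		BT_DBG("conn %p status %u encrypt %u", conn, status, encrypt);
 *	}
 *
 *	static struct hci_cb foo_cb = {
 *		.name		= "foo",
 *		.security_cfm	= foo_security_cfm,
 *	};
 *
 *	hci_register_cb(&foo_cb);
 *	(...)
 *	hci_unregister_cb(&foo_cb);
 */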
921 
922 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
923 {
924 	struct hci_cb *cb;
925 	__u8 encrypt;
926 
927 	hci_proto_auth_cfm(conn, status);
928 
929 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
930 		return;
931 
932 	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
933 
934 	read_lock(&hci_cb_list_lock);
935 	list_for_each_entry(cb, &hci_cb_list, list) {
936 		if (cb->security_cfm)
937 			cb->security_cfm(conn, status, encrypt);
938 	}
939 	read_unlock(&hci_cb_list_lock);
940 }
941 
942 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
943 								__u8 encrypt)
944 {
945 	struct hci_cb *cb;
946 
947 	if (conn->sec_level == BT_SECURITY_SDP)
948 		conn->sec_level = BT_SECURITY_LOW;
949 
950 	if (conn->pending_sec_level > conn->sec_level)
951 		conn->sec_level = conn->pending_sec_level;
952 
953 	hci_proto_encrypt_cfm(conn, status, encrypt);
954 
955 	read_lock(&hci_cb_list_lock);
956 	list_for_each_entry(cb, &hci_cb_list, list) {
957 		if (cb->security_cfm)
958 			cb->security_cfm(conn, status, encrypt);
959 	}
960 	read_unlock(&hci_cb_list_lock);
961 }
962 
963 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
964 {
965 	struct hci_cb *cb;
966 
967 	read_lock(&hci_cb_list_lock);
968 	list_for_each_entry(cb, &hci_cb_list, list) {
969 		if (cb->key_change_cfm)
970 			cb->key_change_cfm(conn, status);
971 	}
972 	read_unlock(&hci_cb_list_lock);
973 }
974 
975 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
976 								__u8 role)
977 {
978 	struct hci_cb *cb;
979 
980 	read_lock(&hci_cb_list_lock);
981 	list_for_each_entry(cb, &hci_cb_list, list) {
982 		if (cb->role_switch_cfm)
983 			cb->role_switch_cfm(conn, status, role);
984 	}
985 	read_unlock(&hci_cb_list_lock);
986 }
987 
988 static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
989 {
990 	size_t parsed = 0;
991 
992 	if (data_len < 2)
993 		return false;
994 
995 	while (parsed < data_len - 1) {
996 		u8 field_len = data[0];
997 
998 		if (field_len == 0)
999 			break;
1000 
1001 		parsed += field_len + 1;
1002 
1003 		if (parsed > data_len)
1004 			break;
1005 
1006 		if (data[1] == type)
1007 			return true;
1008 
1009 		data += field_len + 1;
1010 	}
1011 
1012 	return false;
1013 }
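
/*
 * Illustrative sketch (not part of the original header): typically used to
 * check whether received EIR data already carries a given field, e.g. a
 * complete local name (EIR_NAME_COMPLETE comes from <net/bluetooth/hci.h>):
 *
 *	name_known = eir_has_data_type(info->data, sizeof(info->data),
 *				       EIR_NAME_COMPLETE);
 */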
1014 
1015 int hci_register_cb(struct hci_cb *hcb);
1016 int hci_unregister_cb(struct hci_cb *hcb);
1017 
1018 struct hci_request {
1019 	struct hci_dev		*hdev;
1020 	struct sk_buff_head	cmd_q;
1021 
1022 	/* If something goes wrong when building the HCI request, the error
1023 	 * value is stored in this field.
1024 	 */
1025 	int			err;
1026 };
1027 
1028 void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
1029 int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
1030 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
1031 		 const void *param);
1032 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
1033 		    const void *param, u8 event);
1034 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
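
/*
 * Illustrative sketch (not part of the original header): several commands can
 * be batched through a struct hci_request; hci_req_run() queues them and the
 * completion callback runs once the whole sequence has finished. The
 * foo_req_complete() callback is a hypothetical hci_req_complete_t; the
 * opcode comes from <net/bluetooth/hci.h>.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, foo_req_complete);
 *	if (err < 0)
 *		return err;	(nothing was queued)
 */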
1035 
1036 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1037 			       const void *param, u32 timeout);
1038 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1039 				  const void *param, u8 event, u32 timeout);
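
/*
 * Illustrative sketch (not part of the original header): __hci_cmd_sync()
 * sends a single command and waits for its completion event, returning the
 * event parameters as an skb or an ERR_PTR() on failure. It is typically
 * called with hdev->req_lock held (e.g. from a setup() callback), and the
 * caller must free the skb:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	(...)
 *	kfree_skb(skb);
 */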
1040 
1041 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
1042 		 const void *param);
1043 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
1044 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
1045 
1046 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
1047 
1048 /* ----- HCI Sockets ----- */
1049 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
1050 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
1051 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
1052 
1053 void hci_sock_dev_event(struct hci_dev *hdev, int event);
1054 
1055 /* Management interface */
1056 #define DISCOV_TYPE_BREDR		(BIT(BDADDR_BREDR))
1057 #define DISCOV_TYPE_LE			(BIT(BDADDR_LE_PUBLIC) | \
1058 					 BIT(BDADDR_LE_RANDOM))
1059 #define DISCOV_TYPE_INTERLEAVED		(BIT(BDADDR_BREDR) | \
1060 					 BIT(BDADDR_LE_PUBLIC) | \
1061 					 BIT(BDADDR_LE_RANDOM))
1062 
1063 /* These LE scan and inquiry parameters were chosen according to the LE General
1064  * Discovery Procedure specification.
1065  */
1066 #define DISCOV_LE_SCAN_WIN		0x12
1067 #define DISCOV_LE_SCAN_INT		0x12
1068 #define DISCOV_LE_TIMEOUT		msecs_to_jiffies(10240)
1069 #define DISCOV_INTERLEAVED_TIMEOUT	msecs_to_jiffies(5120)
1070 #define DISCOV_INTERLEAVED_INQUIRY_LEN	0x04
1071 #define DISCOV_BREDR_INQUIRY_LEN	0x08
1072 
1073 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
1074 void mgmt_index_added(struct hci_dev *hdev);
1075 void mgmt_index_removed(struct hci_dev *hdev);
1076 void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
1077 int mgmt_powered(struct hci_dev *hdev, u8 powered);
1078 void mgmt_discoverable_timeout(struct hci_dev *hdev);
1079 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
1080 void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
1081 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
1082 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
1083 		       bool persistent);
1084 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1085 			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
1086 			   u8 *dev_class);
1087 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
1088 			      u8 link_type, u8 addr_type, u8 reason);
1089 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
1090 			    u8 link_type, u8 addr_type, u8 status);
1091 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1092 			 u8 addr_type, u8 status);
1093 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
1094 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1095 				  u8 status);
1096 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1097 				      u8 status);
1098 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
1099 			      u8 link_type, u8 addr_type, __le32 value,
1100 			      u8 confirm_hint);
1101 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1102 				     u8 link_type, u8 addr_type, u8 status);
1103 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1104 					 u8 link_type, u8 addr_type, u8 status);
1105 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
1106 			      u8 link_type, u8 addr_type);
1107 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1108 				     u8 link_type, u8 addr_type, u8 status);
1109 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1110 					 u8 link_type, u8 addr_type, u8 status);
1111 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
1112 			     u8 link_type, u8 addr_type, u32 passkey,
1113 			     u8 entered);
1114 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1115 		      u8 addr_type, u8 status);
1116 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
1117 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
1118 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
1119 				    u8 status);
1120 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
1121 void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
1122 					     u8 *randomizer, u8 status);
1123 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1124 		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
1125 		       u8 ssp, u8 *eir, u16 eir_len);
1126 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1127 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
1128 void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
1129 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
1130 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
1131 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
1132 void mgmt_reenable_advertising(struct hci_dev *hdev);
1133 
1134 /* HCI info for socket */
1135 #define hci_pi(sk) ((struct hci_pinfo *) sk)
1136 
1137 struct hci_pinfo {
1138 	struct bt_sock    bt;
1139 	struct hci_dev    *hdev;
1140 	struct hci_filter filter;
1141 	__u32             cmsg_mask;
1142 	unsigned short   channel;
1143 };
1144 
1145 /* HCI security filter */
1146 #define HCI_SFLT_MAX_OGF  5
1147 
1148 struct hci_sec_filter {
1149 	__u32 type_mask;
1150 	__u32 event_mask[2];
1151 	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
1152 };
1153 
1154 /* ----- HCI requests ----- */
1155 #define HCI_REQ_DONE	  0
1156 #define HCI_REQ_PEND	  1
1157 #define HCI_REQ_CANCELED  2
1158 
1159 #define hci_req_lock(d)		mutex_lock(&d->req_lock)
1160 #define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
1161 
1162 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
1163 					u16 latency, u16 to_multiplier);
1164 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
1165 							__u8 ltk[16]);
1166 
1167 #define SCO_AIRMODE_MASK       0x0003
1168 #define SCO_AIRMODE_CVSD       0x0000
1169 #define SCO_AIRMODE_TRANSP     0x0003
1170 
1171 #endif /* __HCI_CORE_H */
1172