/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

/* HCI Core structures */
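/*
 * inquiry_data carries the fields reported for one remote device in the
 * HCI inquiry result events (plain, with RSSI, or extended); it is the
 * payload stored in the inquiry cache below.
 */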
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

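/*
 * The inquiry cache is a spinlock-protected, singly linked list of
 * inquiry_entry items; timestamp (in jiffies) records when the cache
 * was last refreshed.
 */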
struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};

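/*
 * Despite its name, the connection "hash" is a flat list of struct
 * hci_conn plus counters of the active ACL and SCO links.
 */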
struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

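/*
 * One hci_dev represents a local HCI controller.  A transport driver
 * fills in the ops at the bottom of the structure and registers the
 * device; a minimal sketch (xx_open/xx_send/xx_close/xx_destruct, priv
 * and parent_device are placeholder names, not part of this API):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus         = HCI_USB;
 *	hdev->driver_data = priv;
 *	hdev->owner       = THIS_MODULE;
 *	hdev->open        = xx_open;
 *	hdev->close       = xx_close;
 *	hdev->send        = xx_send;
 *	hdev->destruct    = xx_destruct;
 *	SET_HCIDEV_DEV(hdev, parent_device);
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */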
struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	__u8		dev_name[248];
	__u8		dev_class[3];
	__u8		features[8];
	__u8		commands[64];
	__u8		ssp_mode;
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[3];

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct dentry		*debugfs;

	struct device		*parent;
	struct device		dev;

	struct rfkill		*rfkill;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

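/*
 * One hci_conn describes a single ACL or SCO/eSCO link.  Its lifetime is
 * managed with hci_conn_hold()/hci_conn_put() below: the last put arms
 * disc_timer, which tears the link down if nobody re-acquires it in time.
 */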
struct hci_conn {
	struct list_head list;

	atomic_t	 refcnt;
	spinlock_t	 lock;

	bdaddr_t	 dst;
	__u16		 handle;
	__u16		 state;
	__u8             mode;
	__u8		 type;
	__u8		 out;
	__u8		 attempt;
	__u8		 dev_class[3];
	__u8             features[8];
	__u8             ssp_mode;
	__u16            interval;
	__u16            pkt_type;
	__u16            link_policy;
	__u32		 link_mode;
	__u8             auth_type;
	__u8             sec_level;
	__u8             power_save;
	__u16            disc_timeout;
	unsigned long	 pend;

	unsigned int	 sent;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct device	dev;
	atomic_t	devref;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   // 30 seconds
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   // 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

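/*
 * Implemented in hci_core.c: lookup finds a cache entry by bdaddr, update
 * inserts a new entry or refreshes an existing one and stamps the cache.
 * The age helpers above return jiffies since the last update, which
 * callers compare against the *_AGE_MAX limits.
 */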
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);

/* ----- HCI Connections ----- */
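/* Bit numbers for hci_conn->pend, used with set_bit()/test_bit() and friends */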
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

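/*
 * The lookup helpers below do a linear walk of the connection list and
 * return the first match.  They take no locks themselves; callers are
 * expected to serialize access to the list (the core calls them from its
 * tasklets or with the hdev lock held).
 */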
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
					__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);

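/*
 * hci_conn_hold() takes a reference and cancels any pending disconnect;
 * hci_conn_put() drops it and, on the last reference, arms disc_timer:
 * connected ACL links wait disc_timeout ms (doubled for incoming links),
 * everything else is torn down after 10 ms.
 */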
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}

/* ----- HCI Devices ----- */
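/*
 * __hci_dev_hold()/__hci_dev_put() manage the hci_dev object itself (the
 * driver's destruct callback runs on the last put); hci_dev_hold()/
 * hci_dev_put() additionally pin and release the owning driver module.
 */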
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
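/*
 * features[] holds the LMP features mask read from the controller (and,
 * for a hci_conn, from the remote device); each macro below tests one
 * feature bit within the indicated byte of that mask.
 */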
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)

/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

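/*
 * Upper protocols register themselves into the hci_proto[] table through
 * hci_register_proto().  A minimal sketch, with handler names purely
 * illustrative:
 *
 *	static struct hci_proto l2cap_hci_proto = {
 *		.name		= "L2CAP",
 *		.id		= HCI_PROTO_L2CAP,
 *		.connect_ind	= l2cap_connect_ind,
 *		.connect_cfm	= l2cap_connect_cfm,
 *		.recv_acldata	= l2cap_recv_acldata,
 *	};
 *
 *	hci_register_proto(&l2cap_hci_proto);
 */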
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);

/* ----- HCI callbacks ----- */
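/*
 * hci_cb entries are registered by layers that want to be notified of
 * security, link-key and role-switch changes; the inline helpers below
 * walk hci_cb_list under hci_cb_list_lock and invoke them.
 */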
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

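/*
 * Describes what an unprivileged HCI socket may send and receive: a mask
 * of allowed packet types, allowed events, and a per-OGF bitmap of
 * allowed command OCFs.
 */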
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
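/*
 * Synchronous HCI requests: the core issues a command under req_lock,
 * sleeps on req_wait_q while req_status is HCI_REQ_PEND, and is woken by
 * hci_req_complete() with the result left in req_result.
 */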
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */