/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

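/* Cache of recently discovered devices: a singly linked list of
 * inquiry results protected by its own spinlock. */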
struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};

struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
};

#define NUM_REASSEMBLY 4
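
/* State of a single HCI controller (one instance per registered adapter). */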
struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	__u8		dev_name[248];
	__u8		dev_class[3];
	__u8		features[8];
	__u8		commands[64];
	__u8		ssp_mode;
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct workqueue_struct	*workqueue;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
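	/* Buffers for reassembling fragmented incoming HCI frames
	 * (see hci_recv_fragment() and hci_recv_stream_fragment()) */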
	struct sk_buff		*reassembly[NUM_REASSEMBLY];

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;
	__u16			req_last_cmd;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;
	struct list_head	blacklist;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct dentry		*debugfs;

	struct device		*parent;
	struct device		dev;

	struct rfkill		*rfkill;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

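/* State of a single baseband link (ACL, SCO or eSCO) to a remote device. */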
struct hci_conn {
	struct list_head list;

	atomic_t	 refcnt;
	spinlock_t	 lock;

	bdaddr_t	 dst;
	__u16		 handle;
	__u16		 state;
	__u8             mode;
	__u8		 type;
	__u8		 out;
	__u8		 attempt;
	__u8		 dev_class[3];
	__u8             features[8];
	__u8             ssp_mode;
	__u16            interval;
	__u16            pkt_type;
	__u16            link_policy;
	__u32		 link_mode;
	__u8             auth_type;
	__u8             sec_level;
	__u8             power_save;
	__u16            disc_timeout;
	unsigned long	 pend;

	unsigned int	 sent;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct device	dev;
	atomic_t	devref;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return c->list == NULL;
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);

/* ----- HCI Connections ----- */
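/* Bits used in hci_conn->pend to mark operations still awaiting a
 * completion event from the controller. */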
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
					__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);

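/* Take a reference on the connection and cancel any pending
 * deferred-disconnect timer. */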
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

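/* Drop a reference.  When the last one goes, arm disc_timer so the link
 * is torn down after disc_timeout ms for connected ACL links (doubled
 * for incoming links), or almost immediately otherwise. */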
static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}

/* ----- HCI Devices ----- */
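/* Device reference counting: the __ variants touch only hdev->refcnt,
 * while hci_dev_hold()/hci_dev_put() additionally pin the driver module
 * that owns the device. */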
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
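/* Convenience tests on the LMP feature bitmap reported by the controller
 * or the remote device (the features[8] arrays above). */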
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)

/* ----- HCI protocols ----- */
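/* Upper protocols (L2CAP and SCO) each register an hci_proto; the inline
 * hci_proto_* helpers below fan core events out to both of them. */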
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

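/* Ask the upper protocols for a disconnect reason; 0x13 ("remote user
 * terminated connection") is used if neither protocol overrides it. */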
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);

/* ----- HCI callbacks ----- */
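/* Additional per-connection notifications (security, link key change,
 * role switch) delivered to every listener on hci_cb_list; listeners
 * register with hci_register_cb(). */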
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* Management interface */
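/* Entry points for the Bluetooth management (mgmt) channel carried over
 * HCI control sockets. */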
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(u16 index);
int mgmt_index_removed(u16 index);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

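/* Default filter applied to unprivileged HCI sockets, restricting which
 * packet types, events and commands (by OGF/OCF) they may exchange. */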
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
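/* Status of a pending synchronous HCI request: the submitter queues the
 * command(s) and sleeps on hdev->req_wait_q until hci_req_complete()
 * records one of these in hdev->req_status. */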
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);

#endif /* __HCI_CORE_H */