xref: /linux/include/net/bluetooth/hci_core.h (revision 13abf8130139c2ccd4962a7e5a8902be5e6cb5a7)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <linux/proc_fs.h>
#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

#define HCI_INIT_TIMEOUT (HZ * 10)

extern struct proc_dir_entry *proc_bt_hci;

/* HCI Core structures */

struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__u16		clock_offset;
	__s8		rssi;
};

struct inquiry_entry {
	struct inquiry_entry 	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct inquiry_cache {
	spinlock_t 		lock;
	__u32			timestamp;
	struct inquiry_entry 	*list;
};

struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		type;
	bdaddr_t	bdaddr;
	__u8		features[8];
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		link_policy;
	__u16		link_mode;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;

	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t 		promisc;

#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*proc;
#endif

	struct class_device	class_dev;

	struct module 		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

struct hci_conn {
	struct list_head list;

	atomic_t	 refcnt;
	spinlock_t	 lock;

	bdaddr_t	 dst;
	__u16		 handle;
	__u16		 state;
	__u8		 type;
	__u8		 out;
	__u8		 dev_class[3];
	__u32		 link_mode;
	unsigned long	 pend;

	unsigned int	 sent;

	struct sk_buff_head data_q;

	struct timer_list timer;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   // 30 seconds
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   // 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int    hci_conn_del(struct hci_conn *conn);
void   hci_conn_hash_flush(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
int hci_conn_auth(struct hci_conn *conn);
int hci_conn_encrypt(struct hci_conn *conn);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);

static inline void hci_conn_set_timer(struct hci_conn *conn, unsigned long timeout)
{
	mod_timer(&conn->timer, jiffies + timeout);
}

static inline void hci_conn_del_timer(struct hci_conn *conn)
{
	del_timer(&conn->timer);
}

static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	hci_conn_del_timer(conn);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		if (conn->type == ACL_LINK) {
			unsigned long timeo = (conn->out) ?
				HCI_DISCONN_TIMEOUT : HCI_DISCONN_TIMEOUT * 2;
			hci_conn_set_timer(conn, timeo);
		} else
			hci_conn_set_timer(conn, HZ / 100);
	}
}
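
/* Illustrative usage sketch, not part of the original header: the usual
 * reference pattern around a connection.  hci_conn_hold() pins the link and
 * cancels its idle timer; the final hci_conn_put() re-arms the timer so an
 * unused link is disconnected later.  The function name is hypothetical. */
static inline int example_conn_auth_with_ref(struct hci_conn *conn)
{
	int err;

	hci_conn_hold(conn);		/* take a reference, stop the idle timer */
	err = hci_conn_auth(conn);	/* request authentication */
	hci_conn_put(conn);		/* drop reference; last put re-arms the timer */

	return err;
}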

/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
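
/* Illustrative usage sketch, not part of the original header: how a transport
 * driver might hand a received packet to the core.  example_deliver_event()
 * and the choice of HCI_EVENT_PKT are assumptions for the example;
 * bt_skb_alloc() and bt_cb() come from <net/bluetooth/bluetooth.h>, and
 * hci_recv_frame() above recovers hdev from skb->dev. */
static inline int example_deliver_event(struct hci_dev *hdev,
					void *buf, unsigned int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	skb->dev = (void *) hdev;		/* hci_recv_frame() expects hdev here */
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* assumed packet type field for kernels
						   of this vintage; older trees used
						   skb->pkt_type instead */
	memcpy(skb_put(skb, len), buf, len);

	return hci_recv_frame(skb);
}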

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->class_dev.dev = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev) (dev->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev) (dev->features[0] & LMP_ENCRYPT)

/* ----- HCI protocols ----- */
struct hci_proto {
	char 		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind) 	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*auth_cfm)		(struct hci_conn *conn, __u8 status);
	int (*encrypt_cfm)	(struct hci_conn *conn, __u8 status);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*auth_cfm)	(struct hci_conn *conn, __u8 status);
	void (*encrypt_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	hci_proto_auth_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->auth_cfm)
			cb->auth_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	hci_proto_encrypt_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->encrypt_cfm)
			cb->encrypt_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
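
/* Illustrative usage sketch, not part of the original header: a minimal
 * hci_cb user.  The caller supplies its own statically allocated struct
 * hci_cb with only the hooks it cares about filled in; the names used here
 * are hypothetical. */
static inline int example_register_auth_hook(struct hci_cb *cb,
			void (*auth_cfm)(struct hci_conn *conn, __u8 status))
{
	cb->name     = "example";
	cb->auth_cfm = auth_cfm;

	return hci_register_cb(cb);	/* pair with hci_unregister_cb() on exit */
}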

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
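
/* Illustrative usage sketch, not part of the original header: queueing a
 * parameterless HCI_Reset command.  OGF_HOST_CTL and OCF_RESET are assumed
 * to come from <net/bluetooth/hci.h>; the function name is hypothetical. */
static inline int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}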

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		down(&d->req_lock)
#define hci_req_unlock(d)	up(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */