/* xref: /linux/net/bluetooth/af_bluetooth.c (revision b6ebbac51bedf9e98e837688bc838f400196da5e) */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth address family and sockets. */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <asm/ioctls.h>

#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>

#include "selftest.h"

/* Bluetooth sockets */
#define BT_MAX_PROTO	8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

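/* Re-assign a socket's lockdep classes to the per-protocol keys above, so
 * that lockdep reports name the owning BTPROTO_* protocol rather than a
 * generic AF_BLUETOOTH lock class.
 */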
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	BUG_ON(!sock_allow_reclassification(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
				bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);

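/* Register the net_proto_family hooks for one BTPROTO_* number. A protocol
 * module typically calls this from its init path, along the lines of the
 * following sketch (illustrative only, names are hypothetical and not taken
 * from a particular module):
 *
 *	static const struct net_proto_family my_family_ops = {
 *		.family	= PF_BLUETOOTH,
 *		.owner	= THIS_MODULE,
 *		.create	= my_sock_create,
 *	};
 *
 *	err = bt_sock_register(BTPROTO_L2CAP, &my_family_ops);
 */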
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int err = 0;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (bt_proto[proto])
		err = -EEXIST;
	else
		bt_proto[proto] = ops;

	write_unlock(&bt_proto_lock);

	return err;
}
EXPORT_SYMBOL(bt_sock_register);

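/* Remove a previously registered protocol; out-of-range values are simply
 * ignored, and clearing an already empty slot is harmless.
 */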
void bt_sock_unregister(int proto)
{
	if (proto < 0 || proto >= BT_MAX_PROTO)
		return;

	write_lock(&bt_proto_lock);
	bt_proto[proto] = NULL;
	write_unlock(&bt_proto_lock);
}
EXPORT_SYMBOL(bt_sock_unregister);

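/* PF_BLUETOOTH socket(2) entry point: request the "bt-proto-<proto>" module
 * if the protocol is not yet registered, then hand off to the protocol's
 * create handler and reclassify the new socket's locks on success. Only
 * init_net is supported.
 */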
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}

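/* Add/remove a socket on a protocol's global socket list (protected by the
 * list's rwlock); these lists feed the /proc/net seq_file output below.
 */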
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);

void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);

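/* Queue a child socket on its parent's accept queue and account for it in
 * the parent's ack backlog; the reference taken here is dropped again by
 * bt_accept_unlink().
 */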
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);

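/* Undo bt_accept_enqueue(): detach the socket from its parent's accept
 * queue, fix up the parent's backlog and drop the queued reference.
 */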
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);

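/* Pick the first acceptable child off the parent's accept queue. A child in
 * BT_CLOSED is discarded; a BT_CONNECTED child (or any child when the parent
 * has BT_SK_DEFER_SETUP set, or when no newsock is supplied) is unlinked,
 * grafted onto newsock if one was given, and returned.
 */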
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;

		lock_sock(sk);

		/* FIXME: Is this check still needed? */
		if (sk->sk_state == BT_CLOSED) {
			bt_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);

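/* Generic datagram recvmsg for Bluetooth sockets: dequeue one skb, copy up
 * to @len bytes (setting MSG_TRUNC if the datagram was larger), fill in
 * timestamp control messages and, when the protocol provides a skb_msg_name
 * callback, the source address in msg_name.
 */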
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	size_t skblen;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		return err;
	}

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		if (bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	if (msg->msg_flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);

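/* Wait (with the socket lock released around the sleep) until data arrives,
 * the socket errors out or shuts down, a signal is pending, or the timeout
 * expires. Returns the remaining timeout.
 */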
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}

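/* Stream-style recvmsg built on the receive queue: keep dequeuing skbs and
 * copying data until the SO_RCVLOWAT target is met, the buffer is full, or
 * nothing more can be read. With MSG_PEEK, or when an skb is only partially
 * consumed, the skb is put back at the head of the queue.
 */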
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size   -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);

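/* Poll helper for listening sockets: report POLLIN as soon as a child on
 * the accept queue is ready to be accepted.
 */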
static inline unsigned int bt_accept_poll(struct sock *parent)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;
		if (sk->sk_state == BT_CONNECTED ||
		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
		     sk->sk_state == BT_CONNECT2))
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

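/* poll() for Bluetooth sockets. Listening sockets only report accept
 * readiness; for others the mask reflects pending errors, shutdown state,
 * queued receive data, and write space for sockets that are not still
 * connecting or configuring (and not suspended via BT_SK_SUSPEND).
 */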
unsigned int bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == BT_CONNECT ||
	    sk->sk_state == BT_CONNECT2 ||
	    sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(bt_sock_poll);

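/* Common ioctl handling: TIOCOUTQ (free send-buffer space), TIOCINQ (length
 * of the skb at the head of the receive queue) and the SIOCGSTAMP /
 * SIOCGSTAMPNS timestamp requests; everything else is left to the protocol
 * (-ENOIOCTLCMD).
 */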
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);

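/* /proc/net/<name> support: a seq_file walker over a protocol's
 * bt_sock_list, printing one line of generic socket state per socket plus
 * an optional protocol-specific tail via ->custom_seq_show.
 */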
#ifdef CONFIG_PROC_FS
struct bt_seq_state {
	struct bt_sock_list *l;
};

static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}

static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	return seq_hlist_next(v, &l->head, pos);
}

static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_unlock(&l->lock);
}

static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Parent");

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};

static int bt_seq_open(struct inode *inode, struct file *file)
{
	struct bt_sock_list *sk_list;
	struct bt_seq_state *s;

	sk_list = PDE_DATA(inode);
	s = __seq_open_private(file, &bt_seq_ops,
			       sizeof(struct bt_seq_state));
	if (!s)
		return -ENOMEM;

	s->l = sk_list;
	return 0;
}

static const struct file_operations bt_fops = {
	.open = bt_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};

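/* Create (or remove) the /proc/net entry for a protocol's socket list. The
 * optional seq_show callback appends protocol-specific columns to each line.
 */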
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	sk_list->custom_seq_show = seq_show;

	if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
		return -ENOMEM;
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
#else
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
}
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);

static struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};

struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);

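/* Subsystem init: run the optional self tests, create the debugfs and sysfs
 * roots, register the PF_BLUETOOTH socket family and then bring up the HCI
 * socket, L2CAP, SCO and management components, with partial cleanup on
 * failure.
 */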
static int __init bt_init(void)
{
	int err;

	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", BT_SUBSYS_VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;

	err = l2cap_init();
	if (err < 0)
		goto sock_err;

	err = sco_init();
	if (err < 0) {
		l2cap_exit();
		goto sock_err;
	}

	err = mgmt_init();
	if (err < 0) {
		sco_exit();
		l2cap_exit();
		goto sock_err;
	}

	return 0;

sock_err:
	hci_sock_cleanup();

error:
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();

	return err;
}

static void __exit bt_exit(void)
{
	mgmt_exit();

	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}

subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " BT_SUBSYS_VERSION);
MODULE_VERSION(BT_SUBSYS_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);