xref: /linux/net/bluetooth/hci_sock.c (revision c0e297dc61f8d4453e07afbea1fa8d0e67cd4a34)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI sockets. */
26 
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 #include <net/bluetooth/mgmt.h>
34 
35 #include "mgmt_util.h"
36 
37 static LIST_HEAD(mgmt_chan_list);
38 static DEFINE_MUTEX(mgmt_chan_list_lock);
39 
40 static atomic_t monitor_promisc = ATOMIC_INIT(0);
41 
42 /* ----- HCI socket interface ----- */
43 
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state of an HCI socket. bt (struct bt_sock, which embeds
 * struct sock) must stay first so a struct sock pointer can be cast
 * directly via the hci_pi() macro above.
 */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;      /* bound controller, NULL if unbound */
	struct hci_filter filter;     /* raw-channel packet/event filter */
	__u32             cmsg_mask;  /* HCI_CMSG_* ancillary data to attach */
	unsigned short    channel;    /* HCI_CHANNEL_* the socket is bound to */
	unsigned long     flags;      /* HCI_SOCK_* / HCI_MGMT_* flag bits */
};
55 
/* Atomically set flag bit @nr on the socket's per-socket flags word */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
60 
/* Atomically clear flag bit @nr on the socket's per-socket flags word */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
65 
/* Return non-zero if flag bit @nr is set on the socket */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
70 
/* Return the HCI_CHANNEL_* this socket was bound to */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
75 
76 static inline int hci_test_bit(int nr, const void *addr)
77 {
78 	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
79 }
80 
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Whitelist bitmaps for raw sockets lacking CAP_NET_RAW: which packet
 * types they may use, which events they may receive, and per command
 * group (OGF) which command bits (OCF) they may send.
 */
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
110 
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
114 
/* Decide whether @skb must be withheld from @sk's receive queue.
 * Returns true when the socket's filter rejects the packet, false when
 * it may be delivered.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	/* Vendor packets are tracked via type-mask bit 0 */
	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	/* First payload byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* The opcode is read at offset 3 for Command Complete and offset 4
	 * for Command Status events; events for other commands are dropped.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
154 
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel sockets only get incoming
			 * event/ACL/SCO traffic.
			 */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom, built lazily
			 * once the first interested socket is found, then
			 * reused (cloned) for every further recipient.
			 */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
212 
213 /* Send frame to sockets with specific channel */
214 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
215 			 int flag, struct sock *skip_sk)
216 {
217 	struct sock *sk;
218 
219 	BT_DBG("channel %u len %d", channel, skb->len);
220 
221 	read_lock(&hci_sk_list.lock);
222 
223 	sk_for_each(sk, &hci_sk_list.head) {
224 		struct sk_buff *nskb;
225 
226 		/* Ignore socket without the flag set */
227 		if (!hci_sock_test_flag(sk, flag))
228 			continue;
229 
230 		/* Skip the original socket */
231 		if (sk == skip_sk)
232 			continue;
233 
234 		if (sk->sk_state != BT_BOUND)
235 			continue;
236 
237 		if (hci_pi(sk)->channel != channel)
238 			continue;
239 
240 		nskb = skb_clone(skb, GFP_ATOMIC);
241 		if (!nskb)
242 			continue;
243 
244 		if (sock_queue_rcv_skb(sk, nskb))
245 			kfree_skb(nskb);
246 	}
247 
248 	read_unlock(&hci_sk_list.lock);
249 }
250 
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Cheap bail-out when nobody is listening on the monitor channel */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and, for data, direction) to a monitor opcode */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Other packet types are not forwarded to the monitor */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
301 
/* Build a monitor channel event for a device registration change.
 * Returns a freshly allocated skb (owned by the caller) or NULL on
 * allocation failure or for events other than HCI_DEV_REG/HCI_DEV_UNREG.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* New-index payload: type, bus, address and device name */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Delete-index event carries no payload */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the common monitor header */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
345 
346 static void send_monitor_replay(struct sock *sk)
347 {
348 	struct hci_dev *hdev;
349 
350 	read_lock(&hci_dev_list_lock);
351 
352 	list_for_each_entry(hdev, &hci_dev_list, list) {
353 		struct sk_buff *skb;
354 
355 		skb = create_monitor_event(hdev, HCI_DEV_REG);
356 		if (!skb)
357 			continue;
358 
359 		if (sock_queue_rcv_skb(sk, skb))
360 			kfree_skb(skb);
361 	}
362 
363 	read_unlock(&hci_dev_list_lock);
364 }
365 
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Build an HCI event header so raw sockets can parse the frame */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the event as incoming traffic with a fresh timestamp */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
392 
/* Notify monitor and raw sockets of a device state change; on
 * unregistration, additionally detach every socket still bound to the
 * device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				/* Wake readers with EPIPE and drop the
				 * reference this socket held on the device.
				 */
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
436 
437 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
438 {
439 	struct hci_mgmt_chan *c;
440 
441 	list_for_each_entry(c, &mgmt_chan_list, list) {
442 		if (c->channel == channel)
443 			return c;
444 	}
445 
446 	return NULL;
447 }
448 
449 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
450 {
451 	struct hci_mgmt_chan *c;
452 
453 	mutex_lock(&mgmt_chan_list_lock);
454 	c = __hci_mgmt_chan_find(channel);
455 	mutex_unlock(&mgmt_chan_list_lock);
456 
457 	return c;
458 }
459 
460 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
461 {
462 	if (c->channel < HCI_CHANNEL_CONTROL)
463 		return -EINVAL;
464 
465 	mutex_lock(&mgmt_chan_list_lock);
466 	if (__hci_mgmt_chan_find(c->channel)) {
467 		mutex_unlock(&mgmt_chan_list_lock);
468 		return -EALREADY;
469 	}
470 
471 	list_add_tail(&c->list, &mgmt_chan_list);
472 
473 	mutex_unlock(&mgmt_chan_list_lock);
474 
475 	return 0;
476 }
477 EXPORT_SYMBOL(hci_mgmt_chan_register);
478 
/* Remove a previously registered management channel from the list */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
486 
/* Release an HCI socket: drop monitor accounting, unlink it from the
 * global list, hand an exclusively held controller back to the stack,
 * and free all queued skbs.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* Close the device and re-announce it to the
			 * management interface.
			 */
			hci_dev_close(hdev->id);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
523 
524 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
525 {
526 	bdaddr_t bdaddr;
527 	int err;
528 
529 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
530 		return -EFAULT;
531 
532 	hci_dev_lock(hdev);
533 
534 	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
535 
536 	hci_dev_unlock(hdev);
537 
538 	return err;
539 }
540 
541 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
542 {
543 	bdaddr_t bdaddr;
544 	int err;
545 
546 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
547 		return -EFAULT;
548 
549 	hci_dev_lock(hdev);
550 
551 	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
552 
553 	hci_dev_unlock(hdev);
554 
555 	return err;
556 }
557 
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	/* A device claimed by a user channel, still unconfigured, or not
	 * BR/EDR cannot be driven through these legacy ioctls.
	 */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is not supported; the permission check is kept
		 * so unprivileged callers still see -EPERM first.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
601 
/* Top-level ioctl handler: device-global ioctls run without the socket
 * lock; everything else is forwarded to hci_sock_bound_ioctl() under
 * the lock. Only raw channel sockets may issue ioctls.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* NOTE(review): the socket lock is dropped here and re-taken
	 * below; the channel was only validated under the first lock —
	 * confirm a concurrent bind cannot change it in between.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need a socket bound to a device */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
674 
/* Bind the socket to a channel and, depending on the channel, to a
 * specific controller. Performs the per-channel permission and
 * availability checks; the user channel additionally takes exclusive
 * control of the device.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; fields not supplied read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE leaves the socket unbound (all devices);
		 * otherwise pin the requested controller.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires an explicit device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse devices that are initializing, in setup/config,
		 * or already up for regular use (the HCI_AUTO_OFF grace
		 * period is the exception).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Undo the claim taken above */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay the current device list to the new listener */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
852 
853 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
854 			    int *addr_len, int peer)
855 {
856 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
857 	struct sock *sk = sock->sk;
858 	struct hci_dev *hdev;
859 	int err = 0;
860 
861 	BT_DBG("sock %p sk %p", sock, sk);
862 
863 	if (peer)
864 		return -EOPNOTSUPP;
865 
866 	lock_sock(sk);
867 
868 	hdev = hci_pi(sk)->hdev;
869 	if (!hdev) {
870 		err = -EBADFD;
871 		goto done;
872 	}
873 
874 	*addr_len = sizeof(*haddr);
875 	haddr->hci_family = AF_BLUETOOTH;
876 	haddr->hci_dev    = hdev->id;
877 	haddr->hci_channel= hci_pi(sk)->channel;
878 
879 done:
880 	release_sock(sk);
881 	return err;
882 }
883 
/* Attach the ancillary data the socket asked for (direction and/or
 * receive timestamp) to a message being delivered on a raw socket.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks not using 64-bit time expect the smaller
		 * compat_timeval layout instead.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
920 
/* Dequeue one packet for userspace, attaching per-channel ancillary
 * data. Returns the number of bytes copied or a negative errno.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Truncate to the caller's buffer but flag that it happened */
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Per-channel ancillary data */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
968 
/* Parse and dispatch one management command from @msg on channel @chan.
 * Returns the consumed length on success or a negative errno;
 * command-level failures are reported to the socket via
 * mgmt_cmd_status() and still return its (non-negative) result.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Declared payload length must match what was actually sent */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only run commands marked untrusted */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or claimed by a user channel
		 * are not addressable via management commands.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Handler either requires a device index or forbids one */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Fixed-length handlers need an exact match; variable-length
	 * handlers only a minimum.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1079 
/* Transmit one frame from userspace. Raw/user channel frames carry the
 * packet type in their first byte; management channels dispatch to the
 * registered handler. Returns bytes consumed or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Smallest valid frame: type byte plus a 3-byte command header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are receive-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type; strip it from the payload */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security-filter whitelist require
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor commands (OGF 0x3f) bypass the command queue */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw ACL/SCO data packets require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1204 
/* Set raw channel socket options: ancillary-data flags and the
 * packet/event filter. Only valid on HCI_CHANNEL_RAW sockets.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-load uf with the current filter so a short copy
		 * from userspace leaves the remaining fields unchanged.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets cannot widen the filter beyond the
		 * security filter whitelist.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1287 
1288 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1289 			       char __user *optval, int __user *optlen)
1290 {
1291 	struct hci_ufilter uf;
1292 	struct sock *sk = sock->sk;
1293 	int len, opt, err = 0;
1294 
1295 	BT_DBG("sk %p, opt %d", sk, optname);
1296 
1297 	if (get_user(len, optlen))
1298 		return -EFAULT;
1299 
1300 	lock_sock(sk);
1301 
1302 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1303 		err = -EBADFD;
1304 		goto done;
1305 	}
1306 
1307 	switch (optname) {
1308 	case HCI_DATA_DIR:
1309 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1310 			opt = 1;
1311 		else
1312 			opt = 0;
1313 
1314 		if (put_user(opt, optval))
1315 			err = -EFAULT;
1316 		break;
1317 
1318 	case HCI_TIME_STAMP:
1319 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1320 			opt = 1;
1321 		else
1322 			opt = 0;
1323 
1324 		if (put_user(opt, optval))
1325 			err = -EFAULT;
1326 		break;
1327 
1328 	case HCI_FILTER:
1329 		{
1330 			struct hci_filter *f = &hci_pi(sk)->filter;
1331 
1332 			memset(&uf, 0, sizeof(uf));
1333 			uf.type_mask = f->type_mask;
1334 			uf.opcode    = f->opcode;
1335 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1336 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1337 		}
1338 
1339 		len = min_t(unsigned int, len, sizeof(uf));
1340 		if (copy_to_user(optval, &uf, len))
1341 			err = -EFAULT;
1342 		break;
1343 
1344 	default:
1345 		err = -ENOPROTOOPT;
1346 		break;
1347 	}
1348 
1349 done:
1350 	release_sock(sk);
1351 	return err;
1352 }
1353 
/* Socket operations for PF_BLUETOOTH/BTPROTO_HCI sockets. HCI sockets
 * are datagram-style: connect/listen/accept/socketpair/shutdown/mmap
 * are all wired to the generic sock_no_* rejection stubs, and polling
 * goes through the generic datagram_poll helper.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1373 
/* Protocol descriptor for HCI sockets; obj_size makes sk_alloc()
 * reserve room for the full struct hci_pinfo (bt_sock plus the
 * HCI-specific fields) in one allocation.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1379 
1380 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1381 			   int kern)
1382 {
1383 	struct sock *sk;
1384 
1385 	BT_DBG("sock %p", sock);
1386 
1387 	if (sock->type != SOCK_RAW)
1388 		return -ESOCKTNOSUPPORT;
1389 
1390 	sock->ops = &hci_sock_ops;
1391 
1392 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1393 	if (!sk)
1394 		return -ENOMEM;
1395 
1396 	sock_init_data(sock, sk);
1397 
1398 	sock_reset_flag(sk, SOCK_ZAPPED);
1399 
1400 	sk->sk_protocol = protocol;
1401 
1402 	sock->state = SS_UNCONNECTED;
1403 	sk->sk_state = BT_OPEN;
1404 
1405 	bt_sock_link(&hci_sk_list, sk);
1406 	return 0;
1407 }
1408 
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH,
 * SOCK_RAW, BTPROTO_HCI) dispatches to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1414 
1415 int __init hci_sock_init(void)
1416 {
1417 	int err;
1418 
1419 	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1420 
1421 	err = proto_register(&hci_sk_proto, 0);
1422 	if (err < 0)
1423 		return err;
1424 
1425 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1426 	if (err < 0) {
1427 		BT_ERR("HCI socket registration failed");
1428 		goto error;
1429 	}
1430 
1431 	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1432 	if (err < 0) {
1433 		BT_ERR("Failed to create HCI proc file");
1434 		bt_sock_unregister(BTPROTO_HCI);
1435 		goto error;
1436 	}
1437 
1438 	BT_INFO("HCI socket layer initialized");
1439 
1440 	return 0;
1441 
1442 error:
1443 	proto_unregister(&hci_sk_proto);
1444 	return err;
1445 }
1446 
/* Tear down everything hci_sock_init() registered, in reverse order:
 * proc file first, then the socket family, then the proto.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}
1453