xref: /linux/net/bluetooth/hci_sock.c (revision 91e60eb60bdf078fc58b8d2fd1ac12f3c09bb893)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

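/* Number of sockets currently bound to the monitor channel. Trace
 * packets for the monitor are only generated while this is non-zero.
 */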
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

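/* Test bit nr in a bitmap laid out as an array of __u32 words. Unlike
 * test_bit(), this makes no atomicity or word-size assumptions, which
 * matches the fixed __u32 masks of the security filter below.
 */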
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

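/* Whitelist masks applied to raw sockets without CAP_NET_RAW: one bit
 * per packet type, per event code, and per OCF within each OGF.
 */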
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

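/* Return true if the socket's filter rejects this packet. Vendor
 * packets map to filter type 0; event packets are additionally matched
 * against the event mask and, for command complete/status events, the
 * opcode filter.
 */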
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

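		/* The private copy is created lazily, only once a
		 * receiving socket has matched; each further receiver
		 * then gets a cheap clone sharing the copied data.
		 */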
		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

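/* Build an HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX packet announcing the
 * registration state of a controller to the monitor channel.
 */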
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

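/* Replay a NEW_INDEX event for every registered controller so that a
 * freshly bound monitor socket learns about all existing devices.
 */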
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

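/* Notify userspace of a device event: trace it on the monitor channel,
 * synthesize an HCI_EV_STACK_INTERNAL event for raw sockets, and on
 * unregistration detach every socket still bound to the device.
 */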
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel's exclusive access,
			 * call hci_dev_do_close directly instead of
			 * hci_dev_close to ensure the exclusive access is
			 * released and the controller is brought back down.
			 *
			 * Checking HCI_AUTO_OFF is not needed here since it
			 * will already have been cleared when the user
			 * channel was opened.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

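		/* Refuse exclusive access while the device is initializing,
		 * still in setup or config, or already up for a reason other
		 * than the HCI_AUTO_OFF grace period.
		 */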
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel while the HCI_AUTO_OFF grace
				 * period is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * and is therefore implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * only untrusted events are delivered.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * might be cleared later, in which case sending these
		 * events is disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

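/* Attach the ancillary data that the socket requested via the
 * HCI_DATA_DIR and HCI_TIME_STAMP options: packet direction and a
 * receive timestamp (with compat handling for 32-bit callers).
 */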
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

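/* Handle one management command frame: a little-endian mgmt_hdr
 * (opcode, index, len) followed by len parameter bytes, dispatched to
 * the handler table of the channel the socket is bound to.
 */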
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for the user channel
		 * since permissions are enforced when binding the socket.
		 *
		 * However, check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

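		/* Unprivileged raw sockets may only send commands that the
		 * security filter whitelists; CAP_NET_RAW bypasses this.
		 */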
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

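		/* OGF 0x3f is the vendor-specific command group; those
		 * commands bypass the command queue and are sent raw.
		 */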
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

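	/* bind() and getname() copy addresses through a generic
	 * struct sockaddr, so sockaddr_hci must not outgrow it.
	 */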
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}