xref: /linux/net/bluetooth/hci_sock.c (revision 25aee3debe0464f6c680173041fa3de30ec9ff54)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI sockets. */
26 
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
35 
36 /* ----- HCI socket interface ----- */
37 
38 static inline int hci_test_bit(int nr, void *addr)
39 {
40 	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
41 }
42 
/* Security filter: capability gates applied to sockets that lack
 * CAP_NET_RAW.  type_mask restricts which packet types may be set in
 * a socket filter, event_mask (64-bit map) restricts which HCI events
 * may be received, and ocf_mask[] restricts which commands may be sent,
 * one bitmap per OGF (see hci_sock_sendmsg() and hci_sock_setsockopt()).
 */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
64 
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
68 
/* Send frame to RAW socket.
 *
 * Deliver @skb (an HCI frame belonging to @hdev) to every bound raw
 * channel socket attached to this device, subject to each socket's
 * filter.  A private copy with the packet type byte pushed in front is
 * created lazily on the first matching socket and then cloned per
 * receiver; the caller keeps ownership of @skb itself.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		/* Only bound sockets attached to this device */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		/* Vendor packets map to filter bit 0; everything else
		 * uses its (masked) packet type as the bit index. */
		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
			      &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			/* With an opcode filter set, only pass command
			 * complete/status events for that exact opcode
			 * (opcode sits at offset 3 resp. 4 in the event). */
			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* On queueing failure the clone is ours to free */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
140 
141 /* Send frame to control socket */
142 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
143 {
144 	struct sock *sk;
145 	struct hlist_node *node;
146 
147 	BT_DBG("len %d", skb->len);
148 
149 	read_lock(&hci_sk_list.lock);
150 
151 	sk_for_each(sk, node, &hci_sk_list.head) {
152 		struct sk_buff *nskb;
153 
154 		/* Skip the original socket */
155 		if (sk == skip_sk)
156 			continue;
157 
158 		if (sk->sk_state != BT_BOUND)
159 			continue;
160 
161 		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
162 			continue;
163 
164 		nskb = skb_clone(skb, GFP_ATOMIC);
165 		if (!nskb)
166 			continue;
167 
168 		if (sock_queue_rcv_skb(sk, nskb))
169 			kfree_skb(nskb);
170 	}
171 
172 	read_unlock(&hci_sk_list.lock);
173 }
174 
/* Send frame to monitor socket.
 *
 * Mirror @skb from @hdev to every bound monitor channel socket,
 * prefixed with a hci_mon_hdr (opcode/index/length).  Returns early
 * when no monitor socket exists (monitor_promisc == 0), so the common
 * path stays cheap.  The prefixed copy is built lazily on the first
 * matching socket and cloned per receiver.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and, for data, direction) to a monitor opcode */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Unknown packet types are not mirrored */
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data; hdr->len is the
			 * original payload length (skb, not skb_copy). */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
250 
251 static void send_monitor_event(struct sk_buff *skb)
252 {
253 	struct sock *sk;
254 	struct hlist_node *node;
255 
256 	BT_DBG("len %d", skb->len);
257 
258 	read_lock(&hci_sk_list.lock);
259 
260 	sk_for_each(sk, node, &hci_sk_list.head) {
261 		struct sk_buff *nskb;
262 
263 		if (sk->sk_state != BT_BOUND)
264 			continue;
265 
266 		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
267 			continue;
268 
269 		nskb = skb_clone(skb, GFP_ATOMIC);
270 		if (!nskb)
271 			continue;
272 
273 		if (sock_queue_rcv_skb(sk, nskb))
274 			kfree_skb(nskb);
275 	}
276 
277 	read_unlock(&hci_sk_list.lock);
278 }
279 
/* Build a monitor event skb for @hdev: HCI_MON_NEW_INDEX for
 * HCI_DEV_REG, HCI_MON_DEL_INDEX for HCI_DEV_UNREG.  Returns NULL for
 * any other @event or on allocation failure.  The returned skb is
 * timestamped, carries a hci_mon_hdr, and is owned by the caller.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* Describe the new controller to the monitor */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed 8-byte copy; assumes hdev->name fits in 8 bytes
		 * (NOTE(review): verify against struct hci_dev). */
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Deletion carries no payload beyond the header */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len excludes the header itself */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
323 
/* Replay an HCI_MON_NEW_INDEX event for every registered controller to
 * a freshly bound monitor socket @sk, so it learns the current index
 * set before live events start flowing (called from hci_sock_bind()).
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		/* Unlike the broadcast paths, the skb is queued directly
		 * (no clone), so free it only if queueing fails. */
		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
343 
/* Generate internal stack event.
 *
 * Fabricate an HCI_EV_STACK_INTERNAL event carrying @type and @dlen
 * bytes of @data, mark it incoming, and deliver it to raw sockets via
 * hci_send_to_sock().  @hdev may be NULL (device add/remove
 * notifications in hci_sock_dev_event()).  Allocation failure is
 * silently ignored (best-effort notification).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present it like a received event so filters treat it normally */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
371 
/* Propagate a device event (@event, e.g. HCI_DEV_REG/HCI_DEV_UNREG)
 * for @hdev: mirror it to monitor sockets, emit it as a stack-internal
 * event to raw sockets, and on unregister detach and wake every socket
 * still bound to the device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				/* Report a broken pipe and wake readers */
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
415 
/* Release an HCI socket: drop monitor promiscuity if it was a monitor
 * socket, unlink it from the global list, release the bound device
 * reference, and purge pending queues before dropping the last ref.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Matches the atomic_inc(&monitor_promisc) done at bind time */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		/* Matches atomic_inc(&hdev->promisc) / hci_dev_get() at bind */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
446 
447 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
448 {
449 	bdaddr_t bdaddr;
450 	int err;
451 
452 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
453 		return -EFAULT;
454 
455 	hci_dev_lock(hdev);
456 
457 	err = hci_blacklist_add(hdev, &bdaddr, 0);
458 
459 	hci_dev_unlock(hdev);
460 
461 	return err;
462 }
463 
464 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
465 {
466 	bdaddr_t bdaddr;
467 	int err;
468 
469 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
470 		return -EFAULT;
471 
472 	hci_dev_lock(hdev);
473 
474 	err = hci_blacklist_del(hdev, &bdaddr, 0);
475 
476 	hci_dev_unlock(hdev);
477 
478 	return err;
479 }
480 
/* Ioctls that require a bound socket (an attached hci_dev).
 * Called from hci_sock_ioctl() with the socket lock held.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		/* Toggling raw mode needs CAP_NET_ADMIN and is refused
		 * for controllers that are raw-only by quirk. */
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		/* Unknown commands fall through to the driver, if any */
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
527 
/* Top-level ioctl handler for HCI sockets.  Global queries need no
 * capability; device state changes need CAP_NET_ADMIN; anything else
 * is forwarded to hci_sock_bound_ioctl() under the socket lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		/* Device-bound commands need the socket lock held */
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
589 
/* Bind an HCI socket to a channel and, for the raw channel, optionally
 * to a device.  RAW with a device id takes a reference on the hci_dev
 * and marks it promiscuous; CONTROL requires CAP_NET_ADMIN; MONITOR
 * requires CAP_NET_RAW and replays the current index set.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr) bytes; a shorter address leaves
	 * the tail zeroed by the memset. */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE binds "promiscuously" to no particular
		 * device; otherwise take a device reference. */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Tell the new monitor about all existing controllers */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
679 
680 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
681 			    int *addr_len, int peer)
682 {
683 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
684 	struct sock *sk = sock->sk;
685 	struct hci_dev *hdev = hci_pi(sk)->hdev;
686 
687 	BT_DBG("sock %p sk %p", sock, sk);
688 
689 	if (!hdev)
690 		return -EBADFD;
691 
692 	lock_sock(sk);
693 
694 	*addr_len = sizeof(*haddr);
695 	haddr->hci_family = AF_BLUETOOTH;
696 	haddr->hci_dev    = hdev->id;
697 
698 	release_sock(sk);
699 	return 0;
700 }
701 
/* Attach ancillary data to a received message according to the
 * socket's cmsg_mask: the frame direction (HCI_CMSG_DIR) and/or the
 * receive timestamp (HCI_CMSG_TSTAMP, with 32-bit compat handling).
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks expect a compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
738 
/* Receive one queued frame.  Returns the number of bytes copied (the
 * frame is truncated to @len with MSG_TRUNC set if too small), 0 on a
 * closed socket, or a negative errno.  Raw channel sockets also get
 * ancillary data via hci_sock_cmsg().
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* HCI sockets have no source address to report */
	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
784 
/* Send one HCI frame.  The first byte of the user buffer is the packet
 * type; commands from unprivileged sockets are checked against
 * hci_sec_filter, and data packets require CAP_NET_RAW.  Control
 * channel messages are diverted to mgmt_control(); the monitor channel
 * is receive-only.  Returns @len on success or a negative errno.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte plus a command header worth of data */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are receive-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type; strip it from the payload */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Without CAP_NET_RAW, the command must be whitelisted
		 * in hci_sec_filter for its OGF/OCF. */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor commands (OGF 0x3f) and raw-mode devices bypass
		 * the command queue and its flow control. */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
884 
/* Set per-socket options (raw channel only): direction/timestamp
 * ancillary data flags and the receive filter.  For HCI_FILTER, uf is
 * pre-loaded with the current filter so a short user buffer only
 * overwrites a prefix; sockets without CAP_NET_RAW are clamped to
 * hci_sec_filter.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Snapshot the current filter as defaults */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets may not widen beyond the
		 * security filter */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
967 
968 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
969 			       char __user *optval, int __user *optlen)
970 {
971 	struct hci_ufilter uf;
972 	struct sock *sk = sock->sk;
973 	int len, opt, err = 0;
974 
975 	BT_DBG("sk %p, opt %d", sk, optname);
976 
977 	if (get_user(len, optlen))
978 		return -EFAULT;
979 
980 	lock_sock(sk);
981 
982 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
983 		err = -EINVAL;
984 		goto done;
985 	}
986 
987 	switch (optname) {
988 	case HCI_DATA_DIR:
989 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
990 			opt = 1;
991 		else
992 			opt = 0;
993 
994 		if (put_user(opt, optval))
995 			err = -EFAULT;
996 		break;
997 
998 	case HCI_TIME_STAMP:
999 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1000 			opt = 1;
1001 		else
1002 			opt = 0;
1003 
1004 		if (put_user(opt, optval))
1005 			err = -EFAULT;
1006 		break;
1007 
1008 	case HCI_FILTER:
1009 		{
1010 			struct hci_filter *f = &hci_pi(sk)->filter;
1011 
1012 			uf.type_mask = f->type_mask;
1013 			uf.opcode    = f->opcode;
1014 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1015 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1016 		}
1017 
1018 		len = min_t(unsigned int, len, sizeof(uf));
1019 		if (copy_to_user(optval, &uf, len))
1020 			err = -EFAULT;
1021 		break;
1022 
1023 	default:
1024 		err = -ENOPROTOOPT;
1025 		break;
1026 	}
1027 
1028 done:
1029 	release_sock(sk);
1030 	return err;
1031 }
1032 
/* Socket operations for HCI sockets.  HCI sockets are datagram-style
 * and connectionless, so listen/connect/accept/etc. are stubbed with
 * the sock_no_* helpers. */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1052 
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket hci_pinfo accessed via hci_pi(). */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1058 
/* Create a new HCI socket (only SOCK_RAW is supported), initialize it
 * in the BT_OPEN state and link it into the global socket list.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1087 
/* Family ops registered with the Bluetooth core for BTPROTO_HCI */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1093 
1094 int __init hci_sock_init(void)
1095 {
1096 	int err;
1097 
1098 	err = proto_register(&hci_sk_proto, 0);
1099 	if (err < 0)
1100 		return err;
1101 
1102 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1103 	if (err < 0)
1104 		goto error;
1105 
1106 	BT_INFO("HCI socket layer initialized");
1107 
1108 	return 0;
1109 
1110 error:
1111 	BT_ERR("HCI socket registration failed");
1112 	proto_unregister(&hci_sk_proto);
1113 	return err;
1114 }
1115 
1116 void hci_sock_cleanup(void)
1117 {
1118 	if (bt_sock_unregister(BTPROTO_HCI) < 0)
1119 		BT_ERR("HCI socket unregistration failed");
1120 
1121 	proto_unregister(&hci_sk_proto);
1122 }
1123