xref: /linux/net/bluetooth/hci_sock.c (revision 1b98f357dadd6ea613a435fbaef1a5dd7b35fd21)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI sockets. */
26 #include <linux/compat.h>
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <linux/unaligned.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "mgmt_util.h"
38 
39 static LIST_HEAD(mgmt_chan_list);
40 static DEFINE_MUTEX(mgmt_chan_list_lock);
41 
42 static DEFINE_IDA(sock_cookie_ida);
43 
44 static atomic_t monitor_promisc = ATOMIC_INIT(0);
45 
46 /* ----- HCI socket interface ----- */
47 
48 /* Socket info */
49 #define hci_pi(sk) ((struct hci_pinfo *) sk)
50 
51 struct hci_pinfo {
52 	struct bt_sock    bt;
53 	struct hci_dev    *hdev;
54 	struct hci_filter filter;
55 	__u8              cmsg_mask;
56 	unsigned short    channel;
57 	unsigned long     flags;
58 	__u32             cookie;
59 	char              comm[TASK_COMM_LEN];
60 	__u16             mtu;
61 };
62 
63 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
64 {
65 	struct hci_dev *hdev = hci_pi(sk)->hdev;
66 
67 	if (!hdev)
68 		return ERR_PTR(-EBADFD);
69 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
70 		return ERR_PTR(-EPIPE);
71 	return hdev;
72 }
73 
74 void hci_sock_set_flag(struct sock *sk, int nr)
75 {
76 	set_bit(nr, &hci_pi(sk)->flags);
77 }
78 
79 void hci_sock_clear_flag(struct sock *sk, int nr)
80 {
81 	clear_bit(nr, &hci_pi(sk)->flags);
82 }
83 
84 int hci_sock_test_flag(struct sock *sk, int nr)
85 {
86 	return test_bit(nr, &hci_pi(sk)->flags);
87 }
88 
89 unsigned short hci_sock_get_channel(struct sock *sk)
90 {
91 	return hci_pi(sk)->channel;
92 }
93 
94 u32 hci_sock_get_cookie(struct sock *sk)
95 {
96 	return hci_pi(sk)->cookie;
97 }
98 
99 static bool hci_sock_gen_cookie(struct sock *sk)
100 {
101 	int id = hci_pi(sk)->cookie;
102 
103 	if (!id) {
104 		id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
105 		if (id < 0)
106 			id = 0xffffffff;
107 
108 		hci_pi(sk)->cookie = id;
109 		get_task_comm(hci_pi(sk)->comm, current);
110 		return true;
111 	}
112 
113 	return false;
114 }
115 
116 static void hci_sock_free_cookie(struct sock *sk)
117 {
118 	int id = hci_pi(sk)->cookie;
119 
120 	if (id) {
121 		hci_pi(sk)->cookie = 0xffffffff;
122 		ida_free(&sock_cookie_ida, id);
123 	}
124 }
125 
126 static inline int hci_test_bit(int nr, const void *addr)
127 {
128 	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
129 }
130 
131 /* Security filter */
132 #define HCI_SFLT_MAX_OGF  5
133 
134 struct hci_sec_filter {
135 	__u32 type_mask;
136 	__u32 event_mask[2];
137 	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
138 };
139 
140 static const struct hci_sec_filter hci_sec_filter = {
141 	/* Packet types */
142 	0x10,
143 	/* Events */
144 	{ 0x1000d9fe, 0x0000b00c },
145 	/* Commands */
146 	{
147 		{ 0x0 },
148 		/* OGF_LINK_CTL */
149 		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
150 		/* OGF_LINK_POLICY */
151 		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
152 		/* OGF_HOST_CTL */
153 		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
154 		/* OGF_INFO_PARAM */
155 		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
156 		/* OGF_STATUS_PARAM */
157 		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
158 	}
159 };
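
/* Example: how the command rows above are consulted. In
 * hci_sock_sendmsg() the OGF of a command opcode selects one of the
 * ocf_mask rows above and the OCF indexes a single bit within that
 * 128-bit row, using the same arithmetic as hci_test_bit() above. A
 * minimal userspace sketch of that lookup (illustrative only; the
 * example_ name is hypothetical and the mask 127 is an assumption
 * matching the four-word row width):
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool example_ocf_allowed(const uint32_t row[4], uint16_t ocf)
{
	unsigned int nr = ocf & 127;	/* constrain to the 128-bit row */

	return row[nr >> 5] & ((uint32_t)1 << (nr & 31));
}

/* Usage: for OGF_LINK_CTL (row 1 above) and OCF 0x0001 (Inquiry),
 * bit 1 of 0xbe000006 is set, so unprivileged raw sockets may send
 * the Inquiry command.
 */
#endif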
160 
161 static struct bt_sock_list hci_sk_list = {
162 	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
163 };
164 
165 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
166 {
167 	struct hci_filter *flt;
168 	int flt_type, flt_event;
169 
170 	/* Apply filter */
171 	flt = &hci_pi(sk)->filter;
172 
173 	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
174 
175 	if (!test_bit(flt_type, &flt->type_mask))
176 		return true;
177 
178 	/* Extra filter for event packets only */
179 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
180 		return false;
181 
182 	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
183 
184 	if (!hci_test_bit(flt_event, &flt->event_mask))
185 		return true;
186 
187 	/* Check filter only when opcode is set */
188 	if (!flt->opcode)
189 		return false;
190 
191 	if (flt_event == HCI_EV_CMD_COMPLETE &&
192 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
193 		return true;
194 
195 	if (flt_event == HCI_EV_CMD_STATUS &&
196 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
197 		return true;
198 
199 	return false;
200 }
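
/* Example: the filter consulted above is installed from userspace with
 * setsockopt(SOL_HCI, HCI_FILTER). A minimal sketch, assuming the
 * hci_ufilter layout and the usual constant values (SOL_HCI 0,
 * HCI_FILTER 2); the example_ names are hypothetical:
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

#define SOL_HCI			0
#define HCI_FILTER		2
#define HCI_EVENT_PKT		0x04
#define HCI_EV_CMD_COMPLETE	0x0e

struct example_hci_ufilter {	/* mirrors struct hci_ufilter */
	uint32_t type_mask;
	uint32_t event_mask[2];
	uint16_t opcode;
};

static int example_set_filter(int fd)
{
	struct example_hci_ufilter flt;

	memset(&flt, 0, sizeof(flt));
	flt.type_mask = 1U << HCI_EVENT_PKT;		/* 0x10, events only */
	flt.event_mask[0] = 1U << HCI_EV_CMD_COMPLETE;	/* bit 14 */
	flt.opcode = 0;			/* 0 = do not match on opcode */

	return setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
}
#endif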
201 
202 /* Send frame to RAW socket */
203 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
204 {
205 	struct sock *sk;
206 	struct sk_buff *skb_copy = NULL;
207 
208 	BT_DBG("hdev %p len %d", hdev, skb->len);
209 
210 	read_lock(&hci_sk_list.lock);
211 
212 	sk_for_each(sk, &hci_sk_list.head) {
213 		struct sk_buff *nskb;
214 
215 		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
216 			continue;
217 
218 		/* Don't send frame to the socket it came from */
219 		if (skb->sk == sk)
220 			continue;
221 
222 		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
223 			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
224 			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
225 			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
226 			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
227 			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
228 				continue;
229 			if (is_filtered_packet(sk, skb))
230 				continue;
231 		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
232 			if (!bt_cb(skb)->incoming)
233 				continue;
234 			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
235 			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
236 			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
237 			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT &&
238 			    hci_skb_pkt_type(skb) != HCI_DRV_PKT)
239 				continue;
240 		} else {
241 			/* Don't send frame to other channel types */
242 			continue;
243 		}
244 
245 		if (!skb_copy) {
246 			/* Create a private copy with headroom */
247 			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
248 			if (!skb_copy)
249 				continue;
250 
251 			/* Put type byte before the data */
252 			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
253 		}
254 
255 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
256 		if (!nskb)
257 			continue;
258 
259 		if (sock_queue_rcv_skb(sk, nskb))
260 			kfree_skb(nskb);
261 	}
262 
263 	read_unlock(&hci_sk_list.lock);
264 
265 	kfree_skb(skb_copy);
266 }
267 
268 static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
269 {
270 	struct scm_creds *creds;
271 
272 	if (!sk || WARN_ON(!skb))
273 		return;
274 
275 	creds = &bt_cb(skb)->creds;
276 
277 	/* Check if peer credentials are set */
278 	if (!sk->sk_peer_pid) {
279 		/* Check if parent peer credentials are set */
280 		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
281 			sk = bt_sk(sk)->parent;
282 		else
283 			return;
284 	}
285 
286 	/* Check if scm_creds is already set */
287 	if (creds->pid == pid_vnr(sk->sk_peer_pid))
288 		return;
289 
290 	memset(creds, 0, sizeof(*creds));
291 
292 	creds->pid = pid_vnr(sk->sk_peer_pid);
293 	if (sk->sk_peer_cred) {
294 		creds->uid = sk->sk_peer_cred->uid;
295 		creds->gid = sk->sk_peer_cred->gid;
296 	}
297 }
298 
299 static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
300 {
301 	struct sk_buff *nskb;
302 
303 	if (!skb)
304 		return NULL;
305 
306 	nskb = skb_clone(skb, GFP_ATOMIC);
307 	if (!nskb)
308 		return NULL;
309 
310 	hci_sock_copy_creds(skb->sk, nskb);
311 
312 	return nskb;
313 }
314 
315 /* Send frame to sockets with specific channel */
316 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
317 				  int flag, struct sock *skip_sk)
318 {
319 	struct sock *sk;
320 
321 	BT_DBG("channel %u len %d", channel, skb->len);
322 
323 	sk_for_each(sk, &hci_sk_list.head) {
324 		struct sk_buff *nskb;
325 
326 		/* Ignore sockets without the flag set */
327 		if (!hci_sock_test_flag(sk, flag))
328 			continue;
329 
330 		/* Skip the original socket */
331 		if (sk == skip_sk)
332 			continue;
333 
334 		if (sk->sk_state != BT_BOUND)
335 			continue;
336 
337 		if (hci_pi(sk)->channel != channel)
338 			continue;
339 
340 		nskb = hci_skb_clone(skb);
341 		if (!nskb)
342 			continue;
343 
344 		if (sock_queue_rcv_skb(sk, nskb))
345 			kfree_skb(nskb);
346 	}
347 
348 }
349 
350 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
351 			 int flag, struct sock *skip_sk)
352 {
353 	read_lock(&hci_sk_list.lock);
354 	__hci_send_to_channel(channel, skb, flag, skip_sk);
355 	read_unlock(&hci_sk_list.lock);
356 }
357 
358 /* Send frame to monitor socket */
359 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
360 {
361 	struct sk_buff *skb_copy = NULL;
362 	struct hci_mon_hdr *hdr;
363 	__le16 opcode;
364 
365 	if (!atomic_read(&monitor_promisc))
366 		return;
367 
368 	BT_DBG("hdev %p len %d", hdev, skb->len);
369 
370 	switch (hci_skb_pkt_type(skb)) {
371 	case HCI_COMMAND_PKT:
372 		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
373 		break;
374 	case HCI_EVENT_PKT:
375 		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
376 		break;
377 	case HCI_ACLDATA_PKT:
378 		if (bt_cb(skb)->incoming)
379 			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
380 		else
381 			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
382 		break;
383 	case HCI_SCODATA_PKT:
384 		if (bt_cb(skb)->incoming)
385 			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
386 		else
387 			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
388 		break;
389 	case HCI_ISODATA_PKT:
390 		if (bt_cb(skb)->incoming)
391 			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
392 		else
393 			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
394 		break;
395 	case HCI_DRV_PKT:
396 		if (bt_cb(skb)->incoming)
397 			opcode = cpu_to_le16(HCI_MON_DRV_RX_PKT);
398 		else
399 			opcode = cpu_to_le16(HCI_MON_DRV_TX_PKT);
400 		break;
401 	case HCI_DIAG_PKT:
402 		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
403 		break;
404 	default:
405 		return;
406 	}
407 
408 	/* Create a private copy with headroom */
409 	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
410 	if (!skb_copy)
411 		return;
412 
413 	hci_sock_copy_creds(skb->sk, skb_copy);
414 
415 	/* Put header before the data */
416 	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
417 	hdr->opcode = opcode;
418 	hdr->index = cpu_to_le16(hdev->id);
419 	hdr->len = cpu_to_le16(skb->len);
420 
421 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
422 			    HCI_SOCK_TRUSTED, NULL);
423 	kfree_skb(skb_copy);
424 }
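
/* Example: every monitor frame starts with the 6-byte hci_mon_hdr
 * written above. A minimal userspace reader for a socket bound to
 * HCI_CHANNEL_MONITOR (sketch; assumes a little-endian host since the
 * header fields are __le16, and the example_ names are hypothetical):
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

struct example_mon_hdr {	/* mirrors struct hci_mon_hdr */
	uint16_t opcode;
	uint16_t index;
	uint16_t len;
} __attribute__((packed));

static void example_read_monitor(int fd)
{
	uint8_t buf[4096];
	ssize_t n;

	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
		const struct example_mon_hdr *hdr = (const void *)buf;

		if (n < (ssize_t)sizeof(*hdr))
			continue;	/* truncated frame */
		printf("opcode 0x%04x index %d payload %d bytes\n",
		       hdr->opcode, hdr->index, hdr->len);
	}
}
#endif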
425 
426 void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
427 				 void *data, u16 data_len, ktime_t tstamp,
428 				 int flag, struct sock *skip_sk)
429 {
430 	struct sock *sk;
431 	__le16 index;
432 
433 	if (hdev)
434 		index = cpu_to_le16(hdev->id);
435 	else
436 		index = cpu_to_le16(MGMT_INDEX_NONE);
437 
438 	read_lock(&hci_sk_list.lock);
439 
440 	sk_for_each(sk, &hci_sk_list.head) {
441 		struct hci_mon_hdr *hdr;
442 		struct sk_buff *skb;
443 
444 		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
445 			continue;
446 
447 		/* Ignore sockets without the flag set */
448 		if (!hci_sock_test_flag(sk, flag))
449 			continue;
450 
451 		/* Skip the original socket */
452 		if (sk == skip_sk)
453 			continue;
454 
455 		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
456 		if (!skb)
457 			continue;
458 
459 		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
460 		put_unaligned_le16(event, skb_put(skb, 2));
461 
462 		if (data)
463 			skb_put_data(skb, data, data_len);
464 
465 		skb->tstamp = tstamp;
466 
467 		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
468 		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
469 		hdr->index = index;
470 		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
471 
472 		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
473 				      HCI_SOCK_TRUSTED, NULL);
474 		kfree_skb(skb);
475 	}
476 
477 	read_unlock(&hci_sk_list.lock);
478 }
479 
480 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
481 {
482 	struct hci_mon_hdr *hdr;
483 	struct hci_mon_new_index *ni;
484 	struct hci_mon_index_info *ii;
485 	struct sk_buff *skb;
486 	__le16 opcode;
487 
488 	switch (event) {
489 	case HCI_DEV_REG:
490 		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
491 		if (!skb)
492 			return NULL;
493 
494 		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
495 		ni->type = 0x00; /* Old hdev->dev_type */
496 		ni->bus = hdev->bus;
497 		bacpy(&ni->bdaddr, &hdev->bdaddr);
498 		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
499 			       strnlen(hdev->name, sizeof(ni->name)), '\0');
500 
501 		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
502 		break;
503 
504 	case HCI_DEV_UNREG:
505 		skb = bt_skb_alloc(0, GFP_ATOMIC);
506 		if (!skb)
507 			return NULL;
508 
509 		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
510 		break;
511 
512 	case HCI_DEV_SETUP:
513 		if (hdev->manufacturer == 0xffff)
514 			return NULL;
515 		fallthrough;
516 
517 	case HCI_DEV_UP:
518 		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
519 		if (!skb)
520 			return NULL;
521 
522 		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
523 		bacpy(&ii->bdaddr, &hdev->bdaddr);
524 		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
525 
526 		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
527 		break;
528 
529 	case HCI_DEV_OPEN:
530 		skb = bt_skb_alloc(0, GFP_ATOMIC);
531 		if (!skb)
532 			return NULL;
533 
534 		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
535 		break;
536 
537 	case HCI_DEV_CLOSE:
538 		skb = bt_skb_alloc(0, GFP_ATOMIC);
539 		if (!skb)
540 			return NULL;
541 
542 		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
543 		break;
544 
545 	default:
546 		return NULL;
547 	}
548 
549 	__net_timestamp(skb);
550 
551 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
552 	hdr->opcode = opcode;
553 	hdr->index = cpu_to_le16(hdev->id);
554 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
555 
556 	return skb;
557 }
558 
559 static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
560 {
561 	struct hci_mon_hdr *hdr;
562 	struct sk_buff *skb;
563 	u16 format;
564 	u8 ver[3];
565 	u32 flags;
566 
567 	/* No message needed when cookie is not present */
568 	if (!hci_pi(sk)->cookie)
569 		return NULL;
570 
571 	switch (hci_pi(sk)->channel) {
572 	case HCI_CHANNEL_RAW:
573 		format = 0x0000;
574 		ver[0] = BT_SUBSYS_VERSION;
575 		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
576 		break;
577 	case HCI_CHANNEL_USER:
578 		format = 0x0001;
579 		ver[0] = BT_SUBSYS_VERSION;
580 		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
581 		break;
582 	case HCI_CHANNEL_CONTROL:
583 		format = 0x0002;
584 		mgmt_fill_version_info(ver);
585 		break;
586 	default:
587 		/* No message for unsupported format */
588 		return NULL;
589 	}
590 
591 	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
592 	if (!skb)
593 		return NULL;
594 
595 	hci_sock_copy_creds(sk, skb);
596 
597 	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
598 
599 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
600 	put_unaligned_le16(format, skb_put(skb, 2));
601 	skb_put_data(skb, ver, sizeof(ver));
602 	put_unaligned_le32(flags, skb_put(skb, 4));
603 	skb_put_u8(skb, TASK_COMM_LEN);
604 	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
605 
606 	__net_timestamp(skb);
607 
608 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
609 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
610 	if (hci_pi(sk)->hdev)
611 		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
612 	else
613 		hdr->index = cpu_to_le16(HCI_DEV_NONE);
614 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
615 
616 	return skb;
617 }
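
/* Example: decoding the HCI_MON_CTRL_OPEN payload built above, laid
 * out as cookie (4), format (2), version (3), flags (4), comm length
 * (1) and the comm string. A sketch assuming a little-endian host and
 * that buf points just past the hci_mon_hdr; example_ names are
 * hypothetical:
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct example_ctrl_open {
	uint32_t cookie;
	uint16_t format;
	uint8_t  version[3];
	uint32_t flags;
	char     comm[17];	/* TASK_COMM_LEN (16) plus NUL */
};

static int example_parse_ctrl_open(const uint8_t *buf, size_t len,
				   struct example_ctrl_open *out)
{
	size_t comm_len;

	if (len < 14)
		return -1;
	memcpy(&out->cookie, buf, 4);
	memcpy(&out->format, buf + 4, 2);
	memcpy(out->version, buf + 6, 3);
	memcpy(&out->flags, buf + 9, 4);
	comm_len = buf[13];
	if (comm_len > 16 || len < 14 + comm_len)
		return -1;
	memcpy(out->comm, buf + 14, comm_len);
	out->comm[comm_len] = '\0';
	return 0;
}
#endif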
618 
619 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
620 {
621 	struct hci_mon_hdr *hdr;
622 	struct sk_buff *skb;
623 
624 	/* No message needed when cookie is not present */
625 	if (!hci_pi(sk)->cookie)
626 		return NULL;
627 
628 	switch (hci_pi(sk)->channel) {
629 	case HCI_CHANNEL_RAW:
630 	case HCI_CHANNEL_USER:
631 	case HCI_CHANNEL_CONTROL:
632 		break;
633 	default:
634 		/* No message for unsupported format */
635 		return NULL;
636 	}
637 
638 	skb = bt_skb_alloc(4, GFP_ATOMIC);
639 	if (!skb)
640 		return NULL;
641 
642 	hci_sock_copy_creds(sk, skb);
643 
644 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
645 
646 	__net_timestamp(skb);
647 
648 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
649 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
650 	if (hci_pi(sk)->hdev)
651 		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
652 	else
653 		hdr->index = cpu_to_le16(HCI_DEV_NONE);
654 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
655 
656 	return skb;
657 }
658 
659 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
660 						   u16 opcode, u16 len,
661 						   const void *buf)
662 {
663 	struct hci_mon_hdr *hdr;
664 	struct sk_buff *skb;
665 
666 	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
667 	if (!skb)
668 		return NULL;
669 
670 	hci_sock_copy_creds(sk, skb);
671 
672 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
673 	put_unaligned_le16(opcode, skb_put(skb, 2));
674 
675 	if (buf)
676 		skb_put_data(skb, buf, len);
677 
678 	__net_timestamp(skb);
679 
680 	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
681 	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
682 	hdr->index = cpu_to_le16(index);
683 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
684 
685 	return skb;
686 }
687 
688 static void __printf(2, 3)
689 send_monitor_note(struct sock *sk, const char *fmt, ...)
690 {
691 	size_t len;
692 	struct hci_mon_hdr *hdr;
693 	struct sk_buff *skb;
694 	va_list args;
695 
696 	va_start(args, fmt);
697 	len = vsnprintf(NULL, 0, fmt, args);
698 	va_end(args);
699 
700 	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
701 	if (!skb)
702 		return;
703 
704 	hci_sock_copy_creds(sk, skb);
705 
706 	va_start(args, fmt);
707 	vsprintf(skb_put(skb, len), fmt, args);
708 	*(u8 *)skb_put(skb, 1) = 0;
709 	va_end(args);
710 
711 	__net_timestamp(skb);
712 
713 	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
714 	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
715 	hdr->index = cpu_to_le16(HCI_DEV_NONE);
716 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
717 
718 	if (sock_queue_rcv_skb(sk, skb))
719 		kfree_skb(skb);
720 }
721 
722 static void send_monitor_replay(struct sock *sk)
723 {
724 	struct hci_dev *hdev;
725 
726 	read_lock(&hci_dev_list_lock);
727 
728 	list_for_each_entry(hdev, &hci_dev_list, list) {
729 		struct sk_buff *skb;
730 
731 		skb = create_monitor_event(hdev, HCI_DEV_REG);
732 		if (!skb)
733 			continue;
734 
735 		if (sock_queue_rcv_skb(sk, skb))
736 			kfree_skb(skb);
737 
738 		if (!test_bit(HCI_RUNNING, &hdev->flags))
739 			continue;
740 
741 		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
742 		if (!skb)
743 			continue;
744 
745 		if (sock_queue_rcv_skb(sk, skb))
746 			kfree_skb(skb);
747 
748 		if (test_bit(HCI_UP, &hdev->flags))
749 			skb = create_monitor_event(hdev, HCI_DEV_UP);
750 		else if (hci_dev_test_flag(hdev, HCI_SETUP))
751 			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
752 		else
753 			skb = NULL;
754 
755 		if (skb) {
756 			if (sock_queue_rcv_skb(sk, skb))
757 				kfree_skb(skb);
758 		}
759 	}
760 
761 	read_unlock(&hci_dev_list_lock);
762 }
763 
764 static void send_monitor_control_replay(struct sock *mon_sk)
765 {
766 	struct sock *sk;
767 
768 	read_lock(&hci_sk_list.lock);
769 
770 	sk_for_each(sk, &hci_sk_list.head) {
771 		struct sk_buff *skb;
772 
773 		skb = create_monitor_ctrl_open(sk);
774 		if (!skb)
775 			continue;
776 
777 		if (sock_queue_rcv_skb(mon_sk, skb))
778 			kfree_skb(skb);
779 	}
780 
781 	read_unlock(&hci_sk_list.lock);
782 }
783 
784 /* Generate internal stack event */
785 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
786 {
787 	struct hci_event_hdr *hdr;
788 	struct hci_ev_stack_internal *ev;
789 	struct sk_buff *skb;
790 
791 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
792 	if (!skb)
793 		return;
794 
795 	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
796 	hdr->evt  = HCI_EV_STACK_INTERNAL;
797 	hdr->plen = sizeof(*ev) + dlen;
798 
799 	ev = skb_put(skb, sizeof(*ev) + dlen);
800 	ev->type = type;
801 	memcpy(ev->data, data, dlen);
802 
803 	bt_cb(skb)->incoming = 1;
804 	__net_timestamp(skb);
805 
806 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
807 	hci_send_to_sock(hdev, skb);
808 	kfree_skb(skb);
809 }
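
/* Example: the synthetic frame built above reaches raw sockets as an
 * ordinary event packet: type HCI_EVENT_PKT, event code 0xfd
 * (HCI_EV_STACK_INTERNAL), then the hci_ev_stack_internal payload.
 * Receiving it requires the matching event bit in the socket filter;
 * a sketch of the bit arithmetic hci_test_bit() applies (the example_
 * name is hypothetical):
 */
#if 0
#include <stdint.h>

static void example_enable_stack_internal(uint32_t event_mask[2])
{
	unsigned int nr = 0xfd & 63;	/* HCI_FLT_EVENT_BITS mask -> 61 */

	event_mask[nr >> 5] |= (uint32_t)1 << (nr & 31);
}
#endif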
810 
811 void hci_sock_dev_event(struct hci_dev *hdev, int event)
812 {
813 	BT_DBG("hdev %s event %d", hdev->name, event);
814 
815 	if (atomic_read(&monitor_promisc)) {
816 		struct sk_buff *skb;
817 
818 		/* Send event to monitor */
819 		skb = create_monitor_event(hdev, event);
820 		if (skb) {
821 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
822 					    HCI_SOCK_TRUSTED, NULL);
823 			kfree_skb(skb);
824 		}
825 	}
826 
827 	if (event <= HCI_DEV_DOWN) {
828 		struct hci_ev_si_device ev;
829 
830 		/* Send event to sockets */
831 		ev.event  = event;
832 		ev.dev_id = hdev->id;
833 		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
834 	}
835 
836 	if (event == HCI_DEV_UNREG) {
837 		struct sock *sk;
838 
839 		/* Wake up sockets using this dead device */
840 		read_lock(&hci_sk_list.lock);
841 		sk_for_each(sk, &hci_sk_list.head) {
842 			if (hci_pi(sk)->hdev == hdev) {
843 				sk->sk_err = EPIPE;
844 				sk->sk_state_change(sk);
845 			}
846 		}
847 		read_unlock(&hci_sk_list.lock);
848 	}
849 }
850 
851 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
852 {
853 	struct hci_mgmt_chan *c;
854 
855 	list_for_each_entry(c, &mgmt_chan_list, list) {
856 		if (c->channel == channel)
857 			return c;
858 	}
859 
860 	return NULL;
861 }
862 
863 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
864 {
865 	struct hci_mgmt_chan *c;
866 
867 	mutex_lock(&mgmt_chan_list_lock);
868 	c = __hci_mgmt_chan_find(channel);
869 	mutex_unlock(&mgmt_chan_list_lock);
870 
871 	return c;
872 }
873 
874 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
875 {
876 	if (c->channel < HCI_CHANNEL_CONTROL)
877 		return -EINVAL;
878 
879 	mutex_lock(&mgmt_chan_list_lock);
880 	if (__hci_mgmt_chan_find(c->channel)) {
881 		mutex_unlock(&mgmt_chan_list_lock);
882 		return -EALREADY;
883 	}
884 
885 	list_add_tail(&c->list, &mgmt_chan_list);
886 
887 	mutex_unlock(&mgmt_chan_list_lock);
888 
889 	return 0;
890 }
891 EXPORT_SYMBOL(hci_mgmt_chan_register);
892 
893 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
894 {
895 	mutex_lock(&mgmt_chan_list_lock);
896 	list_del(&c->list);
897 	mutex_unlock(&mgmt_chan_list_lock);
898 }
899 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
900 
901 static int hci_sock_release(struct socket *sock)
902 {
903 	struct sock *sk = sock->sk;
904 	struct hci_dev *hdev;
905 	struct sk_buff *skb;
906 
907 	BT_DBG("sock %p sk %p", sock, sk);
908 
909 	if (!sk)
910 		return 0;
911 
912 	lock_sock(sk);
913 
914 	switch (hci_pi(sk)->channel) {
915 	case HCI_CHANNEL_MONITOR:
916 		atomic_dec(&monitor_promisc);
917 		break;
918 	case HCI_CHANNEL_RAW:
919 	case HCI_CHANNEL_USER:
920 	case HCI_CHANNEL_CONTROL:
921 		/* Send event to monitor */
922 		skb = create_monitor_ctrl_close(sk);
923 		if (skb) {
924 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
925 					    HCI_SOCK_TRUSTED, NULL);
926 			kfree_skb(skb);
927 		}
928 
929 		hci_sock_free_cookie(sk);
930 		break;
931 	}
932 
933 	bt_sock_unlink(&hci_sk_list, sk);
934 
935 	hdev = hci_pi(sk)->hdev;
936 	if (hdev) {
937 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
938 		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
939 			/* When releasing user channel exclusive access,
940 			 * call hci_dev_do_close directly instead of calling
941 			 * hci_dev_close to ensure the exclusive access will
942 			 * be released and the controller brought back down.
943 			 *
944 			 * The checking of HCI_AUTO_OFF is not needed in this
945 			 * case since it will have been cleared already when
946 			 * opening the user channel.
947 			 *
948 			 * Make sure to also check that we haven't already
949 			 * unregistered since all the cleanup will have already
950 			 * been completed and hdev will get released when we put
951 			 * below.
952 			 */
953 			hci_dev_do_close(hdev);
954 			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
955 			mgmt_index_added(hdev);
956 		}
957 
958 		atomic_dec(&hdev->promisc);
959 		hci_dev_put(hdev);
960 	}
961 
962 	sock_orphan(sk);
963 	release_sock(sk);
964 	sock_put(sk);
965 	return 0;
966 }
967 
968 static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
969 {
970 	bdaddr_t bdaddr;
971 	int err;
972 
973 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
974 		return -EFAULT;
975 
976 	hci_dev_lock(hdev);
977 
978 	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
979 
980 	hci_dev_unlock(hdev);
981 
982 	return err;
983 }
984 
985 static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
986 {
987 	bdaddr_t bdaddr;
988 	int err;
989 
990 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
991 		return -EFAULT;
992 
993 	hci_dev_lock(hdev);
994 
995 	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
996 
997 	hci_dev_unlock(hdev);
998 
999 	return err;
1000 }
1001 
1002 /* Ioctls that require bound socket */
1003 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
1004 				unsigned long arg)
1005 {
1006 	struct hci_dev *hdev = hci_hdev_from_sock(sk);
1007 
1008 	if (IS_ERR(hdev))
1009 		return PTR_ERR(hdev);
1010 
1011 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1012 		return -EBUSY;
1013 
1014 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1015 		return -EOPNOTSUPP;
1016 
1017 	switch (cmd) {
1018 	case HCISETRAW:
1019 		if (!capable(CAP_NET_ADMIN))
1020 			return -EPERM;
1021 		return -EOPNOTSUPP;
1022 
1023 	case HCIGETCONNINFO:
1024 		return hci_get_conn_info(hdev, (void __user *)arg);
1025 
1026 	case HCIGETAUTHINFO:
1027 		return hci_get_auth_info(hdev, (void __user *)arg);
1028 
1029 	case HCIBLOCKADDR:
1030 		if (!capable(CAP_NET_ADMIN))
1031 			return -EPERM;
1032 		return hci_sock_reject_list_add(hdev, (void __user *)arg);
1033 
1034 	case HCIUNBLOCKADDR:
1035 		if (!capable(CAP_NET_ADMIN))
1036 			return -EPERM;
1037 		return hci_sock_reject_list_del(hdev, (void __user *)arg);
1038 	}
1039 
1040 	return -ENOIOCTLCMD;
1041 }
1042 
1043 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
1044 			  unsigned long arg)
1045 {
1046 	void __user *argp = (void __user *)arg;
1047 	struct sock *sk = sock->sk;
1048 	int err;
1049 
1050 	BT_DBG("cmd %x arg %lx", cmd, arg);
1051 
1052 	/* Make sure the cmd is valid before doing anything */
1053 	switch (cmd) {
1054 	case HCIGETDEVLIST:
1055 	case HCIGETDEVINFO:
1056 	case HCIGETCONNLIST:
1057 	case HCIDEVUP:
1058 	case HCIDEVDOWN:
1059 	case HCIDEVRESET:
1060 	case HCIDEVRESTAT:
1061 	case HCISETSCAN:
1062 	case HCISETAUTH:
1063 	case HCISETENCRYPT:
1064 	case HCISETPTYPE:
1065 	case HCISETLINKPOL:
1066 	case HCISETLINKMODE:
1067 	case HCISETACLMTU:
1068 	case HCISETSCOMTU:
1069 	case HCIINQUIRY:
1070 	case HCISETRAW:
1071 	case HCIGETCONNINFO:
1072 	case HCIGETAUTHINFO:
1073 	case HCIBLOCKADDR:
1074 	case HCIUNBLOCKADDR:
1075 		break;
1076 	default:
1077 		return -ENOIOCTLCMD;
1078 	}
1079 
1080 	lock_sock(sk);
1081 
1082 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1083 		err = -EBADFD;
1084 		goto done;
1085 	}
1086 
1087 	/* When calling an ioctl on an unbound raw socket, ensure that
1088 	 * the monitor gets informed. Ensure that the resulting event is
1089 	 * only sent once by checking if the cookie exists or not. The
1090 	 * socket cookie will only ever be generated once for the lifetime
1091 	 * of a given socket.
1092 	 */
1093 	if (hci_sock_gen_cookie(sk)) {
1094 		struct sk_buff *skb;
1095 
1096 		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
1097 		 * flag. Make sure that not only the current task but also
1098 		 * the socket opener has the required capability, since
1099 		 * privileged programs can be tricked into making ioctl calls
1100 		 * on HCI sockets, and the socket should not be marked as
1101 		 * trusted simply because the ioctl caller is privileged.
1102 		 */
1103 		if (sk_capable(sk, CAP_NET_ADMIN))
1104 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1105 
1106 		/* Send event to monitor */
1107 		skb = create_monitor_ctrl_open(sk);
1108 		if (skb) {
1109 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1110 					    HCI_SOCK_TRUSTED, NULL);
1111 			kfree_skb(skb);
1112 		}
1113 	}
1114 
1115 	release_sock(sk);
1116 
1117 	switch (cmd) {
1118 	case HCIGETDEVLIST:
1119 		return hci_get_dev_list(argp);
1120 
1121 	case HCIGETDEVINFO:
1122 		return hci_get_dev_info(argp);
1123 
1124 	case HCIGETCONNLIST:
1125 		return hci_get_conn_list(argp);
1126 
1127 	case HCIDEVUP:
1128 		if (!capable(CAP_NET_ADMIN))
1129 			return -EPERM;
1130 		return hci_dev_open(arg);
1131 
1132 	case HCIDEVDOWN:
1133 		if (!capable(CAP_NET_ADMIN))
1134 			return -EPERM;
1135 		return hci_dev_close(arg);
1136 
1137 	case HCIDEVRESET:
1138 		if (!capable(CAP_NET_ADMIN))
1139 			return -EPERM;
1140 		return hci_dev_reset(arg);
1141 
1142 	case HCIDEVRESTAT:
1143 		if (!capable(CAP_NET_ADMIN))
1144 			return -EPERM;
1145 		return hci_dev_reset_stat(arg);
1146 
1147 	case HCISETSCAN:
1148 	case HCISETAUTH:
1149 	case HCISETENCRYPT:
1150 	case HCISETPTYPE:
1151 	case HCISETLINKPOL:
1152 	case HCISETLINKMODE:
1153 	case HCISETACLMTU:
1154 	case HCISETSCOMTU:
1155 		if (!capable(CAP_NET_ADMIN))
1156 			return -EPERM;
1157 		return hci_dev_cmd(cmd, argp);
1158 
1159 	case HCIINQUIRY:
1160 		return hci_inquiry(argp);
1161 	}
1162 
1163 	lock_sock(sk);
1164 
1165 	err = hci_sock_bound_ioctl(sk, cmd, arg);
1166 
1167 done:
1168 	release_sock(sk);
1169 	return err;
1170 }
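
/* Example: issuing one of the device ioctls above from userspace on a
 * raw HCI socket. A sketch; the request struct layouts mirror the
 * hci_dev_list_req definitions used here, the HCIGETDEVLIST _IOR
 * number matches BlueZ's hci.h (treat it as an assumption), and the
 * example_ names are hypothetical:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

#define BTPROTO_HCI	1
#define HCIGETDEVLIST	_IOR('H', 210, int)

struct example_dev_req  { uint16_t dev_id; uint32_t dev_opt; };
struct example_dev_list {	/* mirrors struct hci_dev_list_req */
	uint16_t dev_num;
	struct example_dev_req dev_req[16];
};

static void example_list_devices(void)
{
	struct example_dev_list dl = { .dev_num = 16 };
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0)
		return;
	if (ioctl(fd, HCIGETDEVLIST, &dl) == 0)
		for (int i = 0; i < dl.dev_num; i++)
			printf("hci%d\n", dl.dev_req[i].dev_id);
	close(fd);
}
#endif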
1171 
1172 #ifdef CONFIG_COMPAT
1173 static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1174 				 unsigned long arg)
1175 {
1176 	switch (cmd) {
1177 	case HCIDEVUP:
1178 	case HCIDEVDOWN:
1179 	case HCIDEVRESET:
1180 	case HCIDEVRESTAT:
1181 		return hci_sock_ioctl(sock, cmd, arg);
1182 	}
1183 
1184 	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1185 }
1186 #endif
1187 
1188 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1189 			 int addr_len)
1190 {
1191 	struct sockaddr_hci haddr;
1192 	struct sock *sk = sock->sk;
1193 	struct hci_dev *hdev = NULL;
1194 	struct sk_buff *skb;
1195 	int len, err = 0;
1196 
1197 	BT_DBG("sock %p sk %p", sock, sk);
1198 
1199 	if (!addr)
1200 		return -EINVAL;
1201 
1202 	memset(&haddr, 0, sizeof(haddr));
1203 	len = min_t(unsigned int, sizeof(haddr), addr_len);
1204 	memcpy(&haddr, addr, len);
1205 
1206 	if (haddr.hci_family != AF_BLUETOOTH)
1207 		return -EINVAL;
1208 
1209 	lock_sock(sk);
1210 
1211 	/* Allow detaching from a dead device and attaching to an alive device,
1212 	 * if the caller wants to re-bind (instead of close) this socket in
1213 	 * response to the hci_sock_dev_event(HCI_DEV_UNREG) notification.
1214 	 */
1215 	hdev = hci_pi(sk)->hdev;
1216 	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1217 		hci_pi(sk)->hdev = NULL;
1218 		sk->sk_state = BT_OPEN;
1219 		hci_dev_put(hdev);
1220 	}
1221 	hdev = NULL;
1222 
1223 	if (sk->sk_state == BT_BOUND) {
1224 		err = -EALREADY;
1225 		goto done;
1226 	}
1227 
1228 	switch (haddr.hci_channel) {
1229 	case HCI_CHANNEL_RAW:
1230 		if (hci_pi(sk)->hdev) {
1231 			err = -EALREADY;
1232 			goto done;
1233 		}
1234 
1235 		if (haddr.hci_dev != HCI_DEV_NONE) {
1236 			hdev = hci_dev_get(haddr.hci_dev);
1237 			if (!hdev) {
1238 				err = -ENODEV;
1239 				goto done;
1240 			}
1241 
1242 			atomic_inc(&hdev->promisc);
1243 		}
1244 
1245 		hci_pi(sk)->channel = haddr.hci_channel;
1246 
1247 		if (!hci_sock_gen_cookie(sk)) {
1248 			/* If a cookie has already been assigned, then an ioctl
1249 			 * was already issued against the unbound socket, which
1250 			 * triggered an open notification. Send a close
1251 			 * notification first to allow a clean transition to
1252 			 * the bound state.
1253 			 */
1254 			skb = create_monitor_ctrl_close(sk);
1255 			if (skb) {
1256 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1257 						    HCI_SOCK_TRUSTED, NULL);
1258 				kfree_skb(skb);
1259 			}
1260 		}
1261 
1262 		if (capable(CAP_NET_ADMIN))
1263 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1264 
1265 		hci_pi(sk)->hdev = hdev;
1266 
1267 		/* Send event to monitor */
1268 		skb = create_monitor_ctrl_open(sk);
1269 		if (skb) {
1270 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1271 					    HCI_SOCK_TRUSTED, NULL);
1272 			kfree_skb(skb);
1273 		}
1274 		break;
1275 
1276 	case HCI_CHANNEL_USER:
1277 		if (hci_pi(sk)->hdev) {
1278 			err = -EALREADY;
1279 			goto done;
1280 		}
1281 
1282 		if (haddr.hci_dev == HCI_DEV_NONE) {
1283 			err = -EINVAL;
1284 			goto done;
1285 		}
1286 
1287 		if (!capable(CAP_NET_ADMIN)) {
1288 			err = -EPERM;
1289 			goto done;
1290 		}
1291 
1292 		hdev = hci_dev_get(haddr.hci_dev);
1293 		if (!hdev) {
1294 			err = -ENODEV;
1295 			goto done;
1296 		}
1297 
1298 		if (test_bit(HCI_INIT, &hdev->flags) ||
1299 		    hci_dev_test_flag(hdev, HCI_SETUP) ||
1300 		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1301 		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1302 		     test_bit(HCI_UP, &hdev->flags))) {
1303 			err = -EBUSY;
1304 			hci_dev_put(hdev);
1305 			goto done;
1306 		}
1307 
1308 		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1309 			err = -EUSERS;
1310 			hci_dev_put(hdev);
1311 			goto done;
1312 		}
1313 
1314 		mgmt_index_removed(hdev);
1315 
1316 		err = hci_dev_open(hdev->id);
1317 		if (err) {
1318 			if (err == -EALREADY) {
1319 				/* In case the transport is already up and
1320 				 * running, clear the error here.
1321 				 *
1322 				 * This can happen when opening a user
1323 				 * channel while the HCI_AUTO_OFF grace
1324 				 * period is still active.
1325 				 */
1326 				err = 0;
1327 			} else {
1328 				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1329 				mgmt_index_added(hdev);
1330 				hci_dev_put(hdev);
1331 				goto done;
1332 			}
1333 		}
1334 
1335 		hci_pi(sk)->channel = haddr.hci_channel;
1336 
1337 		if (!hci_sock_gen_cookie(sk)) {
1338 			/* In the case when a cookie has already been assigned,
1339 			 * this socket will transition from a raw socket into
1340 			 * a user channel socket. For a clean transition, send
1341 			 * the close notification first.
1342 			 */
1343 			skb = create_monitor_ctrl_close(sk);
1344 			if (skb) {
1345 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1346 						    HCI_SOCK_TRUSTED, NULL);
1347 				kfree_skb(skb);
1348 			}
1349 		}
1350 
1351 		/* The user channel requires the CAP_NET_ADMIN capability
1352 		 * and is therefore implicitly trusted.
1353 		 */
1354 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1355 
1356 		hci_pi(sk)->hdev = hdev;
1357 
1358 		/* Send event to monitor */
1359 		skb = create_monitor_ctrl_open(sk);
1360 		if (skb) {
1361 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1362 					    HCI_SOCK_TRUSTED, NULL);
1363 			kfree_skb(skb);
1364 		}
1365 
1366 		atomic_inc(&hdev->promisc);
1367 		break;
1368 
1369 	case HCI_CHANNEL_MONITOR:
1370 		if (haddr.hci_dev != HCI_DEV_NONE) {
1371 			err = -EINVAL;
1372 			goto done;
1373 		}
1374 
1375 		if (!capable(CAP_NET_RAW)) {
1376 			err = -EPERM;
1377 			goto done;
1378 		}
1379 
1380 		hci_pi(sk)->channel = haddr.hci_channel;
1381 
1382 		/* The monitor interface requires the CAP_NET_RAW capability
1383 		 * and is therefore implicitly trusted.
1384 		 */
1385 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1386 
1387 		send_monitor_note(sk, "Linux version %s (%s)",
1388 				  init_utsname()->release,
1389 				  init_utsname()->machine);
1390 		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1391 				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1392 		send_monitor_replay(sk);
1393 		send_monitor_control_replay(sk);
1394 
1395 		atomic_inc(&monitor_promisc);
1396 		break;
1397 
1398 	case HCI_CHANNEL_LOGGING:
1399 		if (haddr.hci_dev != HCI_DEV_NONE) {
1400 			err = -EINVAL;
1401 			goto done;
1402 		}
1403 
1404 		if (!capable(CAP_NET_ADMIN)) {
1405 			err = -EPERM;
1406 			goto done;
1407 		}
1408 
1409 		hci_pi(sk)->channel = haddr.hci_channel;
1410 		break;
1411 
1412 	default:
1413 		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1414 			err = -EINVAL;
1415 			goto done;
1416 		}
1417 
1418 		if (haddr.hci_dev != HCI_DEV_NONE) {
1419 			err = -EINVAL;
1420 			goto done;
1421 		}
1422 
1423 		/* Users with CAP_NET_ADMIN capabilities are allowed
1424 		 * access to all management commands and events. For
1425 		 * untrusted users the interface is restricted and only
1426 		 * events that are safe for untrusted users are sent.
1427 		 */
1428 		if (capable(CAP_NET_ADMIN))
1429 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1430 
1431 		hci_pi(sk)->channel = haddr.hci_channel;
1432 
1433 		/* At the moment the index and unconfigured index events
1434 		 * are enabled unconditionally. Setting them on each
1435 		 * socket when binding keeps this functionality. They
1436 		 * may however be cleared later, in which case sending of
1437 		 * these events is intentionally disabled.
1438 		 *
1439 		 * This also enables generic events that are safe to be
1440 		 * received by untrusted users. Examples of such events
1441 		 * are changes to settings, class of device, name, etc.
1442 		 */
1443 		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1444 			if (!hci_sock_gen_cookie(sk)) {
1445 				/* In the case when a cookie has already been
1446 				 * assigned, this socket will transition from
1447 				 * a raw socket into a control socket. To
1448 				 * allow for a clean transition, send the
1449 				 * close notification first.
1450 				 */
1451 				skb = create_monitor_ctrl_close(sk);
1452 				if (skb) {
1453 					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1454 							    HCI_SOCK_TRUSTED, NULL);
1455 					kfree_skb(skb);
1456 				}
1457 			}
1458 
1459 			/* Send event to monitor */
1460 			skb = create_monitor_ctrl_open(sk);
1461 			if (skb) {
1462 				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1463 						    HCI_SOCK_TRUSTED, NULL);
1464 				kfree_skb(skb);
1465 			}
1466 
1467 			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1468 			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1469 			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1470 			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1471 			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1472 			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1473 		}
1474 		break;
1475 	}
1476 
1477 	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
1478 	if (!hci_pi(sk)->mtu)
1479 		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;
1480 
1481 	sk->sk_state = BT_BOUND;
1482 
1483 done:
1484 	release_sock(sk);
1485 	return err;
1486 }
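
/* Example: taking exclusive access via the user channel path above.
 * Requires CAP_NET_ADMIN, and the controller must normally be down
 * (the HCI_AUTO_OFF grace period is the exception handled above). A
 * sketch; the address layout mirrors struct sockaddr_hci and the
 * example_ names are hypothetical:
 */
#if 0
#include <string.h>
#include <sys/socket.h>

#define BTPROTO_HCI		1
#define HCI_CHANNEL_USER	1

struct example_sockaddr_hci {	/* mirrors struct sockaddr_hci */
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

static int example_open_user_channel(void)
{
	struct example_sockaddr_hci a;
	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);

	if (fd < 0)
		return -1;

	memset(&a, 0, sizeof(a));
	a.hci_family  = AF_BLUETOOTH;
	a.hci_dev     = 0;		/* hci0 */
	a.hci_channel = HCI_CHANNEL_USER;

	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		return -1;

	return fd;	/* exchanges raw frames, packet type byte first */
}
#endif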
1487 
1488 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1489 			    int peer)
1490 {
1491 	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1492 	struct sock *sk = sock->sk;
1493 	struct hci_dev *hdev;
1494 	int err = 0;
1495 
1496 	BT_DBG("sock %p sk %p", sock, sk);
1497 
1498 	if (peer)
1499 		return -EOPNOTSUPP;
1500 
1501 	lock_sock(sk);
1502 
1503 	hdev = hci_hdev_from_sock(sk);
1504 	if (IS_ERR(hdev)) {
1505 		err = PTR_ERR(hdev);
1506 		goto done;
1507 	}
1508 
1509 	haddr->hci_family = AF_BLUETOOTH;
1510 	haddr->hci_dev    = hdev->id;
1511 	haddr->hci_channel = hci_pi(sk)->channel;
1512 	err = sizeof(*haddr);
1513 
1514 done:
1515 	release_sock(sk);
1516 	return err;
1517 }
1518 
1519 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1520 			  struct sk_buff *skb)
1521 {
1522 	__u8 mask = hci_pi(sk)->cmsg_mask;
1523 
1524 	if (mask & HCI_CMSG_DIR) {
1525 		int incoming = bt_cb(skb)->incoming;
1526 		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1527 			 &incoming);
1528 	}
1529 
1530 	if (mask & HCI_CMSG_TSTAMP) {
1531 #ifdef CONFIG_COMPAT
1532 		struct old_timeval32 ctv;
1533 #endif
1534 		struct __kernel_old_timeval tv;
1535 		void *data;
1536 		int len;
1537 
1538 		skb_get_timestamp(skb, &tv);
1539 
1540 		data = &tv;
1541 		len = sizeof(tv);
1542 #ifdef CONFIG_COMPAT
1543 		if (!COMPAT_USE_64BIT_TIME &&
1544 		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
1545 			ctv.tv_sec = tv.tv_sec;
1546 			ctv.tv_usec = tv.tv_usec;
1547 			data = &ctv;
1548 			len = sizeof(ctv);
1549 		}
1550 #endif
1551 
1552 		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1553 	}
1554 }
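
/* Example: the ancillary data attached above is enabled per socket
 * with the HCI_DATA_DIR and HCI_TIME_STAMP options and read back via
 * recvmsg(). A sketch assuming the usual option values (SOL_HCI 0,
 * HCI_DATA_DIR 1, HCI_TIME_STAMP 3, cmsg types 0x01/0x02) and that
 * __kernel_old_timeval matches struct timeval on this ABI; example_
 * names are hypothetical:
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>

#define SOL_HCI		0
#define HCI_DATA_DIR	1
#define HCI_TIME_STAMP	3
#define HCI_CMSG_DIR	0x01
#define HCI_CMSG_TSTAMP	0x02

static ssize_t example_recv_with_meta(int fd, uint8_t *buf, size_t len)
{
	char cbuf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	int one = 1;
	ssize_t n;

	setsockopt(fd, SOL_HCI, HCI_DATA_DIR, &one, sizeof(one));
	setsockopt(fd, SOL_HCI, HCI_TIME_STAMP, &one, sizeof(one));

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return n;

	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
	     c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level != SOL_HCI)
			continue;
		if (c->cmsg_type == HCI_CMSG_DIR) {
			int incoming;

			memcpy(&incoming, CMSG_DATA(c), sizeof(incoming));
			/* 1 = frame came from the controller */
		} else if (c->cmsg_type == HCI_CMSG_TSTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(c), sizeof(tv));
		}
	}

	return n;
}
#endif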
1555 
1556 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1557 			    size_t len, int flags)
1558 {
1559 	struct scm_cookie scm;
1560 	struct sock *sk = sock->sk;
1561 	struct sk_buff *skb;
1562 	int copied, err;
1563 	unsigned int skblen;
1564 
1565 	BT_DBG("sock %p, sk %p", sock, sk);
1566 
1567 	if (flags & MSG_OOB)
1568 		return -EOPNOTSUPP;
1569 
1570 	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1571 		return -EOPNOTSUPP;
1572 
1573 	if (sk->sk_state == BT_CLOSED)
1574 		return 0;
1575 
1576 	skb = skb_recv_datagram(sk, flags, &err);
1577 	if (!skb)
1578 		return err;
1579 
1580 	skblen = skb->len;
1581 	copied = skb->len;
1582 	if (len < copied) {
1583 		msg->msg_flags |= MSG_TRUNC;
1584 		copied = len;
1585 	}
1586 
1587 	skb_reset_transport_header(skb);
1588 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1589 
1590 	switch (hci_pi(sk)->channel) {
1591 	case HCI_CHANNEL_RAW:
1592 		hci_sock_cmsg(sk, msg, skb);
1593 		break;
1594 	case HCI_CHANNEL_USER:
1595 	case HCI_CHANNEL_MONITOR:
1596 		sock_recv_timestamp(msg, sk, skb);
1597 		break;
1598 	default:
1599 		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1600 			sock_recv_timestamp(msg, sk, skb);
1601 		break;
1602 	}
1603 
1604 	memset(&scm, 0, sizeof(scm));
1605 	scm.creds = bt_cb(skb)->creds;
1606 
1607 	skb_free_datagram(sk, skb);
1608 
1609 	if (flags & MSG_TRUNC)
1610 		copied = skblen;
1611 
1612 	scm_recv(sock, msg, &scm, flags);
1613 
1614 	return err ? : copied;
1615 }
1616 
1617 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1618 			struct sk_buff *skb)
1619 {
1620 	u8 *cp;
1621 	struct mgmt_hdr *hdr;
1622 	u16 opcode, index, len;
1623 	struct hci_dev *hdev = NULL;
1624 	const struct hci_mgmt_handler *handler;
1625 	bool var_len, no_hdev;
1626 	int err;
1627 
1628 	BT_DBG("got %d bytes", skb->len);
1629 
1630 	if (skb->len < sizeof(*hdr))
1631 		return -EINVAL;
1632 
1633 	hdr = (void *)skb->data;
1634 	opcode = __le16_to_cpu(hdr->opcode);
1635 	index = __le16_to_cpu(hdr->index);
1636 	len = __le16_to_cpu(hdr->len);
1637 
1638 	if (len != skb->len - sizeof(*hdr)) {
1639 		err = -EINVAL;
1640 		goto done;
1641 	}
1642 
1643 	if (chan->channel == HCI_CHANNEL_CONTROL) {
1644 		struct sk_buff *cmd;
1645 
1646 		/* Send event to monitor */
1647 		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
1648 						  skb->data + sizeof(*hdr));
1649 		if (cmd) {
1650 			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
1651 					    HCI_SOCK_TRUSTED, NULL);
1652 			kfree_skb(cmd);
1653 		}
1654 	}
1655 
1656 	if (opcode >= chan->handler_count ||
1657 	    chan->handlers[opcode].func == NULL) {
1658 		BT_DBG("Unknown op %u", opcode);
1659 		err = mgmt_cmd_status(sk, index, opcode,
1660 				      MGMT_STATUS_UNKNOWN_COMMAND);
1661 		goto done;
1662 	}
1663 
1664 	handler = &chan->handlers[opcode];
1665 
1666 	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1667 	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1668 		err = mgmt_cmd_status(sk, index, opcode,
1669 				      MGMT_STATUS_PERMISSION_DENIED);
1670 		goto done;
1671 	}
1672 
1673 	if (index != MGMT_INDEX_NONE) {
1674 		hdev = hci_dev_get(index);
1675 		if (!hdev) {
1676 			err = mgmt_cmd_status(sk, index, opcode,
1677 					      MGMT_STATUS_INVALID_INDEX);
1678 			goto done;
1679 		}
1680 
1681 		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1682 		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1683 		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1684 			err = mgmt_cmd_status(sk, index, opcode,
1685 					      MGMT_STATUS_INVALID_INDEX);
1686 			goto done;
1687 		}
1688 
1689 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1690 		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1691 			err = mgmt_cmd_status(sk, index, opcode,
1692 					      MGMT_STATUS_INVALID_INDEX);
1693 			goto done;
1694 		}
1695 	}
1696 
1697 	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
1698 		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1699 		if (no_hdev != !hdev) {
1700 			err = mgmt_cmd_status(sk, index, opcode,
1701 					      MGMT_STATUS_INVALID_INDEX);
1702 			goto done;
1703 		}
1704 	}
1705 
1706 	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1707 	if ((var_len && len < handler->data_len) ||
1708 	    (!var_len && len != handler->data_len)) {
1709 		err = mgmt_cmd_status(sk, index, opcode,
1710 				      MGMT_STATUS_INVALID_PARAMS);
1711 		goto done;
1712 	}
1713 
1714 	if (hdev && chan->hdev_init)
1715 		chan->hdev_init(sk, hdev);
1716 
1717 	cp = skb->data + sizeof(*hdr);
1718 
1719 	err = handler->func(sk, hdev, cp, len);
1720 	if (err < 0)
1721 		goto done;
1722 
1723 	err = skb->len;
1724 
1725 done:
1726 	if (hdev)
1727 		hci_dev_put(hdev);
1728 
1729 	return err;
1730 }
1731 
1732 static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
1733 			     unsigned int flags)
1734 {
1735 	struct hci_mon_hdr *hdr;
1736 	struct hci_dev *hdev;
1737 	u16 index;
1738 	int err;
1739 
1740 	/* The logging frame consists at minimum of the standard header,
1741 	 * the priority byte, the ident length byte and at least one string
1742 	 * terminator NUL byte. Anything shorter is an invalid packet.
1743 	 */
1744 	if (skb->len < sizeof(*hdr) + 3)
1745 		return -EINVAL;
1746 
1747 	hdr = (void *)skb->data;
1748 
1749 	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
1750 		return -EINVAL;
1751 
1752 	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1753 		__u8 priority = skb->data[sizeof(*hdr)];
1754 		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
1755 
1756 		/* Only the priorities 0-7 are valid and with that any other
1757 		 * value results in an invalid packet.
1758 		 *
1759 		 * The priority byte is followed by an ident length byte and
1760 		 * the NUL terminated ident string. Check that the ident
1761 		 * length is not overflowing the packet and also that the
1762 		 * ident string itself is NUL terminated. In case the ident
1763 		 * length is zero, the length byte itself doubles as the
1764 		 * NUL terminator.
1765 		 *
1766 		 * The message follows the ident string (if present) and
1767 		 * must be NUL terminated. Otherwise it is not a valid packet.
1768 		 */
1769 		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
1770 		    ident_len > skb->len - sizeof(*hdr) - 3 ||
1771 		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
1772 			return -EINVAL;
1773 	} else {
1774 		return -EINVAL;
1775 	}
1776 
1777 	index = __le16_to_cpu(hdr->index);
1778 
1779 	if (index != MGMT_INDEX_NONE) {
1780 		hdev = hci_dev_get(index);
1781 		if (!hdev)
1782 			return -ENODEV;
1783 	} else {
1784 		hdev = NULL;
1785 	}
1786 
1787 	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1788 
1789 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1790 	err = skb->len;
1791 
1792 	if (hdev)
1793 		hci_dev_put(hdev);
1794 
1795 	return err;
1796 }
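
/* Example: building the smallest frame that passes the validation
 * above: 6-byte header (opcode 0x0000, index, payload length),
 * priority byte, ident length byte (counting the ident's NUL), the
 * NUL-terminated ident, then a NUL-terminated message. A sketch that
 * assumes a socket already bound to HCI_CHANNEL_LOGGING and a
 * little-endian header layout; the example_ name is hypothetical:
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int example_send_log(int fd, const char *ident, const char *msg)
{
	uint8_t frame[512];
	size_t ident_len = strlen(ident) + 1;	/* include the NUL */
	size_t msg_len = strlen(msg) + 1;	/* include the NUL */
	size_t payload = 2 + ident_len + msg_len;
	uint8_t *p = frame;

	if (ident_len > 255 || 6 + payload > sizeof(frame))
		return -1;

	*p++ = 0x00; *p++ = 0x00;		/* opcode 0x0000 */
	*p++ = 0xff; *p++ = 0xff;		/* MGMT_INDEX_NONE */
	*p++ = payload & 0xff;
	*p++ = payload >> 8;
	*p++ = 6;				/* priority: informational */
	*p++ = ident_len;
	memcpy(p, ident, ident_len);
	p += ident_len;
	memcpy(p, msg, msg_len);

	return write(fd, frame, 6 + payload);
}
#endif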
1797 
1798 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1799 			    size_t len)
1800 {
1801 	struct sock *sk = sock->sk;
1802 	struct hci_mgmt_chan *chan;
1803 	struct hci_dev *hdev;
1804 	struct sk_buff *skb;
1805 	int err;
1806 	const unsigned int flags = msg->msg_flags;
1807 
1808 	BT_DBG("sock %p sk %p", sock, sk);
1809 
1810 	if (flags & MSG_OOB)
1811 		return -EOPNOTSUPP;
1812 
1813 	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
1814 		return -EINVAL;
1815 
1816 	if (len < 4 || len > hci_pi(sk)->mtu)
1817 		return -EINVAL;
1818 
1819 	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
1820 	if (IS_ERR(skb))
1821 		return PTR_ERR(skb);
1822 
1823 	lock_sock(sk);
1824 
1825 	switch (hci_pi(sk)->channel) {
1826 	case HCI_CHANNEL_RAW:
1827 	case HCI_CHANNEL_USER:
1828 		break;
1829 	case HCI_CHANNEL_MONITOR:
1830 		err = -EOPNOTSUPP;
1831 		goto drop;
1832 	case HCI_CHANNEL_LOGGING:
1833 		err = hci_logging_frame(sk, skb, flags);
1834 		goto drop;
1835 	default:
1836 		mutex_lock(&mgmt_chan_list_lock);
1837 		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1838 		if (chan)
1839 			err = hci_mgmt_cmd(chan, sk, skb);
1840 		else
1841 			err = -EINVAL;
1842 
1843 		mutex_unlock(&mgmt_chan_list_lock);
1844 		goto drop;
1845 	}
1846 
1847 	hdev = hci_hdev_from_sock(sk);
1848 	if (IS_ERR(hdev)) {
1849 		err = PTR_ERR(hdev);
1850 		goto drop;
1851 	}
1852 
1853 	if (!test_bit(HCI_UP, &hdev->flags)) {
1854 		err = -ENETDOWN;
1855 		goto drop;
1856 	}
1857 
1858 	hci_skb_pkt_type(skb) = skb->data[0];
1859 	skb_pull(skb, 1);
1860 
1861 	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1862 		/* No permission check is needed for user channel
1863 		 * since that gets enforced when binding the socket.
1864 		 *
1865 		 * However, check that the packet type is valid.
1866 		 */
1867 		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1868 		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1869 		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1870 		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT &&
1871 		    hci_skb_pkt_type(skb) != HCI_DRV_PKT) {
1872 			err = -EINVAL;
1873 			goto drop;
1874 		}
1875 
1876 		skb_queue_tail(&hdev->raw_q, skb);
1877 		queue_work(hdev->workqueue, &hdev->tx_work);
1878 	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1879 		u16 opcode = get_unaligned_le16(skb->data);
1880 		u16 ogf = hci_opcode_ogf(opcode);
1881 		u16 ocf = hci_opcode_ocf(opcode);
1882 
1883 		if (((ogf > HCI_SFLT_MAX_OGF) ||
1884 		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1885 				   &hci_sec_filter.ocf_mask[ogf])) &&
1886 		    !capable(CAP_NET_RAW)) {
1887 			err = -EPERM;
1888 			goto drop;
1889 		}
1890 
1891 		/* Since the opcode has already been extracted here, store
1892 		 * a copy of the value for later use by the drivers.
1893 		 */
1894 		hci_skb_opcode(skb) = opcode;
1895 
1896 		if (ogf == 0x3f) {
1897 			skb_queue_tail(&hdev->raw_q, skb);
1898 			queue_work(hdev->workqueue, &hdev->tx_work);
1899 		} else {
1900 			/* Stand-alone HCI commands must be flagged as
1901 			 * single-command requests.
1902 			 */
1903 			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1904 
1905 			skb_queue_tail(&hdev->cmd_q, skb);
1906 			queue_work(hdev->workqueue, &hdev->cmd_work);
1907 		}
1908 	} else {
1909 		if (!capable(CAP_NET_RAW)) {
1910 			err = -EPERM;
1911 			goto drop;
1912 		}
1913 
1914 		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1915 		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1916 		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1917 			err = -EINVAL;
1918 			goto drop;
1919 		}
1920 
1921 		skb_queue_tail(&hdev->raw_q, skb);
1922 		queue_work(hdev->workqueue, &hdev->tx_work);
1923 	}
1924 
1925 	err = len;
1926 
1927 done:
1928 	release_sock(sk);
1929 	return err;
1930 
1931 drop:
1932 	kfree_skb(skb);
1933 	goto done;
1934 }
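
/* Example: sending a filter-allowed command on a bound raw socket.
 * The first byte is the packet type, followed by the opcode in little
 * endian and the parameter length, exactly the layout consumed above.
 * Read_Local_Version_Information is OGF 0x04 / OCF 0x0001 (opcode
 * 0x1001) and its bit is set in hci_sec_filter's OGF_INFO_PARAM row,
 * so no CAP_NET_RAW is needed. Sketch; the example_ name is
 * hypothetical:
 */
#if 0
#include <stdint.h>
#include <unistd.h>

#define HCI_COMMAND_PKT	0x01

static int example_read_local_version(int fd)
{
	uint8_t cmd[4] = { HCI_COMMAND_PKT, 0x01, 0x10, 0x00 };

	return write(fd, cmd, sizeof(cmd));
}
#endif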
1935 
1936 static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1937 				   sockptr_t optval, unsigned int optlen)
1938 {
1939 	struct hci_ufilter uf = { .opcode = 0 };
1940 	struct sock *sk = sock->sk;
1941 	int err = 0, opt = 0;
1942 
1943 	BT_DBG("sk %p, opt %d", sk, optname);
1944 
1945 	lock_sock(sk);
1946 
1947 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1948 		err = -EBADFD;
1949 		goto done;
1950 	}
1951 
1952 	switch (optname) {
1953 	case HCI_DATA_DIR:
1954 		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
1955 		if (err)
1956 			break;
1957 
1958 		if (opt)
1959 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1960 		else
1961 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1962 		break;
1963 
1964 	case HCI_TIME_STAMP:
1965 		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
1966 		if (err)
1967 			break;
1968 
1969 		if (opt)
1970 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1971 		else
1972 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1973 		break;
1974 
1975 	case HCI_FILTER:
1976 		{
1977 			struct hci_filter *f = &hci_pi(sk)->filter;
1978 
1979 			uf.type_mask = f->type_mask;
1980 			uf.opcode    = f->opcode;
1981 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1982 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1983 		}
1984 
1985 		err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen);
1986 		if (err)
1987 			break;
1988 
1989 		if (!capable(CAP_NET_RAW)) {
1990 			uf.type_mask &= hci_sec_filter.type_mask;
1991 			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1992 			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1993 		}
1994 
1995 		{
1996 			struct hci_filter *f = &hci_pi(sk)->filter;
1997 
1998 			f->type_mask = uf.type_mask;
1999 			f->opcode    = uf.opcode;
2000 			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
2001 			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
2002 		}
2003 		break;
2004 
2005 	default:
2006 		err = -ENOPROTOOPT;
2007 		break;
2008 	}
2009 
2010 done:
2011 	release_sock(sk);
2012 	return err;
2013 }
2014 
2015 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
2016 			       sockptr_t optval, unsigned int optlen)
2017 {
2018 	struct sock *sk = sock->sk;
2019 	int err = 0;
2020 	u16 opt;
2021 
2022 	BT_DBG("sk %p, opt %d", sk, optname);
2023 
2024 	if (level == SOL_HCI)
2025 		return hci_sock_setsockopt_old(sock, level, optname, optval,
2026 					       optlen);
2027 
2028 	if (level != SOL_BLUETOOTH)
2029 		return -ENOPROTOOPT;
2030 
2031 	lock_sock(sk);
2032 
2033 	switch (optname) {
2034 	case BT_SNDMTU:
2035 	case BT_RCVMTU:
2036 		switch (hci_pi(sk)->channel) {
2037 		/* Don't allow changing MTU for channels that are meant for HCI
2038 		 * traffic only.
2039 		 */
2040 		case HCI_CHANNEL_RAW:
2041 		case HCI_CHANNEL_USER:
2042 			err = -ENOPROTOOPT;
2043 			goto done;
2044 		}
2045 
2046 		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
2047 		if (err)
2048 			break;
2049 
2050 		hci_pi(sk)->mtu = opt;
2051 		break;
2052 
2053 	default:
2054 		err = -ENOPROTOOPT;
2055 		break;
2056 	}
2057 
2058 done:
2059 	release_sock(sk);
2060 	return err;
2061 }
2062 
2063 static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
2064 				   char __user *optval, int __user *optlen)
2065 {
2066 	struct hci_ufilter uf;
2067 	struct sock *sk = sock->sk;
2068 	int len, opt, err = 0;
2069 
2070 	BT_DBG("sk %p, opt %d", sk, optname);
2071 
2072 	if (get_user(len, optlen))
2073 		return -EFAULT;
2074 
2075 	lock_sock(sk);
2076 
2077 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
2078 		err = -EBADFD;
2079 		goto done;
2080 	}
2081 
2082 	switch (optname) {
2083 	case HCI_DATA_DIR:
2084 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2085 			opt = 1;
2086 		else
2087 			opt = 0;
2088 
2089 		if (put_user(opt, optval))
2090 			err = -EFAULT;
2091 		break;
2092 
2093 	case HCI_TIME_STAMP:
2094 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2095 			opt = 1;
2096 		else
2097 			opt = 0;
2098 
2099 		if (put_user(opt, optval))
2100 			err = -EFAULT;
2101 		break;
2102 
2103 	case HCI_FILTER:
2104 		{
2105 			struct hci_filter *f = &hci_pi(sk)->filter;
2106 
2107 			memset(&uf, 0, sizeof(uf));
2108 			uf.type_mask = f->type_mask;
2109 			uf.opcode    = f->opcode;
2110 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2111 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2112 		}
2113 
2114 		len = min_t(unsigned int, len, sizeof(uf));
2115 		if (copy_to_user(optval, &uf, len))
2116 			err = -EFAULT;
2117 		break;
2118 
2119 	default:
2120 		err = -ENOPROTOOPT;
2121 		break;
2122 	}
2123 
2124 done:
2125 	release_sock(sk);
2126 	return err;
2127 }
2128 
2129 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2130 			       char __user *optval, int __user *optlen)
2131 {
2132 	struct sock *sk = sock->sk;
2133 	int err = 0;
2134 
2135 	BT_DBG("sk %p, opt %d", sk, optname);
2136 
2137 	if (level == SOL_HCI)
2138 		return hci_sock_getsockopt_old(sock, level, optname, optval,
2139 					       optlen);
2140 
2141 	if (level != SOL_BLUETOOTH)
2142 		return -ENOPROTOOPT;
2143 
2144 	lock_sock(sk);
2145 
2146 	switch (optname) {
2147 	case BT_SNDMTU:
2148 	case BT_RCVMTU:
2149 		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2150 			err = -EFAULT;
2151 		break;
2152 
2153 	default:
2154 		err = -ENOPROTOOPT;
2155 		break;
2156 	}
2157 
2158 	release_sock(sk);
2159 	return err;
2160 }
2161 
2162 static void hci_sock_destruct(struct sock *sk)
2163 {
2164 	mgmt_cleanup(sk);
2165 	skb_queue_purge(&sk->sk_receive_queue);
2166 	skb_queue_purge(&sk->sk_write_queue);
2167 }
2168 
2169 static const struct proto_ops hci_sock_ops = {
2170 	.family		= PF_BLUETOOTH,
2171 	.owner		= THIS_MODULE,
2172 	.release	= hci_sock_release,
2173 	.bind		= hci_sock_bind,
2174 	.getname	= hci_sock_getname,
2175 	.sendmsg	= hci_sock_sendmsg,
2176 	.recvmsg	= hci_sock_recvmsg,
2177 	.ioctl		= hci_sock_ioctl,
2178 #ifdef CONFIG_COMPAT
2179 	.compat_ioctl	= hci_sock_compat_ioctl,
2180 #endif
2181 	.poll		= datagram_poll,
2182 	.listen		= sock_no_listen,
2183 	.shutdown	= sock_no_shutdown,
2184 	.setsockopt	= hci_sock_setsockopt,
2185 	.getsockopt	= hci_sock_getsockopt,
2186 	.connect	= sock_no_connect,
2187 	.socketpair	= sock_no_socketpair,
2188 	.accept		= sock_no_accept,
2189 	.mmap		= sock_no_mmap
2190 };
2191 
2192 static struct proto hci_sk_proto = {
2193 	.name		= "HCI",
2194 	.owner		= THIS_MODULE,
2195 	.obj_size	= sizeof(struct hci_pinfo)
2196 };
2197 
2198 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2199 			   int kern)
2200 {
2201 	struct sock *sk;
2202 
2203 	BT_DBG("sock %p", sock);
2204 
2205 	if (sock->type != SOCK_RAW)
2206 		return -ESOCKTNOSUPPORT;
2207 
2208 	sock->ops = &hci_sock_ops;
2209 
2210 	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
2211 			   kern);
2212 	if (!sk)
2213 		return -ENOMEM;
2214 
2215 	sock->state = SS_UNCONNECTED;
2216 	sk->sk_destruct = hci_sock_destruct;
2217 
2218 	bt_sock_link(&hci_sk_list, sk);
2219 	return 0;
2220 }
2221 
2222 static const struct net_proto_family hci_sock_family_ops = {
2223 	.family	= PF_BLUETOOTH,
2224 	.owner	= THIS_MODULE,
2225 	.create	= hci_sock_create,
2226 };
2227 
2228 int __init hci_sock_init(void)
2229 {
2230 	int err;
2231 
2232 	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2233 
2234 	err = proto_register(&hci_sk_proto, 0);
2235 	if (err < 0)
2236 		return err;
2237 
2238 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2239 	if (err < 0) {
2240 		BT_ERR("HCI socket registration failed");
2241 		goto error;
2242 	}
2243 
2244 	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2245 	if (err < 0) {
2246 		BT_ERR("Failed to create HCI proc file");
2247 		bt_sock_unregister(BTPROTO_HCI);
2248 		goto error;
2249 	}
2250 
2251 	BT_INFO("HCI socket layer initialized");
2252 
2253 	return 0;
2254 
2255 error:
2256 	proto_unregister(&hci_sk_proto);
2257 	return err;
2258 }
2259 
2260 void hci_sock_cleanup(void)
2261 {
2262 	bt_procfs_cleanup(&init_net, "hci");
2263 	bt_sock_unregister(BTPROTO_HCI);
2264 	proto_unregister(&hci_sk_proto);
2265 }
2266