xref: /linux/net/bluetooth/hci_conn.c (revision 5f4123be3cdb1dbd77fa9d6d2bb96bb9689a0a19)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

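/* Initiate an outgoing ACL connection: seed the paging parameters from
 * the inquiry cache when a recent entry is available and send the HCI
 * Create Connection command. */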
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

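/* Abort a pending outgoing ACL connection attempt with HCI Create
 * Connection Cancel; the hci_ver check skips controllers that predate
 * this command (pre-1.2). */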
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

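/* Tear down an established link by sending HCI Disconnect with the
 * given reason code. */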
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

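/* Request a SCO link on top of the ACL connection identified by handle,
 * using the older HCI Add SCO Connection command. */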
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

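/* Request a synchronous (SCO/eSCO) link on top of the ACL connection
 * identified by handle, using HCI Setup Synchronous Connection. */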
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.max_latency    = cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

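/* Disconnect timer: fires once the last reference to a connection has
 * been dropped and either cancels the pending connection attempt or
 * disconnects the link, depending on its current state. */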
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK)
			hci_acl_connect_cancel(conn);
		else
			hci_acl_disconn(conn, 0x13);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		hci_acl_disconn(conn, 0x13);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

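/* Idle timer: the connection has been inactive for the configured idle
 * timeout, so try to move it into sniff mode. */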
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

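/* Allocate and initialize a new connection object and add it to the
 * device's connection hash. */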
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;

	conn->power_save = 1;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK;
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

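/* Remove a connection from the device's connection hash, detach any
 * linked SCO/ACL connection and release its resources. */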
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	return 0;
}

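/* Pick a suitable local HCI device for a connection to dst. */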
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
		if (!(sco = hci_conn_add(hdev, type, dst))) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		if (lmp_esco_capable(hdev))
			hci_setup_sync(sco, acl->handle);
		else
			hci_add_sco(sco, acl->handle);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
int hci_conn_auth(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) {
		if (!(conn->auth_type & 0x01)) {
			conn->auth_type |= 0x01;
			conn->link_mode &= ~HCI_LM_AUTH;
		}
	}

	if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}
	return 0;
}
EXPORT_SYMBOL(hci_conn_auth);

/* Enable encryption */
int hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}
	return 0;
}
EXPORT_SYMBOL(hci_conn_encrypt);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}
	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}
	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_conn_del_sysfs(c);

		hci_proto_disconn_ind(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

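/* Copy the connection list of a device to user space (used by the
 * HCIGETCONNLIST ioctl). */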
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

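/* Copy information about a single connection to user space (used by
 * the HCIGETCONNINFO ioctl). */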
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

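/* Report the authentication type of an ACL connection to user space
 * (used by the HCIGETAUTHINFO ioctl). */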
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}