xref: /linux/net/bluetooth/hci_conn.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>

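/* Initiate an outgoing LE connection as master, using the default
 * scan and connection parameters. */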
static void hci_le_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->link_mode |= HCI_LM_MASTER;
	conn->sec_level = BT_SECURITY_LOW;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = __constant_cpu_to_le16(0x0060);
	cp.scan_window = __constant_cpu_to_le16(0x0030);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
	cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
	cp.max_ce_len = __constant_cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

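/* Cancel an outgoing LE connection attempt. */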
static void hci_le_connect_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

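/* Initiate an outgoing ACL (BR/EDR) connection, reusing cached inquiry
 * data for the page scan parameters and clock offset when available. */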
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
					    __constant_cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

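/* Cancel an outgoing ACL connection attempt. The cancel command only
 * exists on controllers of Bluetooth version 1.2 or later. */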
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

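/* Request disconnection of an established link with the given reason. */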
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

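/* Set up a legacy SCO connection on top of an existing ACL link. */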
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

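/* Set up an eSCO (synchronous) connection on top of an existing ACL link. */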
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
	cp.max_latency    = __constant_cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

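/* Update the connection parameters of an established LE link. */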
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
			u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= __constant_cpu_to_le16(0x0001);
	cp.max_ce_len		= __constant_cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}

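/* Start LE link encryption using the given EDIV, random value and
 * long term key. */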
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
	cp.ediv = ediv;
	memcpy(cp.rand, rand, sizeof(cp.rand));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

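/* Deferred disconnect work: runs once a connection has stayed
 * unreferenced for its timeout. Cancels pending outgoing connection
 * attempts or disconnects established links. */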
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	__u8 reason;

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	if (atomic_read(&conn->refcnt))
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_connect_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_connect_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = __constant_cpu_to_le16(0);
		cp.min_remote_timeout = __constant_cpu_to_le16(0);
		cp.min_local_timeout  = __constant_cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = __constant_cpu_to_le16(4);
		cp.timeout      = __constant_cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

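/* Idle timer: try to put an unused ACL link into sniff mode. */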
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

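/* Auto-accept timer: confirm a pending user confirmation request. */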
static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

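/* Allocate and initialize a new connection object and add it to the
 * device's connection hash. */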
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
		    (unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	return conn;
}

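/* Tear down a connection: stop its timers, give back the controller
 * buffer credits held by its unacked packets and remove it from the
 * connection hash. */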
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	cancel_delayed_work_sync(&conn->disc_work);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	if (conn->handle == 0)
		kfree(conn);

	return 0;
}

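/* Find a local BR/EDR controller suitable for reaching the given
 * destination address. */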
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_RAW, &d->flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_conn *le;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (type == LE_LINK) {
		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
		if (!le) {
			le = hci_conn_hash_lookup_state(hdev, LE_LINK,
							BT_CONNECT);
			if (le)
				return ERR_PTR(-EBUSY);

			le = hci_conn_add(hdev, LE_LINK, dst);
			if (!le)
				return ERR_PTR(-ENOMEM);

			le->dst_type = bdaddr_to_le(dst_type);
			hci_le_connect(le);
		}

		le->pending_sec_level = sec_level;
		le->auth_type = auth_type;

		hci_conn_hold(le);

		return le;
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* encrypt must be pending if auth is also pending */
		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);
		if (conn->key_type != 0xff)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key always has sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if non-secure is required */

	if (conn->sec_level == BT_SECURITY_HIGH)
		return 1;

	return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

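/* Take a device reference on the connection. */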
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

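/* Drop a device reference; the connection's sysfs device is removed
 * when the last reference goes away. */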
void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

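/* Ioctl helper: copy the list of active connections on a device to
 * user space. */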
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

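/* Ioctl helper: copy information about a single connection to user space. */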
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

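/* Ioctl helper: report the authentication type of an ACL connection
 * to user space. */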
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

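/* Allocate a new HCI channel and link it into the connection's
 * channel list. */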
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = conn;
	skb_queue_head_init(&chan->data_q);

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

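/* Remove a channel from its connection, wait for RCU readers and free it. */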
int hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	skb_queue_purge(&chan->data_q);
	kfree(chan);

	return 0;
}

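/* Remove and free all channels of a connection. */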
void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}