xref: /linux/net/bluetooth/hci_conn.c (revision 5d085ad2e68cceec8332b23ea8f630a28b506366)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37 
/* One (e)SCO parameter set from the Bluetooth Core Specification's
 * synchronous connection parameter tables (e.g. S1-S3, T1-T2, D0-D1).
 */
struct sco_param {
	u16 pkt_type;		/* allowed packet types (ESCO_*/EDR_ESCO_MASK bits) */
	u16 max_latency;	/* max latency in ms; 0xffff = don't care */
	u8  retrans_effort;	/* retransmission effort; 0xff = don't care */
};
43 
/* eSCO parameter sets for CVSD air mode; entries are tried in order,
 * one per connection attempt, falling back to less demanding sets.
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
51 
/* Legacy SCO parameter sets for CVSD, used when the remote side is not
 * eSCO capable; tried in order per attempt.
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
56 
/* eSCO parameter sets for transparent (mSBC) air mode; tried in order
 * per attempt.
 */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
61 
/* Undo the explicit-connect bookkeeping for a connection that was
 * created via hci_connect_le_scan(). Re-files the connection params on
 * the list matching their remaining auto-connect action (or deletes
 * them entirely) and refreshes the background scan.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Only explicit-connect entries on pend_le_conns need cleanup. */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params || !params->explicit_connect)
		return;

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	list_del_init(&params->action);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_background_scan(hdev);
}
113 
/* Remove the connection from the hash and release its remaining state:
 * channels, sysfs/debugfs entries, the driver notification, and the
 * hdev/conn references. Shared by hci_conn_del() and le_scan_cleanup().
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	/* Deferred deletion of conn params, requested while the
	 * connection was still in use.
	 */
	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Tell the driver the connection is going away; SCO links get a
	 * dedicated notification for supported air modes.
	 */
	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	/* Drop the device reference taken in hci_conn_add() and the
	 * final reference on the connection itself.
	 */
	hci_dev_put(hdev);

	hci_conn_put(conn);
}
146 
/* Work callback scheduled by hci_connect_le_scan_remove(): performs the
 * deferred cleanup of a timed-out, still-scanning LE connection. Runs
 * on the system workqueue (not hdev->workqueue) so it can safely take
 * hdev->lock.
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	/* Only clean up if the connection is still in the hash; it may
	 * have been deleted by somebody else in the meantime.
	 */
	if (c == conn) {
		hci_connect_le_scan_cleanup(conn);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);

	/* Drop the temporary references taken in
	 * hci_connect_le_scan_remove().
	 */
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
175 
/* Tear down an LE connection attempt that timed out while still in the
 * scanning stage. The real work is deferred to le_scan_cleanup().
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
197 
198 static void hci_acl_create_connection(struct hci_conn *conn)
199 {
200 	struct hci_dev *hdev = conn->hdev;
201 	struct inquiry_entry *ie;
202 	struct hci_cp_create_conn cp;
203 
204 	BT_DBG("hcon %p", conn);
205 
206 	conn->state = BT_CONNECT;
207 	conn->out = true;
208 	conn->role = HCI_ROLE_MASTER;
209 
210 	conn->attempt++;
211 
212 	conn->link_policy = hdev->link_policy;
213 
214 	memset(&cp, 0, sizeof(cp));
215 	bacpy(&cp.bdaddr, &conn->dst);
216 	cp.pscan_rep_mode = 0x02;
217 
218 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
219 	if (ie) {
220 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
221 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
222 			cp.pscan_mode     = ie->data.pscan_mode;
223 			cp.clock_offset   = ie->data.clock_offset |
224 					    cpu_to_le16(0x8000);
225 		}
226 
227 		memcpy(conn->dev_class, ie->data.dev_class, 3);
228 		if (ie->data.ssp_mode > 0)
229 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
230 	}
231 
232 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
233 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
234 		cp.role_switch = 0x01;
235 	else
236 		cp.role_switch = 0x00;
237 
238 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
239 }
240 
241 int hci_disconnect(struct hci_conn *conn, __u8 reason)
242 {
243 	BT_DBG("hcon %p", conn);
244 
245 	/* When we are master of an established connection and it enters
246 	 * the disconnect timeout, then go ahead and try to read the
247 	 * current clock offset.  Processing of the result is done
248 	 * within the event handling and hci_clock_offset_evt function.
249 	 */
250 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
251 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
252 		struct hci_dev *hdev = conn->hdev;
253 		struct hci_cp_read_clock_offset clkoff_cp;
254 
255 		clkoff_cp.handle = cpu_to_le16(conn->handle);
256 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
257 			     &clkoff_cp);
258 	}
259 
260 	return hci_abort_conn(conn, reason);
261 }
262 
263 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
264 {
265 	struct hci_dev *hdev = conn->hdev;
266 	struct hci_cp_add_sco cp;
267 
268 	BT_DBG("hcon %p", conn);
269 
270 	conn->state = BT_CONNECT;
271 	conn->out = true;
272 
273 	conn->attempt++;
274 
275 	cp.handle   = cpu_to_le16(handle);
276 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
277 
278 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
279 }
280 
281 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
282 {
283 	struct hci_dev *hdev = conn->hdev;
284 	struct hci_cp_setup_sync_conn cp;
285 	const struct sco_param *param;
286 
287 	BT_DBG("hcon %p", conn);
288 
289 	conn->state = BT_CONNECT;
290 	conn->out = true;
291 
292 	conn->attempt++;
293 
294 	cp.handle   = cpu_to_le16(handle);
295 
296 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
297 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
298 	cp.voice_setting  = cpu_to_le16(conn->setting);
299 
300 	switch (conn->setting & SCO_AIRMODE_MASK) {
301 	case SCO_AIRMODE_TRANSP:
302 		if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
303 			return false;
304 		param = &esco_param_msbc[conn->attempt - 1];
305 		break;
306 	case SCO_AIRMODE_CVSD:
307 		if (lmp_esco_capable(conn->link)) {
308 			if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
309 				return false;
310 			param = &esco_param_cvsd[conn->attempt - 1];
311 		} else {
312 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
313 				return false;
314 			param = &sco_param_cvsd[conn->attempt - 1];
315 		}
316 		break;
317 	default:
318 		return false;
319 	}
320 
321 	cp.retrans_effort = param->retrans_effort;
322 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
323 	cp.max_latency = __cpu_to_le16(param->max_latency);
324 
325 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
326 		return false;
327 
328 	return true;
329 }
330 
331 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
332 		      u16 to_multiplier)
333 {
334 	struct hci_dev *hdev = conn->hdev;
335 	struct hci_conn_params *params;
336 	struct hci_cp_le_conn_update cp;
337 
338 	hci_dev_lock(hdev);
339 
340 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
341 	if (params) {
342 		params->conn_min_interval = min;
343 		params->conn_max_interval = max;
344 		params->conn_latency = latency;
345 		params->supervision_timeout = to_multiplier;
346 	}
347 
348 	hci_dev_unlock(hdev);
349 
350 	memset(&cp, 0, sizeof(cp));
351 	cp.handle		= cpu_to_le16(conn->handle);
352 	cp.conn_interval_min	= cpu_to_le16(min);
353 	cp.conn_interval_max	= cpu_to_le16(max);
354 	cp.conn_latency		= cpu_to_le16(latency);
355 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
356 	cp.min_ce_len		= cpu_to_le16(0x0000);
357 	cp.max_ce_len		= cpu_to_le16(0x0000);
358 
359 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
360 
361 	if (params)
362 		return 0x01;
363 
364 	return 0x00;
365 }
366 
367 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
368 		      __u8 ltk[16], __u8 key_size)
369 {
370 	struct hci_dev *hdev = conn->hdev;
371 	struct hci_cp_le_start_enc cp;
372 
373 	BT_DBG("hcon %p", conn);
374 
375 	memset(&cp, 0, sizeof(cp));
376 
377 	cp.handle = cpu_to_le16(conn->handle);
378 	cp.rand = rand;
379 	cp.ediv = ediv;
380 	memcpy(cp.ltk, ltk, key_size);
381 
382 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
383 }
384 
385 /* Device _must_ be locked */
386 void hci_sco_setup(struct hci_conn *conn, __u8 status)
387 {
388 	struct hci_conn *sco = conn->link;
389 
390 	if (!sco)
391 		return;
392 
393 	BT_DBG("hcon %p", conn);
394 
395 	if (!status) {
396 		if (lmp_esco_capable(conn->hdev))
397 			hci_setup_sync(sco, conn->handle);
398 		else
399 			hci_add_sco(sco, conn->handle);
400 	} else {
401 		hci_connect_cfm(sco, status);
402 		hci_conn_del(sco);
403 	}
404 }
405 
/* Disconnect timer callback: tears the connection down once its last
 * user has dropped it and the disc_timeout grace period has expired.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
435 
/* Idle timer callback: put an idle ACL connection into sniff mode to
 * save power, optionally configuring sniff subrating first.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Both the local controller and the remote device must support
	 * sniff mode.
	 */
	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	/* Nothing to do unless we are active and the link policy allows
	 * sniff mode.
	 */
	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	/* Configure sniff subrating first, if supported by both sides,
	 * so it takes effect once sniff mode is entered.
	 */
	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Only one mode change may be pending at a time. */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
470 
471 static void hci_conn_auto_accept(struct work_struct *work)
472 {
473 	struct hci_conn *conn = container_of(work, struct hci_conn,
474 					     auto_accept_work.work);
475 
476 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
477 		     &conn->dst);
478 }
479 
480 static void le_disable_advertising(struct hci_dev *hdev)
481 {
482 	if (ext_adv_capable(hdev)) {
483 		struct hci_cp_le_set_ext_adv_enable cp;
484 
485 		cp.enable = 0x00;
486 		cp.num_of_sets = 0x00;
487 
488 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
489 			     &cp);
490 	} else {
491 		u8 enable = 0x00;
492 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
493 			     &enable);
494 	}
495 }
496 
/* Timeout handler for a pending LE connection attempt. */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	/* As initiator, cancel the attempt by aborting the connection. */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
519 
/* Allocate and initialize a new connection object of the given link
 * @type toward @dst and register it in the connection hash. Takes a
 * reference on @hdev (released again in hci_conn_cleanup()). Returns
 * NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Derive the allowed packet types from the link type. */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
604 
/* Delete a connection: cancel its pending work, return unacked frame
 * credits to the device, unlink chained connections and finish with
 * hci_conn_cleanup(). Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Detach any chained SCO/eSCO connection. */
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* LE traffic is accounted against the dedicated LE buffer
		 * pool when the controller has one, otherwise against the
		 * shared ACL pool.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO: unlink from the parent ACL connection and
		 * drop the reference held on it.
		 */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
651 
/* Find a local controller suitable for a connection from @src to @dst.
 * With @src == BDADDR_ANY any usable primary controller whose address
 * differs from @dst matches; otherwise the controller's (identity)
 * address must equal @src with matching @src_type. Returns the device
 * with an elevated reference count (caller must hci_dev_put()), or
 * NULL when no controller matches.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers that are down, claimed by a user
		 * channel, or not primary controllers.
		 */
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
712 
/* Handle a failed LE connection attempt: release the params' reference
 * on the connection, notify userspace when appropriate, delete the
 * connection and restart background scanning / advertising.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	/* Drop the reference the pending connection parameters hold on
	 * this connection, if any.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	conn->state = BT_CLOSED;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
	    (params && params->explicit_connect))
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	hci_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_req_reenable_advertising(hdev);
}
754 
755 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
756 {
757 	struct hci_conn *conn;
758 
759 	hci_dev_lock(hdev);
760 
761 	conn = hci_lookup_le_connect(hdev);
762 
763 	if (!status) {
764 		hci_connect_le_scan_cleanup(conn);
765 		goto done;
766 	}
767 
768 	bt_dev_err(hdev, "request failed to create LE connection: "
769 		   "status 0x%2.2x", status);
770 
771 	if (!conn)
772 		goto done;
773 
774 	hci_le_conn_failed(conn, status);
775 
776 done:
777 	hci_dev_unlock(hdev);
778 }
779 
780 static bool conn_use_rpa(struct hci_conn *conn)
781 {
782 	struct hci_dev *hdev = conn->hdev;
783 
784 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
785 }
786 
787 static void set_ext_conn_params(struct hci_conn *conn,
788 				struct hci_cp_le_ext_conn_param *p)
789 {
790 	struct hci_dev *hdev = conn->hdev;
791 
792 	memset(p, 0, sizeof(*p));
793 
794 	/* Set window to be the same value as the interval to
795 	 * enable continuous scanning.
796 	 */
797 	p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
798 	p->scan_window = p->scan_interval;
799 	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
800 	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
801 	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
802 	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
803 	p->min_ce_len = cpu_to_le16(0x0000);
804 	p->max_ce_len = cpu_to_le16(0x0000);
805 }
806 
/* Append the HCI command(s) that initiate an LE connection to @req,
 * using the extended (multi-PHY) create-connection variant when the
 * controller supports it. A given @direct_rpa is programmed as our
 * random address; otherwise the random address is refreshed as needed.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn,
				       bdaddr_t *direct_rpa)
{
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	/* If direct address was provided we use it instead of current
	 * address.
	 */
	if (direct_rpa) {
		if (bacmp(&req->hdev->random_addr, direct_rpa))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
								direct_rpa);

		/* direct address is always RPA */
		own_addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		/* Update random address, but set require_privacy to false so
		 * that we never connect with an non-resolvable address.
		 */
		if (hci_update_random_address(req, false, conn_use_rpa(conn),
					      &own_addr_type))
			return;
	}

	if (use_ext_conn(hdev)) {
		struct hci_cp_le_ext_create_conn *cp;
		struct hci_cp_le_ext_conn_param *p;
		/* Room for the base command plus one parameter block per
		 * supported PHY (1M, 2M and Coded).
		 */
		u8 data[sizeof(*cp) + sizeof(*p) * 3];
		u32 plen;

		cp = (void *) data;
		p = (void *) cp->data;

		memset(cp, 0, sizeof(*cp));

		bacpy(&cp->peer_addr, &conn->dst);
		cp->peer_addr_type = conn->dst_type;
		cp->own_addr_type = own_addr_type;

		plen = sizeof(*cp);

		/* One parameter block is appended for every PHY flagged
		 * in cp->phys, in ascending PHY order.
		 */
		if (scan_1m(hdev)) {
			cp->phys |= LE_SCAN_PHY_1M;
			set_ext_conn_params(conn, p);

			p++;
			plen += sizeof(*p);
		}

		if (scan_2m(hdev)) {
			cp->phys |= LE_SCAN_PHY_2M;
			set_ext_conn_params(conn, p);

			p++;
			plen += sizeof(*p);
		}

		if (scan_coded(hdev)) {
			cp->phys |= LE_SCAN_PHY_CODED;
			set_ext_conn_params(conn, p);

			plen += sizeof(*p);
		}

		hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);

	} else {
		struct hci_cp_le_create_conn cp;

		memset(&cp, 0, sizeof(cp));

		/* Set window to be the same value as the interval to enable
		 * continuous scanning.
		 */
		cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
		cp.scan_window = cp.scan_interval;

		bacpy(&cp.peer_addr, &conn->dst);
		cp.peer_addr_type = conn->dst_type;
		cp.own_address_type = own_addr_type;
		cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
		cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
		cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
		cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
		cp.min_ce_len = cpu_to_le16(0x0000);
		cp.max_ce_len = cpu_to_le16(0x0000);

		hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
	}

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);
}
902 
903 static void hci_req_directed_advertising(struct hci_request *req,
904 					 struct hci_conn *conn)
905 {
906 	struct hci_dev *hdev = req->hdev;
907 	u8 own_addr_type;
908 	u8 enable;
909 
910 	if (ext_adv_capable(hdev)) {
911 		struct hci_cp_le_set_ext_adv_params cp;
912 		bdaddr_t random_addr;
913 
914 		/* Set require_privacy to false so that the remote device has a
915 		 * chance of identifying us.
916 		 */
917 		if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
918 					   &own_addr_type, &random_addr) < 0)
919 			return;
920 
921 		memset(&cp, 0, sizeof(cp));
922 
923 		cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
924 		cp.own_addr_type = own_addr_type;
925 		cp.channel_map = hdev->le_adv_channel_map;
926 		cp.tx_power = HCI_TX_POWER_INVALID;
927 		cp.primary_phy = HCI_ADV_PHY_1M;
928 		cp.secondary_phy = HCI_ADV_PHY_1M;
929 		cp.handle = 0; /* Use instance 0 for directed adv */
930 		cp.own_addr_type = own_addr_type;
931 		cp.peer_addr_type = conn->dst_type;
932 		bacpy(&cp.peer_addr, &conn->dst);
933 
934 		/* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, for
935 		 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
936 		 * does not supports advertising data when the advertising set already
937 		 * contains some, the controller shall return erroc code 'Invalid
938 		 * HCI Command Parameters(0x12).
939 		 * So it is required to remove adv set for handle 0x00. since we use
940 		 * instance 0 for directed adv.
941 		 */
942 		hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(cp.handle), &cp.handle);
943 
944 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
945 
946 		if (own_addr_type == ADDR_LE_DEV_RANDOM &&
947 		    bacmp(&random_addr, BDADDR_ANY) &&
948 		    bacmp(&random_addr, &hdev->random_addr)) {
949 			struct hci_cp_le_set_adv_set_rand_addr cp;
950 
951 			memset(&cp, 0, sizeof(cp));
952 
953 			cp.handle = 0;
954 			bacpy(&cp.bdaddr, &random_addr);
955 
956 			hci_req_add(req,
957 				    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
958 				    sizeof(cp), &cp);
959 		}
960 
961 		__hci_req_enable_ext_advertising(req, 0x00);
962 	} else {
963 		struct hci_cp_le_set_adv_param cp;
964 
965 		/* Clear the HCI_LE_ADV bit temporarily so that the
966 		 * hci_update_random_address knows that it's safe to go ahead
967 		 * and write a new random address. The flag will be set back on
968 		 * as soon as the SET_ADV_ENABLE HCI command completes.
969 		 */
970 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
971 
972 		/* Set require_privacy to false so that the remote device has a
973 		 * chance of identifying us.
974 		 */
975 		if (hci_update_random_address(req, false, conn_use_rpa(conn),
976 					      &own_addr_type) < 0)
977 			return;
978 
979 		memset(&cp, 0, sizeof(cp));
980 
981 		/* Some controllers might reject command if intervals are not
982 		 * within range for undirected advertising.
983 		 * BCM20702A0 is known to be affected by this.
984 		 */
985 		cp.min_interval = cpu_to_le16(0x0020);
986 		cp.max_interval = cpu_to_le16(0x0020);
987 
988 		cp.type = LE_ADV_DIRECT_IND;
989 		cp.own_address_type = own_addr_type;
990 		cp.direct_addr_type = conn->dst_type;
991 		bacpy(&cp.direct_addr, &conn->dst);
992 		cp.channel_map = hdev->le_adv_channel_map;
993 
994 		hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
995 
996 		enable = 0x01;
997 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
998 			    &enable);
999 	}
1000 
1001 	conn->state = BT_CONNECT;
1002 }
1003 
1004 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1005 				u8 dst_type, u8 sec_level, u16 conn_timeout,
1006 				u8 role, bdaddr_t *direct_rpa)
1007 {
1008 	struct hci_conn_params *params;
1009 	struct hci_conn *conn;
1010 	struct smp_irk *irk;
1011 	struct hci_request req;
1012 	int err;
1013 
1014 	/* Let's make sure that le is enabled.*/
1015 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1016 		if (lmp_le_capable(hdev))
1017 			return ERR_PTR(-ECONNREFUSED);
1018 
1019 		return ERR_PTR(-EOPNOTSUPP);
1020 	}
1021 
1022 	/* Since the controller supports only one LE connection attempt at a
1023 	 * time, we return -EBUSY if there is any connection attempt running.
1024 	 */
1025 	if (hci_lookup_le_connect(hdev))
1026 		return ERR_PTR(-EBUSY);
1027 
1028 	/* If there's already a connection object but it's not in
1029 	 * scanning state it means it must already be established, in
1030 	 * which case we can't do anything else except report a failure
1031 	 * to connect.
1032 	 */
1033 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1034 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1035 		return ERR_PTR(-EBUSY);
1036 	}
1037 
1038 	/* When given an identity address with existing identity
1039 	 * resolving key, the connection needs to be established
1040 	 * to a resolvable random address.
1041 	 *
1042 	 * Storing the resolvable random address is required here
1043 	 * to handle connection failures. The address will later
1044 	 * be resolved back into the original identity address
1045 	 * from the connect request.
1046 	 */
1047 	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1048 	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1049 		dst = &irk->rpa;
1050 		dst_type = ADDR_LE_DEV_RANDOM;
1051 	}
1052 
1053 	if (conn) {
1054 		bacpy(&conn->dst, dst);
1055 	} else {
1056 		conn = hci_conn_add(hdev, LE_LINK, dst, role);
1057 		if (!conn)
1058 			return ERR_PTR(-ENOMEM);
1059 		hci_conn_hold(conn);
1060 		conn->pending_sec_level = sec_level;
1061 	}
1062 
1063 	conn->dst_type = dst_type;
1064 	conn->sec_level = BT_SECURITY_LOW;
1065 	conn->conn_timeout = conn_timeout;
1066 
1067 	hci_req_init(&req, hdev);
1068 
1069 	/* Disable advertising if we're active. For master role
1070 	 * connections most controllers will refuse to connect if
1071 	 * advertising is enabled, and for slave role connections we
1072 	 * anyway have to disable it in order to start directed
1073 	 * advertising.
1074 	 */
1075 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1076 		 __hci_req_disable_advertising(&req);
1077 
1078 	/* If requested to connect as slave use directed advertising */
1079 	if (conn->role == HCI_ROLE_SLAVE) {
1080 		/* If we're active scanning most controllers are unable
1081 		 * to initiate advertising. Simply reject the attempt.
1082 		 */
1083 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
1084 		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
1085 			hci_req_purge(&req);
1086 			hci_conn_del(conn);
1087 			return ERR_PTR(-EBUSY);
1088 		}
1089 
1090 		hci_req_directed_advertising(&req, conn);
1091 		goto create_conn;
1092 	}
1093 
1094 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1095 	if (params) {
1096 		conn->le_conn_min_interval = params->conn_min_interval;
1097 		conn->le_conn_max_interval = params->conn_max_interval;
1098 		conn->le_conn_latency = params->conn_latency;
1099 		conn->le_supv_timeout = params->supervision_timeout;
1100 	} else {
1101 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
1102 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
1103 		conn->le_conn_latency = hdev->le_conn_latency;
1104 		conn->le_supv_timeout = hdev->le_supv_timeout;
1105 	}
1106 
1107 	/* If controller is scanning, we stop it since some controllers are
1108 	 * not able to scan and connect at the same time. Also set the
1109 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
1110 	 * handler for scan disabling knows to set the correct discovery
1111 	 * state.
1112 	 */
1113 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1114 		hci_req_add_le_scan_disable(&req);
1115 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
1116 	}
1117 
1118 	hci_req_add_le_create_conn(&req, conn, direct_rpa);
1119 
1120 create_conn:
1121 	err = hci_req_run(&req, create_le_conn_complete);
1122 	if (err) {
1123 		hci_conn_del(conn);
1124 		return ERR_PTR(err);
1125 	}
1126 
1127 	return conn;
1128 }
1129 
1130 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1131 {
1132 	struct hci_conn *conn;
1133 
1134 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1135 	if (!conn)
1136 		return false;
1137 
1138 	if (conn->state != BT_CONNECTED)
1139 		return false;
1140 
1141 	return true;
1142 }
1143 
1144 /* This function requires the caller holds hdev->lock */
1145 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1146 					bdaddr_t *addr, u8 addr_type)
1147 {
1148 	struct hci_conn_params *params;
1149 
1150 	if (is_connected(hdev, addr, addr_type))
1151 		return -EISCONN;
1152 
1153 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1154 	if (!params) {
1155 		params = hci_conn_params_add(hdev, addr, addr_type);
1156 		if (!params)
1157 			return -ENOMEM;
1158 
1159 		/* If we created new params, mark them to be deleted in
1160 		 * hci_connect_le_scan_cleanup. It's different case than
1161 		 * existing disabled params, those will stay after cleanup.
1162 		 */
1163 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1164 	}
1165 
1166 	/* We're trying to connect, so make sure params are at pend_le_conns */
1167 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1168 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1169 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1170 		list_del_init(&params->action);
1171 		list_add(&params->action, &hdev->pend_le_conns);
1172 	}
1173 
1174 	params->explicit_connect = true;
1175 
1176 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1177 	       params->auto_connect);
1178 
1179 	return 0;
1180 }
1181 
1182 /* This function requires the caller holds hdev->lock */
1183 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1184 				     u8 dst_type, u8 sec_level,
1185 				     u16 conn_timeout)
1186 {
1187 	struct hci_conn *conn;
1188 
1189 	/* Let's make sure that le is enabled.*/
1190 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1191 		if (lmp_le_capable(hdev))
1192 			return ERR_PTR(-ECONNREFUSED);
1193 
1194 		return ERR_PTR(-EOPNOTSUPP);
1195 	}
1196 
1197 	/* Some devices send ATT messages as soon as the physical link is
1198 	 * established. To be able to handle these ATT messages, the user-
1199 	 * space first establishes the connection and then starts the pairing
1200 	 * process.
1201 	 *
1202 	 * So if a hci_conn object already exists for the following connection
1203 	 * attempt, we simply update pending_sec_level and auth_type fields
1204 	 * and return the object found.
1205 	 */
1206 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1207 	if (conn) {
1208 		if (conn->pending_sec_level < sec_level)
1209 			conn->pending_sec_level = sec_level;
1210 		goto done;
1211 	}
1212 
1213 	BT_DBG("requesting refresh of dst_addr");
1214 
1215 	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1216 	if (!conn)
1217 		return ERR_PTR(-ENOMEM);
1218 
1219 	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1220 		hci_conn_del(conn);
1221 		return ERR_PTR(-EBUSY);
1222 	}
1223 
1224 	conn->state = BT_CONNECT;
1225 	set_bit(HCI_CONN_SCANNING, &conn->flags);
1226 	conn->dst_type = dst_type;
1227 	conn->sec_level = BT_SECURITY_LOW;
1228 	conn->pending_sec_level = sec_level;
1229 	conn->conn_timeout = conn_timeout;
1230 
1231 	hci_update_background_scan(hdev);
1232 
1233 done:
1234 	hci_conn_hold(conn);
1235 	return conn;
1236 }
1237 
1238 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1239 				 u8 sec_level, u8 auth_type)
1240 {
1241 	struct hci_conn *acl;
1242 
1243 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1244 		if (lmp_bredr_capable(hdev))
1245 			return ERR_PTR(-ECONNREFUSED);
1246 
1247 		return ERR_PTR(-EOPNOTSUPP);
1248 	}
1249 
1250 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1251 	if (!acl) {
1252 		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1253 		if (!acl)
1254 			return ERR_PTR(-ENOMEM);
1255 	}
1256 
1257 	hci_conn_hold(acl);
1258 
1259 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1260 		acl->sec_level = BT_SECURITY_LOW;
1261 		acl->pending_sec_level = sec_level;
1262 		acl->auth_type = auth_type;
1263 		hci_acl_create_connection(acl);
1264 	}
1265 
1266 	return acl;
1267 }
1268 
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	/* A SCO/eSCO link always rides on top of an ACL link, so create
	 * or reuse the ACL connection first.
	 */
	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			/* Release the reference taken by hci_connect_acl() */
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the two connection objects */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		/* Pull the ACL link out of sniff mode before SCO setup */
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1311 
1312 /* Check link security requirement */
1313 int hci_conn_check_link_mode(struct hci_conn *conn)
1314 {
1315 	BT_DBG("hcon %p", conn);
1316 
1317 	/* In Secure Connections Only mode, it is required that Secure
1318 	 * Connections is used and the link is encrypted with AES-CCM
1319 	 * using a P-256 authenticated combination key.
1320 	 */
1321 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1322 		if (!hci_conn_sc_enabled(conn) ||
1323 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1324 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1325 			return 0;
1326 	}
1327 
1328 	if (hci_conn_ssp_enabled(conn) &&
1329 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1330 		return 0;
1331 
1332 	return 1;
1333 }
1334 
/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower an already pending security level request */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	/* Return 1 (done) when the link is already authenticated at a
	 * sufficient level; otherwise record the requested level.
	 */
	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	/* Only send HCI_Authentication_Requested once per attempt */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
1371 
1372 /* Encrypt the the link */
1373 static void hci_conn_encrypt(struct hci_conn *conn)
1374 {
1375 	BT_DBG("hcon %p", conn);
1376 
1377 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1378 		struct hci_cp_set_conn_encrypt cp;
1379 		cp.handle  = cpu_to_le16(conn->handle);
1380 		cp.encrypt = 0x01;
1381 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1382 			     &cp);
1383 	}
1384 }
1385 
/* Enable security: raise the link's security to at least sec_level.
 * Returns 1 when all requirements are already met, 0 when an
 * authentication and/or encryption procedure has been started (the
 * caller must wait for the corresponding events).
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE link security is handled entirely by SMP */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* Don't start authentication while encryption is being set up */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1463 
1464 /* Check secure link requirement */
1465 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1466 {
1467 	BT_DBG("hcon %p", conn);
1468 
1469 	/* Accept if non-secure or higher security level is required */
1470 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1471 		return 1;
1472 
1473 	/* Accept if secure or higher security level is already present */
1474 	if (conn->sec_level == BT_SECURITY_HIGH ||
1475 	    conn->sec_level == BT_SECURITY_FIPS)
1476 		return 1;
1477 
1478 	/* Reject not secure link */
1479 	return 0;
1480 }
1481 EXPORT_SYMBOL(hci_conn_check_secure);
1482 
1483 /* Switch role */
1484 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1485 {
1486 	BT_DBG("hcon %p", conn);
1487 
1488 	if (role == conn->role)
1489 		return 1;
1490 
1491 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1492 		struct hci_cp_switch_role cp;
1493 		bacpy(&cp.bdaddr, &conn->dst);
1494 		cp.role = role;
1495 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1496 	}
1497 
1498 	return 0;
1499 }
1500 EXPORT_SYMBOL(hci_conn_switch_role);
1501 
1502 /* Enter active mode */
1503 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1504 {
1505 	struct hci_dev *hdev = conn->hdev;
1506 
1507 	BT_DBG("hcon %p mode %d", conn, conn->mode);
1508 
1509 	if (conn->mode != HCI_CM_SNIFF)
1510 		goto timer;
1511 
1512 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1513 		goto timer;
1514 
1515 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1516 		struct hci_cp_exit_sniff_mode cp;
1517 		cp.handle = cpu_to_le16(conn->handle);
1518 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1519 	}
1520 
1521 timer:
1522 	if (hdev->idle_timeout > 0)
1523 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
1524 				   msecs_to_jiffies(hdev->idle_timeout));
1525 }
1526 
1527 /* Drop all connection on the device */
1528 void hci_conn_hash_flush(struct hci_dev *hdev)
1529 {
1530 	struct hci_conn_hash *h = &hdev->conn_hash;
1531 	struct hci_conn *c, *n;
1532 
1533 	BT_DBG("hdev %s", hdev->name);
1534 
1535 	list_for_each_entry_safe(c, n, &h->list, list) {
1536 		c->state = BT_CLOSED;
1537 
1538 		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1539 		hci_conn_del(c);
1540 	}
1541 }
1542 
1543 /* Check pending connect attempts */
1544 void hci_conn_check_pending(struct hci_dev *hdev)
1545 {
1546 	struct hci_conn *conn;
1547 
1548 	BT_DBG("hdev %s", hdev->name);
1549 
1550 	hci_dev_lock(hdev);
1551 
1552 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1553 	if (conn)
1554 		hci_acl_create_connection(conn);
1555 
1556 	hci_dev_unlock(hdev);
1557 }
1558 
1559 static u32 get_link_mode(struct hci_conn *conn)
1560 {
1561 	u32 link_mode = 0;
1562 
1563 	if (conn->role == HCI_ROLE_MASTER)
1564 		link_mode |= HCI_LM_MASTER;
1565 
1566 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1567 		link_mode |= HCI_LM_ENCRYPT;
1568 
1569 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
1570 		link_mode |= HCI_LM_AUTH;
1571 
1572 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
1573 		link_mode |= HCI_LM_SECURE;
1574 
1575 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
1576 		link_mode |= HCI_LM_FIPS;
1577 
1578 	return link_mode;
1579 }
1580 
1581 int hci_get_conn_list(void __user *arg)
1582 {
1583 	struct hci_conn *c;
1584 	struct hci_conn_list_req req, *cl;
1585 	struct hci_conn_info *ci;
1586 	struct hci_dev *hdev;
1587 	int n = 0, size, err;
1588 
1589 	if (copy_from_user(&req, arg, sizeof(req)))
1590 		return -EFAULT;
1591 
1592 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1593 		return -EINVAL;
1594 
1595 	size = sizeof(req) + req.conn_num * sizeof(*ci);
1596 
1597 	cl = kmalloc(size, GFP_KERNEL);
1598 	if (!cl)
1599 		return -ENOMEM;
1600 
1601 	hdev = hci_dev_get(req.dev_id);
1602 	if (!hdev) {
1603 		kfree(cl);
1604 		return -ENODEV;
1605 	}
1606 
1607 	ci = cl->conn_info;
1608 
1609 	hci_dev_lock(hdev);
1610 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
1611 		bacpy(&(ci + n)->bdaddr, &c->dst);
1612 		(ci + n)->handle = c->handle;
1613 		(ci + n)->type  = c->type;
1614 		(ci + n)->out   = c->out;
1615 		(ci + n)->state = c->state;
1616 		(ci + n)->link_mode = get_link_mode(c);
1617 		if (++n >= req.conn_num)
1618 			break;
1619 	}
1620 	hci_dev_unlock(hdev);
1621 
1622 	cl->dev_id = hdev->id;
1623 	cl->conn_num = n;
1624 	size = sizeof(req) + n * sizeof(*ci);
1625 
1626 	hci_dev_put(hdev);
1627 
1628 	err = copy_to_user(arg, cl, size);
1629 	kfree(cl);
1630 
1631 	return err ? -EFAULT : 0;
1632 }
1633 
1634 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1635 {
1636 	struct hci_conn_info_req req;
1637 	struct hci_conn_info ci;
1638 	struct hci_conn *conn;
1639 	char __user *ptr = arg + sizeof(req);
1640 
1641 	if (copy_from_user(&req, arg, sizeof(req)))
1642 		return -EFAULT;
1643 
1644 	hci_dev_lock(hdev);
1645 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1646 	if (conn) {
1647 		bacpy(&ci.bdaddr, &conn->dst);
1648 		ci.handle = conn->handle;
1649 		ci.type  = conn->type;
1650 		ci.out   = conn->out;
1651 		ci.state = conn->state;
1652 		ci.link_mode = get_link_mode(conn);
1653 	}
1654 	hci_dev_unlock(hdev);
1655 
1656 	if (!conn)
1657 		return -ENOENT;
1658 
1659 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1660 }
1661 
1662 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1663 {
1664 	struct hci_auth_info_req req;
1665 	struct hci_conn *conn;
1666 
1667 	if (copy_from_user(&req, arg, sizeof(req)))
1668 		return -EFAULT;
1669 
1670 	hci_dev_lock(hdev);
1671 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1672 	if (conn)
1673 		req.type = conn->auth_type;
1674 	hci_dev_unlock(hdev);
1675 
1676 	if (!conn)
1677 		return -ENOENT;
1678 
1679 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1680 }
1681 
1682 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1683 {
1684 	struct hci_dev *hdev = conn->hdev;
1685 	struct hci_chan *chan;
1686 
1687 	BT_DBG("%s hcon %p", hdev->name, conn);
1688 
1689 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1690 		BT_DBG("Refusing to create new hci_chan");
1691 		return NULL;
1692 	}
1693 
1694 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1695 	if (!chan)
1696 		return NULL;
1697 
1698 	chan->conn = hci_conn_get(conn);
1699 	skb_queue_head_init(&chan->data_q);
1700 	chan->state = BT_CONNECTED;
1701 
1702 	list_add_rcu(&chan->list, &conn->chan_list);
1703 
1704 	return chan;
1705 }
1706 
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for concurrent RCU readers of chan_list to finish before
	 * the channel memory is freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Drop the reference taken in hci_chan_create() */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1726 
1727 void hci_chan_list_flush(struct hci_conn *conn)
1728 {
1729 	struct hci_chan *chan, *n;
1730 
1731 	BT_DBG("hcon %p", conn);
1732 
1733 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1734 		hci_chan_del(chan);
1735 }
1736 
1737 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1738 						 __u16 handle)
1739 {
1740 	struct hci_chan *hchan;
1741 
1742 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1743 		if (hchan->handle == handle)
1744 			return hchan;
1745 	}
1746 
1747 	return NULL;
1748 }
1749 
1750 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1751 {
1752 	struct hci_conn_hash *h = &hdev->conn_hash;
1753 	struct hci_conn *hcon;
1754 	struct hci_chan *hchan = NULL;
1755 
1756 	rcu_read_lock();
1757 
1758 	list_for_each_entry_rcu(hcon, &h->list, list) {
1759 		hchan = __hci_chan_lookup_handle(hcon, handle);
1760 		if (hchan)
1761 			break;
1762 	}
1763 
1764 	rcu_read_unlock();
1765 
1766 	return hchan;
1767 }
1768 
/* Return a BT_PHY_* bitmask describing the PHYs usable on this
 * connection, derived from its link type and packet type settings.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	hci_dev_lock(conn->hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CSB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 *
		 * NOTE: unlike the basic-rate bits above, the EDR bits in
		 * pkt_type are treated as exclusion flags here (a set bit
		 * means the packet type is disallowed), hence the inverted
		 * tests.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		/* Report the negotiated TX/RX PHYs separately */
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	hci_dev_unlock(conn->hdev);

	return phys;
}
1875