xref: /linux/net/bluetooth/hci_conn.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023-2024 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "smp.h"
38 #include "eir.h"
39 
40 struct sco_param {
41 	u16 pkt_type;
42 	u16 max_latency;
43 	u8  retrans_effort;
44 };
45 
46 struct conn_handle_t {
47 	struct hci_conn *conn;
48 	__u16 handle;
49 };
50 
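/* eSCO/SCO fallback parameter tables. Each table is ordered from the most
 * preferred to the least preferred setting, and conn->attempt (1-based)
 * indexes into it, so every retry of the synchronous connection setup falls
 * back to the next entry. The labels (S1-S3, D0-D1, T1-T2) name the HFP
 * parameter sets the entries correspond to.
 */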
51 static const struct sco_param esco_param_cvsd[] = {
52 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
53 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
54 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
55 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
56 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
57 };
58 
59 static const struct sco_param sco_param_cvsd[] = {
60 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
61 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
62 };
63 
64 static const struct sco_param esco_param_msbc[] = {
65 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
66 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
67 };
68 
69 /* This function requires the caller holds hdev->lock */
70 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
71 {
72 	struct hci_conn_params *params;
73 	struct hci_dev *hdev = conn->hdev;
74 	struct smp_irk *irk;
75 	bdaddr_t *bdaddr;
76 	u8 bdaddr_type;
77 
78 	bdaddr = &conn->dst;
79 	bdaddr_type = conn->dst_type;
80 
81 	/* Check if we need to convert to identity address */
82 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
83 	if (irk) {
84 		bdaddr = &irk->bdaddr;
85 		bdaddr_type = irk->addr_type;
86 	}
87 
88 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
89 					   bdaddr_type);
90 	if (!params)
91 		return;
92 
93 	if (params->conn) {
94 		hci_conn_drop(params->conn);
95 		hci_conn_put(params->conn);
96 		params->conn = NULL;
97 	}
98 
99 	if (!params->explicit_connect)
100 		return;
101 
102 	/* If the status indicates successful cancellation of
103 	 * the attempt (i.e. Unknown Connection Id) there's no point in
104 	 * notifying failure since we'll go back to keep trying to
105 	 * connect. The only exception is explicit connect requests
106 	 * where a timeout + cancel does indicate an actual failure.
107 	 */
108 	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
109 		mgmt_connect_failed(hdev, conn, status);
110 
111 	/* The connection attempt was scanning for a new RPA and is in
112 	 * the scan phase. If the params are not associated with any other
113 	 * autoconnect action, remove them completely. If they are, just unmark
114 	 * them as waiting for connection by clearing the explicit_connect field.
115 	 */
116 	params->explicit_connect = false;
117 
118 	hci_pend_le_list_del_init(params);
119 
120 	switch (params->auto_connect) {
121 	case HCI_AUTO_CONN_EXPLICIT:
122 		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
123 		/* return instead of break to avoid duplicate scan update */
124 		return;
125 	case HCI_AUTO_CONN_DIRECT:
126 	case HCI_AUTO_CONN_ALWAYS:
127 		hci_pend_le_list_add(params, &hdev->pend_le_conns);
128 		break;
129 	case HCI_AUTO_CONN_REPORT:
130 		hci_pend_le_list_add(params, &hdev->pend_le_reports);
131 		break;
132 	default:
133 		break;
134 	}
135 
136 	hci_update_passive_scan(hdev);
137 }
138 
139 static void hci_conn_cleanup(struct hci_conn *conn)
140 {
141 	struct hci_dev *hdev = conn->hdev;
142 
143 	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
144 		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
145 
146 	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
147 		hci_remove_link_key(hdev, &conn->dst);
148 
149 	hci_chan_list_flush(conn);
150 
151 	hci_conn_hash_del(hdev, conn);
152 
153 	if (HCI_CONN_HANDLE_UNSET(conn->handle))
154 		ida_free(&hdev->unset_handle_ida, conn->handle);
155 
156 	if (conn->cleanup)
157 		conn->cleanup(conn);
158 
159 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
160 		switch (conn->setting & SCO_AIRMODE_MASK) {
161 		case SCO_AIRMODE_CVSD:
162 		case SCO_AIRMODE_TRANSP:
163 			if (hdev->notify)
164 				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
165 			break;
166 		}
167 	} else {
168 		if (hdev->notify)
169 			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
170 	}
171 
172 	debugfs_remove_recursive(conn->debugfs);
173 
174 	hci_conn_del_sysfs(conn);
175 
176 	hci_dev_put(hdev);
177 }
178 
179 int hci_disconnect(struct hci_conn *conn, __u8 reason)
180 {
181 	BT_DBG("hcon %p", conn);
182 
183 	/* When we are the central of an established connection and it enters
184 	 * the disconnect timeout, go ahead and try to read the
185 	 * current clock offset. Processing of the result is done
186 	 * within the event handling and the hci_clock_offset_evt function.
187 	 */
188 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
189 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
190 		struct hci_dev *hdev = conn->hdev;
191 		struct hci_cp_read_clock_offset clkoff_cp;
192 
193 		clkoff_cp.handle = cpu_to_le16(conn->handle);
194 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
195 			     &clkoff_cp);
196 	}
197 
198 	return hci_abort_conn(conn, reason);
199 }
200 
201 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
202 {
203 	struct hci_dev *hdev = conn->hdev;
204 	struct hci_cp_add_sco cp;
205 
206 	BT_DBG("hcon %p", conn);
207 
208 	conn->state = BT_CONNECT;
209 	conn->out = true;
210 
211 	conn->attempt++;
212 
213 	cp.handle   = cpu_to_le16(handle);
214 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
215 
216 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
217 }
218 
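/* Advance conn->attempt to the next table entry this link can actually use:
 * entries that enable 2-EV3 packets are skipped unless the remote device on
 * the parent ACL supports eSCO 2M. Returns true while a usable entry remains
 * in the table.
 */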
219 static bool find_next_esco_param(struct hci_conn *conn,
220 				 const struct sco_param *esco_param, int size)
221 {
222 	if (!conn->parent)
223 		return false;
224 
225 	for (; conn->attempt <= size; conn->attempt++) {
226 		if (lmp_esco_2m_capable(conn->parent) ||
227 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
228 			break;
229 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
230 		       conn, conn->attempt);
231 	}
232 
233 	return conn->attempt <= size;
234 }
235 
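/* Configure the vendor-specific offload data path for an eSCO link using the
 * codec configuration supplied by the driver. The Configure Data Path command
 * is sent once per direction (0x00 input, 0x01 output).
 */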
236 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
237 {
238 	int err;
239 	__u8 vnd_len, *vnd_data = NULL;
240 	struct hci_op_configure_data_path *cmd = NULL;
241 
242 	/* Do not treat the two checks below as errors: the first means the user
243 	 * does not want to use HFP offload mode and the second means the vendor
244 	 * controller does not need the HCI command below for offload mode.
245 	 */
246 	if (!codec->data_path || !hdev->get_codec_config_data)
247 		return 0;
248 
249 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
250 					  &vnd_data);
251 	if (err < 0)
252 		goto error;
253 
254 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
255 	if (!cmd) {
256 		err = -ENOMEM;
257 		goto error;
258 	}
259 
260 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
261 	if (err < 0)
262 		goto error;
263 
264 	cmd->vnd_len = vnd_len;
265 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
266 
267 	cmd->direction = 0x00;
268 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
269 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
270 
271 	cmd->direction = 0x01;
272 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
273 				    sizeof(*cmd) + vnd_len, cmd,
274 				    HCI_CMD_TIMEOUT);
275 error:
276 
277 	kfree(cmd);
278 	kfree(vnd_data);
279 	return err;
280 }
281 
282 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
283 {
284 	struct conn_handle_t *conn_handle = data;
285 	struct hci_conn *conn = conn_handle->conn;
286 	__u16 handle = conn_handle->handle;
287 	struct hci_cp_enhanced_setup_sync_conn cp;
288 	const struct sco_param *param;
289 
290 	kfree(conn_handle);
291 
292 	bt_dev_dbg(hdev, "hcon %p", conn);
293 
294 	configure_datapath_sync(hdev, &conn->codec);
295 
296 	conn->state = BT_CONNECT;
297 	conn->out = true;
298 
299 	conn->attempt++;
300 
301 	memset(&cp, 0x00, sizeof(cp));
302 
303 	cp.handle   = cpu_to_le16(handle);
304 
305 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
306 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
307 
308 	switch (conn->codec.id) {
309 	case BT_CODEC_MSBC:
310 		if (!find_next_esco_param(conn, esco_param_msbc,
311 					  ARRAY_SIZE(esco_param_msbc)))
312 			return -EINVAL;
313 
314 		param = &esco_param_msbc[conn->attempt - 1];
315 		cp.tx_coding_format.id = 0x05;
316 		cp.rx_coding_format.id = 0x05;
317 		cp.tx_codec_frame_size = __cpu_to_le16(60);
318 		cp.rx_codec_frame_size = __cpu_to_le16(60);
319 		cp.in_bandwidth = __cpu_to_le32(32000);
320 		cp.out_bandwidth = __cpu_to_le32(32000);
321 		cp.in_coding_format.id = 0x04;
322 		cp.out_coding_format.id = 0x04;
323 		cp.in_coded_data_size = __cpu_to_le16(16);
324 		cp.out_coded_data_size = __cpu_to_le16(16);
325 		cp.in_pcm_data_format = 2;
326 		cp.out_pcm_data_format = 2;
327 		cp.in_pcm_sample_payload_msb_pos = 0;
328 		cp.out_pcm_sample_payload_msb_pos = 0;
329 		cp.in_data_path = conn->codec.data_path;
330 		cp.out_data_path = conn->codec.data_path;
331 		cp.in_transport_unit_size = 1;
332 		cp.out_transport_unit_size = 1;
333 		break;
334 
335 	case BT_CODEC_TRANSPARENT:
336 		if (!find_next_esco_param(conn, esco_param_msbc,
337 					  ARRAY_SIZE(esco_param_msbc)))
338 			return -EINVAL;
339 		param = &esco_param_msbc[conn->attempt - 1];
340 		cp.tx_coding_format.id = 0x03;
341 		cp.rx_coding_format.id = 0x03;
342 		cp.tx_codec_frame_size = __cpu_to_le16(60);
343 		cp.rx_codec_frame_size = __cpu_to_le16(60);
344 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
345 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
346 		cp.in_coding_format.id = 0x03;
347 		cp.out_coding_format.id = 0x03;
348 		cp.in_coded_data_size = __cpu_to_le16(16);
349 		cp.out_coded_data_size = __cpu_to_le16(16);
350 		cp.in_pcm_data_format = 2;
351 		cp.out_pcm_data_format = 2;
352 		cp.in_pcm_sample_payload_msb_pos = 0;
353 		cp.out_pcm_sample_payload_msb_pos = 0;
354 		cp.in_data_path = conn->codec.data_path;
355 		cp.out_data_path = conn->codec.data_path;
356 		cp.in_transport_unit_size = 1;
357 		cp.out_transport_unit_size = 1;
358 		break;
359 
360 	case BT_CODEC_CVSD:
361 		if (conn->parent && lmp_esco_capable(conn->parent)) {
362 			if (!find_next_esco_param(conn, esco_param_cvsd,
363 						  ARRAY_SIZE(esco_param_cvsd)))
364 				return -EINVAL;
365 			param = &esco_param_cvsd[conn->attempt - 1];
366 		} else {
367 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
368 				return -EINVAL;
369 			param = &sco_param_cvsd[conn->attempt - 1];
370 		}
371 		cp.tx_coding_format.id = 2;
372 		cp.rx_coding_format.id = 2;
373 		cp.tx_codec_frame_size = __cpu_to_le16(60);
374 		cp.rx_codec_frame_size = __cpu_to_le16(60);
375 		cp.in_bandwidth = __cpu_to_le32(16000);
376 		cp.out_bandwidth = __cpu_to_le32(16000);
377 		cp.in_coding_format.id = 4;
378 		cp.out_coding_format.id = 4;
379 		cp.in_coded_data_size = __cpu_to_le16(16);
380 		cp.out_coded_data_size = __cpu_to_le16(16);
381 		cp.in_pcm_data_format = 2;
382 		cp.out_pcm_data_format = 2;
383 		cp.in_pcm_sample_payload_msb_pos = 0;
384 		cp.out_pcm_sample_payload_msb_pos = 0;
385 		cp.in_data_path = conn->codec.data_path;
386 		cp.out_data_path = conn->codec.data_path;
387 		cp.in_transport_unit_size = 16;
388 		cp.out_transport_unit_size = 16;
389 		break;
390 	default:
391 		return -EINVAL;
392 	}
393 
394 	cp.retrans_effort = param->retrans_effort;
395 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
396 	cp.max_latency = __cpu_to_le16(param->max_latency);
397 
398 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
399 		return -EIO;
400 
401 	return 0;
402 }
403 
404 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
405 {
406 	struct hci_dev *hdev = conn->hdev;
407 	struct hci_cp_setup_sync_conn cp;
408 	const struct sco_param *param;
409 
410 	bt_dev_dbg(hdev, "hcon %p", conn);
411 
412 	conn->state = BT_CONNECT;
413 	conn->out = true;
414 
415 	conn->attempt++;
416 
417 	cp.handle   = cpu_to_le16(handle);
418 
419 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
420 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
421 	cp.voice_setting  = cpu_to_le16(conn->setting);
422 
423 	switch (conn->setting & SCO_AIRMODE_MASK) {
424 	case SCO_AIRMODE_TRANSP:
425 		if (!find_next_esco_param(conn, esco_param_msbc,
426 					  ARRAY_SIZE(esco_param_msbc)))
427 			return false;
428 		param = &esco_param_msbc[conn->attempt - 1];
429 		break;
430 	case SCO_AIRMODE_CVSD:
431 		if (conn->parent && lmp_esco_capable(conn->parent)) {
432 			if (!find_next_esco_param(conn, esco_param_cvsd,
433 						  ARRAY_SIZE(esco_param_cvsd)))
434 				return false;
435 			param = &esco_param_cvsd[conn->attempt - 1];
436 		} else {
437 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
438 				return false;
439 			param = &sco_param_cvsd[conn->attempt - 1];
440 		}
441 		break;
442 	default:
443 		return false;
444 	}
445 
446 	cp.retrans_effort = param->retrans_effort;
447 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
448 	cp.max_latency = __cpu_to_le16(param->max_latency);
449 
450 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
451 		return false;
452 
453 	return true;
454 }
455 
456 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
457 {
458 	int result;
459 	struct conn_handle_t *conn_handle;
460 
461 	if (enhanced_sync_conn_capable(conn->hdev)) {
462 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
463 
464 		if (!conn_handle)
465 			return false;
466 
467 		conn_handle->conn = conn;
468 		conn_handle->handle = handle;
469 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
470 					    conn_handle, NULL);
471 		if (result < 0)
472 			kfree(conn_handle);
473 
474 		return result == 0;
475 	}
476 
477 	return hci_setup_sync_conn(conn, handle);
478 }
479 
480 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
481 		      u16 to_multiplier)
482 {
483 	struct hci_dev *hdev = conn->hdev;
484 	struct hci_conn_params *params;
485 	struct hci_cp_le_conn_update cp;
486 
487 	hci_dev_lock(hdev);
488 
489 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
490 	if (params) {
491 		params->conn_min_interval = min;
492 		params->conn_max_interval = max;
493 		params->conn_latency = latency;
494 		params->supervision_timeout = to_multiplier;
495 	}
496 
497 	hci_dev_unlock(hdev);
498 
499 	memset(&cp, 0, sizeof(cp));
500 	cp.handle		= cpu_to_le16(conn->handle);
501 	cp.conn_interval_min	= cpu_to_le16(min);
502 	cp.conn_interval_max	= cpu_to_le16(max);
503 	cp.conn_latency		= cpu_to_le16(latency);
504 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
505 	cp.min_ce_len		= cpu_to_le16(0x0000);
506 	cp.max_ce_len		= cpu_to_le16(0x0000);
507 
508 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
509 
510 	if (params)
511 		return 0x01;
512 
513 	return 0x00;
514 }
515 
516 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
517 		      __u8 ltk[16], __u8 key_size)
518 {
519 	struct hci_dev *hdev = conn->hdev;
520 	struct hci_cp_le_start_enc cp;
521 
522 	BT_DBG("hcon %p", conn);
523 
524 	memset(&cp, 0, sizeof(cp));
525 
526 	cp.handle = cpu_to_le16(conn->handle);
527 	cp.rand = rand;
528 	cp.ediv = ediv;
529 	memcpy(cp.ltk, ltk, key_size);
530 
531 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
532 }
533 
534 /* Device _must_ be locked */
535 void hci_sco_setup(struct hci_conn *conn, __u8 status)
536 {
537 	struct hci_link *link;
538 
539 	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
540 	if (!link || !link->conn)
541 		return;
542 
543 	BT_DBG("hcon %p", conn);
544 
545 	if (!status) {
546 		if (lmp_esco_capable(conn->hdev))
547 			hci_setup_sync(link->conn, conn->handle);
548 		else
549 			hci_add_sco(link->conn, conn->handle);
550 	} else {
551 		hci_connect_cfm(link->conn, status);
552 		hci_conn_del(link->conn);
553 	}
554 }
555 
556 static void hci_conn_timeout(struct work_struct *work)
557 {
558 	struct hci_conn *conn = container_of(work, struct hci_conn,
559 					     disc_work.work);
560 	int refcnt = atomic_read(&conn->refcnt);
561 
562 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
563 
564 	WARN_ON(refcnt < 0);
565 
566 	/* FIXME: It was observed that in a failed-pairing scenario the refcnt
567 	 * drops below 0. Probably this is because l2cap_conn_del calls
568 	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
569 	 * dropped. After that loop hci_chan_del is called, which also drops
570 	 * conn. For now, keep the ACL alive if refcnt is higher than 0;
571 	 * otherwise drop it.
572 	 */
573 	if (refcnt > 0)
574 		return;
575 
576 	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
577 }
578 
579 /* Enter sniff mode */
580 static void hci_conn_idle(struct work_struct *work)
581 {
582 	struct hci_conn *conn = container_of(work, struct hci_conn,
583 					     idle_work.work);
584 	struct hci_dev *hdev = conn->hdev;
585 
586 	BT_DBG("hcon %p mode %d", conn, conn->mode);
587 
588 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
589 		return;
590 
591 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
592 		return;
593 
594 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
595 		struct hci_cp_sniff_subrate cp;
596 		cp.handle             = cpu_to_le16(conn->handle);
597 		cp.max_latency        = cpu_to_le16(0);
598 		cp.min_remote_timeout = cpu_to_le16(0);
599 		cp.min_local_timeout  = cpu_to_le16(0);
600 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
601 	}
602 
603 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
604 		struct hci_cp_sniff_mode cp;
605 		cp.handle       = cpu_to_le16(conn->handle);
606 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
607 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
608 		cp.attempt      = cpu_to_le16(4);
609 		cp.timeout      = cpu_to_le16(1);
610 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
611 	}
612 }
613 
614 static void hci_conn_auto_accept(struct work_struct *work)
615 {
616 	struct hci_conn *conn = container_of(work, struct hci_conn,
617 					     auto_accept_work.work);
618 
619 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
620 		     &conn->dst);
621 }
622 
623 static void le_disable_advertising(struct hci_dev *hdev)
624 {
625 	if (ext_adv_capable(hdev)) {
626 		struct hci_cp_le_set_ext_adv_enable cp;
627 
628 		cp.enable = 0x00;
629 		cp.num_of_sets = 0x00;
630 
631 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
632 			     &cp);
633 	} else {
634 		u8 enable = 0x00;
635 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
636 			     &enable);
637 	}
638 }
639 
640 static void le_conn_timeout(struct work_struct *work)
641 {
642 	struct hci_conn *conn = container_of(work, struct hci_conn,
643 					     le_conn_timeout.work);
644 	struct hci_dev *hdev = conn->hdev;
645 
646 	BT_DBG("");
647 
648 	/* We could end up here due to having done directed advertising,
649 	 * so clean up the state if necessary. This should however only
650 	 * happen with broken hardware or if low duty cycle was used
651 	 * (which doesn't have a timeout of its own).
652 	 */
653 	if (conn->role == HCI_ROLE_SLAVE) {
654 		/* Disable LE Advertising */
655 		le_disable_advertising(hdev);
656 		hci_dev_lock(hdev);
657 		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
658 		hci_dev_unlock(hdev);
659 		return;
660 	}
661 
662 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
663 }
664 
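/* Scratch state for the hci_conn hash-walk callbacks below, used to count
 * matching BIS/CIS connections and to record which BIG/PA sync resources
 * still have to be terminated.
 */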
665 struct iso_list_data {
666 	union {
667 		u8  cig;
668 		u8  big;
669 	};
670 	union {
671 		u8  cis;
672 		u8  bis;
673 		u16 sync_handle;
674 	};
675 	int count;
676 	bool big_term;
677 	bool pa_sync_term;
678 	bool big_sync_term;
679 };
680 
681 static void bis_list(struct hci_conn *conn, void *data)
682 {
683 	struct iso_list_data *d = data;
684 
685 	/* Skip if not broadcast/ANY address */
686 	if (bacmp(&conn->dst, BDADDR_ANY))
687 		return;
688 
689 	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
690 	    d->bis != conn->iso_qos.bcast.bis)
691 		return;
692 
693 	d->count++;
694 }
695 
696 static int terminate_big_sync(struct hci_dev *hdev, void *data)
697 {
698 	struct iso_list_data *d = data;
699 
700 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
701 
702 	hci_disable_per_advertising_sync(hdev, d->bis);
703 	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
704 
705 	/* Only terminate BIG if it has been created */
706 	if (!d->big_term)
707 		return 0;
708 
709 	return hci_le_terminate_big_sync(hdev, d->big,
710 					 HCI_ERROR_LOCAL_HOST_TERM);
711 }
712 
713 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
714 {
715 	kfree(data);
716 }
717 
718 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
719 {
720 	struct iso_list_data *d;
721 	int ret;
722 
723 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
724 		   conn->iso_qos.bcast.bis);
725 
726 	d = kzalloc(sizeof(*d), GFP_KERNEL);
727 	if (!d)
728 		return -ENOMEM;
729 
730 	d->big = conn->iso_qos.bcast.big;
731 	d->bis = conn->iso_qos.bcast.bis;
732 	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
733 
734 	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
735 				 terminate_big_destroy);
736 	if (ret)
737 		kfree(d);
738 
739 	return ret;
740 }
741 
742 static int big_terminate_sync(struct hci_dev *hdev, void *data)
743 {
744 	struct iso_list_data *d = data;
745 
746 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
747 		   d->sync_handle);
748 
749 	if (d->big_sync_term)
750 		hci_le_big_terminate_sync(hdev, d->big);
751 
752 	if (d->pa_sync_term)
753 		return hci_le_pa_terminate_sync(hdev, d->sync_handle);
754 
755 	return 0;
756 }
757 
758 static void find_bis(struct hci_conn *conn, void *data)
759 {
760 	struct iso_list_data *d = data;
761 
762 	/* Ignore if BIG doesn't match */
763 	if (d->big != conn->iso_qos.bcast.big)
764 		return;
765 
766 	d->count++;
767 }
768 
769 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
770 {
771 	struct iso_list_data *d;
772 	int ret;
773 
774 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
775 
776 	d = kzalloc(sizeof(*d), GFP_KERNEL);
777 	if (!d)
778 		return -ENOMEM;
779 
780 	d->big = big;
781 	d->sync_handle = conn->sync_handle;
782 
783 	if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
784 		hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
785 					HCI_CONN_PA_SYNC, d);
786 
787 		if (!d->count)
788 			d->pa_sync_term = true;
789 
790 		d->count = 0;
791 	}
792 
793 	if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
794 		hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
795 					HCI_CONN_BIG_SYNC, d);
796 
797 		if (!d->count)
798 			d->big_sync_term = true;
799 	}
800 
801 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
802 				 terminate_big_destroy);
803 	if (ret)
804 		kfree(d);
805 
806 	return ret;
807 }
808 
809 /* Cleanup BIS connection
810  *
811  * Detects if there are any BIS left connected in a BIG.
812  * broadcaster: Remove advertising instance and terminate BIG.
813  * broadcast receiver: Terminate BIG sync and terminate PA sync.
814  */
815 static void bis_cleanup(struct hci_conn *conn)
816 {
817 	struct hci_dev *hdev = conn->hdev;
818 	struct hci_conn *bis;
819 
820 	bt_dev_dbg(hdev, "conn %p", conn);
821 
822 	if (conn->role == HCI_ROLE_MASTER) {
823 		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
824 			return;
825 
826 		/* Check if ISO connection is a BIS and terminate advertising
827 		 * set and BIG if there are no other connections using it.
828 		 */
829 		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
830 		if (bis)
831 			return;
832 
833 		hci_le_terminate_big(hdev, conn);
834 	} else {
835 		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
836 				     conn);
837 	}
838 }
839 
840 static int remove_cig_sync(struct hci_dev *hdev, void *data)
841 {
842 	u8 handle = PTR_UINT(data);
843 
844 	return hci_le_remove_cig_sync(hdev, handle);
845 }
846 
847 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
848 {
849 	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
850 
851 	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
852 				  NULL);
853 }
854 
855 static void find_cis(struct hci_conn *conn, void *data)
856 {
857 	struct iso_list_data *d = data;
858 
859 	/* Ignore broadcast or if the CIG doesn't match */
860 	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
861 		return;
862 
863 	d->count++;
864 }
865 
866 /* Cleanup CIS connection:
867  *
868  * Detects if there are any CIS left connected in a CIG and removes the CIG if not.
869  */
870 static void cis_cleanup(struct hci_conn *conn)
871 {
872 	struct hci_dev *hdev = conn->hdev;
873 	struct iso_list_data d;
874 
875 	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
876 		return;
877 
878 	memset(&d, 0, sizeof(d));
879 	d.cig = conn->iso_qos.ucast.cig;
880 
881 	/* Check if ISO connection is a CIS and remove CIG if there are
882 	 * no other connections using it.
883 	 */
884 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
885 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
886 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
887 	if (d.count)
888 		return;
889 
890 	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
891 }
892 
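/* Connections without a controller-assigned handle yet get a placeholder
 * above HCI_CONN_HANDLE_MAX, allocated from an IDA so concurrent pending
 * connections stay distinct. The placeholder is released again in
 * hci_conn_set_handle() or hci_conn_cleanup().
 */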
893 static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
894 {
895 	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
896 			       U16_MAX, GFP_ATOMIC);
897 }
898 
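/* Allocate and initialize a new hci_conn. The MTU/packet checks up front
 * refuse link types for which the controller reported no usable buffers.
 */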
899 static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
900 				       u8 role, u16 handle)
901 {
902 	struct hci_conn *conn;
903 
904 	switch (type) {
905 	case ACL_LINK:
906 		if (!hdev->acl_mtu)
907 			return ERR_PTR(-ECONNREFUSED);
908 		break;
909 	case ISO_LINK:
910 		if (hdev->iso_mtu)
911 			/* Dedicated ISO Buffer exists */
912 			break;
913 		fallthrough;
914 	case LE_LINK:
915 		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
916 			return ERR_PTR(-ECONNREFUSED);
917 		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
918 			return ERR_PTR(-ECONNREFUSED);
919 		break;
920 	case SCO_LINK:
921 	case ESCO_LINK:
922 		if (!hdev->sco_pkts)
923 			/* Controller does not support SCO or eSCO over HCI */
924 			return ERR_PTR(-ECONNREFUSED);
925 		break;
926 	default:
927 		return ERR_PTR(-ECONNREFUSED);
928 	}
929 
930 	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
931 
932 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
933 	if (!conn)
934 		return ERR_PTR(-ENOMEM);
935 
936 	bacpy(&conn->dst, dst);
937 	bacpy(&conn->src, &hdev->bdaddr);
938 	conn->handle = handle;
939 	conn->hdev  = hdev;
940 	conn->type  = type;
941 	conn->role  = role;
942 	conn->mode  = HCI_CM_ACTIVE;
943 	conn->state = BT_OPEN;
944 	conn->auth_type = HCI_AT_GENERAL_BONDING;
945 	conn->io_capability = hdev->io_capability;
946 	conn->remote_auth = 0xff;
947 	conn->key_type = 0xff;
948 	conn->rssi = HCI_RSSI_INVALID;
949 	conn->tx_power = HCI_TX_POWER_INVALID;
950 	conn->max_tx_power = HCI_TX_POWER_INVALID;
951 	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
952 
953 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
954 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
955 
956 	/* Set Default Authenticated payload timeout to 30s */
957 	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
958 
959 	if (conn->role == HCI_ROLE_MASTER)
960 		conn->out = true;
961 
962 	switch (type) {
963 	case ACL_LINK:
964 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
965 		conn->mtu = hdev->acl_mtu;
966 		break;
967 	case LE_LINK:
968 		/* conn->src should reflect the local identity address */
969 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
970 		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
971 		break;
972 	case ISO_LINK:
973 		/* conn->src should reflect the local identity address */
974 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
975 
976 		/* set proper cleanup function */
977 		if (!bacmp(dst, BDADDR_ANY))
978 			conn->cleanup = bis_cleanup;
979 		else if (conn->role == HCI_ROLE_MASTER)
980 			conn->cleanup = cis_cleanup;
981 
982 		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
983 			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
984 		break;
985 	case SCO_LINK:
986 		if (lmp_esco_capable(hdev))
987 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
988 					(hdev->esco_type & EDR_ESCO_MASK);
989 		else
990 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
991 
992 		conn->mtu = hdev->sco_mtu;
993 		break;
994 	case ESCO_LINK:
995 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
996 		conn->mtu = hdev->sco_mtu;
997 		break;
998 	}
999 
1000 	skb_queue_head_init(&conn->data_q);
1001 
1002 	INIT_LIST_HEAD(&conn->chan_list);
1003 	INIT_LIST_HEAD(&conn->link_list);
1004 
1005 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1006 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1007 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1008 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1009 
1010 	atomic_set(&conn->refcnt, 0);
1011 
1012 	hci_dev_hold(hdev);
1013 
1014 	hci_conn_hash_add(hdev, conn);
1015 
1016 	/* The SCO and eSCO connections will only be notified when their
1017 	 * setup has been completed. This is different from ACL links, which
1018 	 * can be notified right away.
1019 	 */
1020 	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1021 		if (hdev->notify)
1022 			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1023 	}
1024 
1025 	hci_conn_init_sysfs(conn);
1026 
1027 	return conn;
1028 }
1029 
1030 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1031 				    bdaddr_t *dst, u8 role)
1032 {
1033 	int handle;
1034 
1035 	bt_dev_dbg(hdev, "dst %pMR", dst);
1036 
1037 	handle = hci_conn_hash_alloc_unset(hdev);
1038 	if (unlikely(handle < 0))
1039 		return ERR_PTR(-ECONNREFUSED);
1040 
1041 	return __hci_conn_add(hdev, type, dst, role, handle);
1042 }
1043 
1044 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
1045 			      u8 role, u16 handle)
1046 {
1047 	if (handle > HCI_CONN_HANDLE_MAX)
1048 		return ERR_PTR(-EINVAL);
1049 
1050 	return __hci_conn_add(hdev, type, dst, role, handle);
1051 }
1052 
1053 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1054 {
1055 	if (!reason)
1056 		reason = HCI_ERROR_REMOTE_USER_TERM;
1057 
1058 	/* Due to a race, the SCO/ISO conn might not be established yet at this
1059 	 * point, and nothing else will clean it up. In other cases it is done
1060 	 * via HCI events.
1061 	 */
1062 	switch (conn->type) {
1063 	case SCO_LINK:
1064 	case ESCO_LINK:
1065 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
1066 			hci_conn_failed(conn, reason);
1067 		break;
1068 	case ISO_LINK:
1069 		if ((conn->state != BT_CONNECTED &&
1070 		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
1071 		    test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
1072 			hci_conn_failed(conn, reason);
1073 		break;
1074 	}
1075 }
1076 
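/* Unlink a connection from its parent/child relationships: for a parent ACL
 * this recursively unlinks every child SCO/ISO connection first; for a child
 * it removes the link entry and drops the references held on the parent.
 */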
1077 static void hci_conn_unlink(struct hci_conn *conn)
1078 {
1079 	struct hci_dev *hdev = conn->hdev;
1080 
1081 	bt_dev_dbg(hdev, "hcon %p", conn);
1082 
1083 	if (!conn->parent) {
1084 		struct hci_link *link, *t;
1085 
1086 		list_for_each_entry_safe(link, t, &conn->link_list, list) {
1087 			struct hci_conn *child = link->conn;
1088 
1089 			hci_conn_unlink(child);
1090 
1091 			/* If hdev is down it means
1092 			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
1093 			 * and links don't need to be cleaned up, as all connections
1094 			 * will be cleaned up.
1095 			 */
1096 			if (!test_bit(HCI_UP, &hdev->flags))
1097 				continue;
1098 
1099 			hci_conn_cleanup_child(child, conn->abort_reason);
1100 		}
1101 
1102 		return;
1103 	}
1104 
1105 	if (!conn->link)
1106 		return;
1107 
1108 	list_del_rcu(&conn->link->list);
1109 	synchronize_rcu();
1110 
1111 	hci_conn_drop(conn->parent);
1112 	hci_conn_put(conn->parent);
1113 	conn->parent = NULL;
1114 
1115 	kfree(conn->link);
1116 	conn->link = NULL;
1117 }
1118 
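/* Tear down a connection object: unlink it from any parent or children,
 * cancel its delayed work, return unacked frame credits to the controller
 * counters and free the remaining state via hci_conn_cleanup().
 */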
1119 void hci_conn_del(struct hci_conn *conn)
1120 {
1121 	struct hci_dev *hdev = conn->hdev;
1122 
1123 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1124 
1125 	hci_conn_unlink(conn);
1126 
1127 	cancel_delayed_work_sync(&conn->disc_work);
1128 	cancel_delayed_work_sync(&conn->auto_accept_work);
1129 	cancel_delayed_work_sync(&conn->idle_work);
1130 
1131 	if (conn->type == ACL_LINK) {
1132 		/* Unacked frames */
1133 		hdev->acl_cnt += conn->sent;
1134 	} else if (conn->type == LE_LINK) {
1135 		cancel_delayed_work(&conn->le_conn_timeout);
1136 
1137 		if (hdev->le_pkts)
1138 			hdev->le_cnt += conn->sent;
1139 		else
1140 			hdev->acl_cnt += conn->sent;
1141 	} else {
1142 		/* Unacked ISO frames */
1143 		if (conn->type == ISO_LINK) {
1144 			if (hdev->iso_pkts)
1145 				hdev->iso_cnt += conn->sent;
1146 			else if (hdev->le_pkts)
1147 				hdev->le_cnt += conn->sent;
1148 			else
1149 				hdev->acl_cnt += conn->sent;
1150 		}
1151 	}
1152 
1153 	skb_queue_purge(&conn->data_q);
1154 
1155 	/* Remove the connection from the list and cleanup its remaining
1156 	 * state. This is a separate function since for some cases like
1157 	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1158 	 * rest of hci_conn_del.
1159 	 */
1160 	hci_conn_cleanup(conn);
1161 
1162 	/* Dequeue callbacks using connection pointer as data */
1163 	hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
1164 }
1165 
1166 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1167 {
1168 	int use_src = bacmp(src, BDADDR_ANY);
1169 	struct hci_dev *hdev = NULL, *d;
1170 
1171 	BT_DBG("%pMR -> %pMR", src, dst);
1172 
1173 	read_lock(&hci_dev_list_lock);
1174 
1175 	list_for_each_entry(d, &hci_dev_list, list) {
1176 		if (!test_bit(HCI_UP, &d->flags) ||
1177 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
1178 			continue;
1179 
1180 		/* Simple routing:
1181 		 *   No source address - find interface with bdaddr != dst
1182 		 *   Source address    - find interface with bdaddr == src
1183 		 */
1184 
1185 		if (use_src) {
1186 			bdaddr_t id_addr;
1187 			u8 id_addr_type;
1188 
1189 			if (src_type == BDADDR_BREDR) {
1190 				if (!lmp_bredr_capable(d))
1191 					continue;
1192 				bacpy(&id_addr, &d->bdaddr);
1193 				id_addr_type = BDADDR_BREDR;
1194 			} else {
1195 				if (!lmp_le_capable(d))
1196 					continue;
1197 
1198 				hci_copy_identity_address(d, &id_addr,
1199 							  &id_addr_type);
1200 
1201 				/* Convert from HCI to three-value type */
1202 				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1203 					id_addr_type = BDADDR_LE_PUBLIC;
1204 				else
1205 					id_addr_type = BDADDR_LE_RANDOM;
1206 			}
1207 
1208 			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1209 				hdev = d; break;
1210 			}
1211 		} else {
1212 			if (bacmp(&d->bdaddr, dst)) {
1213 				hdev = d; break;
1214 			}
1215 		}
1216 	}
1217 
1218 	if (hdev)
1219 		hdev = hci_dev_hold(hdev);
1220 
1221 	read_unlock(&hci_dev_list_lock);
1222 	return hdev;
1223 }
1224 EXPORT_SYMBOL(hci_get_route);
1225 
1226 /* This function requires the caller holds hdev->lock */
1227 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1228 {
1229 	struct hci_dev *hdev = conn->hdev;
1230 
1231 	hci_connect_le_scan_cleanup(conn, status);
1232 
1233 	/* Enable advertising in case this was a failed connection
1234 	 * attempt as a peripheral.
1235 	 */
1236 	hci_enable_advertising(hdev);
1237 }
1238 
1239 /* This function requires the caller holds hdev->lock */
1240 void hci_conn_failed(struct hci_conn *conn, u8 status)
1241 {
1242 	struct hci_dev *hdev = conn->hdev;
1243 
1244 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1245 
1246 	switch (conn->type) {
1247 	case LE_LINK:
1248 		hci_le_conn_failed(conn, status);
1249 		break;
1250 	case ACL_LINK:
1251 		mgmt_connect_failed(hdev, conn, status);
1252 		break;
1253 	}
1254 
1255 	/* In case BIG/PA sync failed, clear the conn flags so that
1256 	 * the conns will be correctly cleaned up by the ISO layer
1257 	 */
1258 	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1259 	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1260 
1261 	conn->state = BT_CLOSED;
1262 	hci_connect_cfm(conn, status);
1263 	hci_conn_del(conn);
1264 }
1265 
1266 /* This function requires the caller holds hdev->lock */
1267 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1268 {
1269 	struct hci_dev *hdev = conn->hdev;
1270 
1271 	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1272 
1273 	if (conn->handle == handle)
1274 		return 0;
1275 
1276 	if (handle > HCI_CONN_HANDLE_MAX) {
1277 		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1278 			   handle, HCI_CONN_HANDLE_MAX);
1279 		return HCI_ERROR_INVALID_PARAMETERS;
1280 	}
1281 
1282 	/* If abort_reason has been set it means the connection is being
1283 	 * aborted and the handle shall not be changed.
1284 	 */
1285 	if (conn->abort_reason)
1286 		return conn->abort_reason;
1287 
1288 	if (HCI_CONN_HANDLE_UNSET(conn->handle))
1289 		ida_free(&hdev->unset_handle_ida, conn->handle);
1290 
1291 	conn->handle = handle;
1292 
1293 	return 0;
1294 }
1295 
1296 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1297 				u8 dst_type, bool dst_resolved, u8 sec_level,
1298 				u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
1299 {
1300 	struct hci_conn *conn;
1301 	struct smp_irk *irk;
1302 	int err;
1303 
1304 	/* Let's make sure that LE is enabled. */
1305 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1306 		if (lmp_le_capable(hdev))
1307 			return ERR_PTR(-ECONNREFUSED);
1308 
1309 		return ERR_PTR(-EOPNOTSUPP);
1310 	}
1311 
1312 	/* Since the controller supports only one LE connection attempt at a
1313 	 * time, we return -EBUSY if there is any connection attempt running.
1314 	 */
1315 	if (hci_lookup_le_connect(hdev))
1316 		return ERR_PTR(-EBUSY);
1317 
1318 	/* If there's already a connection object but it's not in
1319 	 * scanning state it means it must already be established, in
1320 	 * which case we can't do anything else except report a failure
1321 	 * to connect.
1322 	 */
1323 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1324 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1325 		return ERR_PTR(-EBUSY);
1326 	}
1327 
1328 	/* Check if the destination address has been resolved by the
1329 	 * controller; if it has, the identity address shall be used.
1330 	 */
1331 	if (!dst_resolved) {
1332 		/* When given an identity address with existing identity
1333 		 * resolving key, the connection needs to be established
1334 		 * to a resolvable random address.
1335 		 *
1336 		 * Storing the resolvable random address is required here
1337 		 * to handle connection failures. The address will later
1338 		 * be resolved back into the original identity address
1339 		 * from the connect request.
1340 		 */
1341 		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1342 		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1343 			dst = &irk->rpa;
1344 			dst_type = ADDR_LE_DEV_RANDOM;
1345 		}
1346 	}
1347 
1348 	if (conn) {
1349 		bacpy(&conn->dst, dst);
1350 	} else {
1351 		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1352 		if (IS_ERR(conn))
1353 			return conn;
1354 		hci_conn_hold(conn);
1355 		conn->pending_sec_level = sec_level;
1356 	}
1357 
1358 	conn->dst_type = dst_type;
1359 	conn->sec_level = BT_SECURITY_LOW;
1360 	conn->conn_timeout = conn_timeout;
1361 	conn->le_adv_phy = phy;
1362 	conn->le_adv_sec_phy = sec_phy;
1363 
1364 	err = hci_connect_le_sync(hdev, conn);
1365 	if (err) {
1366 		hci_conn_del(conn);
1367 		return ERR_PTR(err);
1368 	}
1369 
1370 	return conn;
1371 }
1372 
1373 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1374 {
1375 	struct hci_conn *conn;
1376 
1377 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1378 	if (!conn)
1379 		return false;
1380 
1381 	if (conn->state != BT_CONNECTED)
1382 		return false;
1383 
1384 	return true;
1385 }
1386 
1387 /* This function requires the caller holds hdev->lock */
1388 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1389 					bdaddr_t *addr, u8 addr_type)
1390 {
1391 	struct hci_conn_params *params;
1392 
1393 	if (is_connected(hdev, addr, addr_type))
1394 		return -EISCONN;
1395 
1396 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1397 	if (!params) {
1398 		params = hci_conn_params_add(hdev, addr, addr_type);
1399 		if (!params)
1400 			return -ENOMEM;
1401 
1402 		/* If we created new params, mark them to be deleted in
1403 		 * hci_connect_le_scan_cleanup. It's a different case than the
1404 		 * existing disabled params; those will stay after cleanup.
1405 		 */
1406 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1407 	}
1408 
1409 	/* We're trying to connect, so make sure params are at pend_le_conns */
1410 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1411 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1412 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1413 		hci_pend_le_list_del_init(params);
1414 		hci_pend_le_list_add(params, &hdev->pend_le_conns);
1415 	}
1416 
1417 	params->explicit_connect = true;
1418 
1419 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1420 	       params->auto_connect);
1421 
1422 	return 0;
1423 }
1424 
1425 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1426 {
1427 	struct hci_conn *conn;
1428 	u8  big;
1429 
1430 	/* Allocate a BIG if not set */
1431 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1432 		for (big = 0x00; big < 0xef; big++) {
1433 
1434 			conn = hci_conn_hash_lookup_big(hdev, big);
1435 			if (!conn)
1436 				break;
1437 		}
1438 
1439 		if (big == 0xef)
1440 			return -EADDRNOTAVAIL;
1441 
1442 		/* Update BIG */
1443 		qos->bcast.big = big;
1444 	}
1445 
1446 	return 0;
1447 }
1448 
1449 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1450 {
1451 	struct hci_conn *conn;
1452 	u8  bis;
1453 
1454 	/* Allocate BIS if not set */
1455 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1456 		if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
1457 			conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1458 
1459 			if (conn) {
1460 				/* If the BIG handle is already matched to an advertising
1461 				 * handle, do not allocate a new one.
1462 				 */
1463 				qos->bcast.bis = conn->iso_qos.bcast.bis;
1464 				return 0;
1465 			}
1466 		}
1467 
1468 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1469 		 * since it is reserved as the general purpose set.
1470 		 */
1471 		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1472 		     bis++) {
1473 
1474 			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1475 			if (!conn)
1476 				break;
1477 		}
1478 
1479 		if (bis == hdev->le_num_of_adv_sets)
1480 			return -EADDRNOTAVAIL;
1481 
1482 		/* Update BIS */
1483 		qos->bcast.bis = bis;
1484 	}
1485 
1486 	return 0;
1487 }
1488 
1489 /* This function requires the caller holds hdev->lock */
1490 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1491 				    struct bt_iso_qos *qos, __u8 base_len,
1492 				    __u8 *base)
1493 {
1494 	struct hci_conn *conn;
1495 	int err;
1496 
1497 	/* Let's make sure that LE is enabled. */
1498 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1499 		if (lmp_le_capable(hdev))
1500 			return ERR_PTR(-ECONNREFUSED);
1501 		return ERR_PTR(-EOPNOTSUPP);
1502 	}
1503 
1504 	err = qos_set_big(hdev, qos);
1505 	if (err)
1506 		return ERR_PTR(err);
1507 
1508 	err = qos_set_bis(hdev, qos);
1509 	if (err)
1510 		return ERR_PTR(err);
1511 
1512 	/* Check if the LE Create BIG command has already been sent */
1513 	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1514 						qos->bcast.big);
1515 	if (conn)
1516 		return ERR_PTR(-EADDRINUSE);
1517 
1518 	/* Check BIS settings against other bound BISes, since all
1519 	 * BISes in a BIG must have the same value for all parameters
1520 	 */
1521 	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1522 
1523 	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1524 		     base_len != conn->le_per_adv_data_len ||
1525 		     memcmp(conn->le_per_adv_data, base, base_len)))
1526 		return ERR_PTR(-EADDRINUSE);
1527 
1528 	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1529 	if (IS_ERR(conn))
1530 		return conn;
1531 
1532 	conn->state = BT_CONNECT;
1533 
1534 	hci_conn_hold(conn);
1535 	return conn;
1536 }
1537 
1538 /* This function requires the caller holds hdev->lock */
1539 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1540 				     u8 dst_type, u8 sec_level,
1541 				     u16 conn_timeout,
1542 				     enum conn_reasons conn_reason)
1543 {
1544 	struct hci_conn *conn;
1545 
1546 	/* Let's make sure that LE is enabled. */
1547 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1548 		if (lmp_le_capable(hdev))
1549 			return ERR_PTR(-ECONNREFUSED);
1550 
1551 		return ERR_PTR(-EOPNOTSUPP);
1552 	}
1553 
1554 	/* Some devices send ATT messages as soon as the physical link is
1555 	 * established. To be able to handle these ATT messages, the user-
1556 	 * space first establishes the connection and then starts the pairing
1557 	 * process.
1558 	 *
1559 	 * So if a hci_conn object already exists for the following connection
1560 	 * attempt, we simply update pending_sec_level and auth_type fields
1561 	 * and return the object found.
1562 	 */
1563 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1564 	if (conn) {
1565 		if (conn->pending_sec_level < sec_level)
1566 			conn->pending_sec_level = sec_level;
1567 		goto done;
1568 	}
1569 
1570 	BT_DBG("requesting refresh of dst_addr");
1571 
1572 	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1573 	if (IS_ERR(conn))
1574 		return conn;
1575 
1576 	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1577 		hci_conn_del(conn);
1578 		return ERR_PTR(-EBUSY);
1579 	}
1580 
1581 	conn->state = BT_CONNECT;
1582 	set_bit(HCI_CONN_SCANNING, &conn->flags);
1583 	conn->dst_type = dst_type;
1584 	conn->sec_level = BT_SECURITY_LOW;
1585 	conn->pending_sec_level = sec_level;
1586 	conn->conn_timeout = conn_timeout;
1587 	conn->conn_reason = conn_reason;
1588 
1589 	hci_update_passive_scan(hdev);
1590 
1591 done:
1592 	hci_conn_hold(conn);
1593 	return conn;
1594 }
1595 
1596 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1597 				 u8 sec_level, u8 auth_type,
1598 				 enum conn_reasons conn_reason, u16 timeout)
1599 {
1600 	struct hci_conn *acl;
1601 
1602 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1603 		if (lmp_bredr_capable(hdev))
1604 			return ERR_PTR(-ECONNREFUSED);
1605 
1606 		return ERR_PTR(-EOPNOTSUPP);
1607 	}
1608 
1609 	/* Reject an outgoing connection to a device with the same BD_ADDR, to
1610 	 * guard against CVE-2020-26555.
1611 	 */
1612 	if (!bacmp(&hdev->bdaddr, dst)) {
1613 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1614 			   dst);
1615 		return ERR_PTR(-ECONNREFUSED);
1616 	}
1617 
1618 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1619 	if (!acl) {
1620 		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1621 		if (IS_ERR(acl))
1622 			return acl;
1623 	}
1624 
1625 	hci_conn_hold(acl);
1626 
1627 	acl->conn_reason = conn_reason;
1628 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1629 		int err;
1630 
1631 		acl->sec_level = BT_SECURITY_LOW;
1632 		acl->pending_sec_level = sec_level;
1633 		acl->auth_type = auth_type;
1634 		acl->conn_timeout = timeout;
1635 
1636 		err = hci_connect_acl_sync(hdev, acl);
1637 		if (err) {
1638 			hci_conn_del(acl);
1639 			return ERR_PTR(err);
1640 		}
1641 	}
1642 
1643 	return acl;
1644 }
1645 
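/* Create (or return the existing) hci_link tying a SCO/ISO connection to its
 * parent ACL connection. The link holds references on both the child and the
 * parent connection.
 */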
1646 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1647 				      struct hci_conn *conn)
1648 {
1649 	struct hci_dev *hdev = parent->hdev;
1650 	struct hci_link *link;
1651 
1652 	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1653 
1654 	if (conn->link)
1655 		return conn->link;
1656 
1657 	if (conn->parent)
1658 		return NULL;
1659 
1660 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1661 	if (!link)
1662 		return NULL;
1663 
1664 	link->conn = hci_conn_hold(conn);
1665 	conn->link = link;
1666 	conn->parent = hci_conn_get(parent);
1667 
1668 	/* Use list_add_tail_rcu to append to the list */
1669 	list_add_tail_rcu(&link->list, &parent->link_list);
1670 
1671 	return link;
1672 }
1673 
1674 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1675 				 __u16 setting, struct bt_codec *codec,
1676 				 u16 timeout)
1677 {
1678 	struct hci_conn *acl;
1679 	struct hci_conn *sco;
1680 	struct hci_link *link;
1681 
1682 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1683 			      CONN_REASON_SCO_CONNECT, timeout);
1684 	if (IS_ERR(acl))
1685 		return acl;
1686 
1687 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1688 	if (!sco) {
1689 		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1690 		if (IS_ERR(sco)) {
1691 			hci_conn_drop(acl);
1692 			return sco;
1693 		}
1694 	}
1695 
1696 	link = hci_conn_link(acl, sco);
1697 	if (!link) {
1698 		hci_conn_drop(acl);
1699 		hci_conn_drop(sco);
1700 		return ERR_PTR(-ENOLINK);
1701 	}
1702 
1703 	sco->setting = setting;
1704 	sco->codec = *codec;
1705 
1706 	if (acl->state == BT_CONNECTED &&
1707 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1708 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1709 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1710 
1711 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1712 			/* defer SCO setup until mode change completed */
1713 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1714 			return sco;
1715 		}
1716 
1717 		hci_sco_setup(acl, 0x00);
1718 	}
1719 
1720 	return sco;
1721 }
1722 
1723 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1724 {
1725 	struct hci_dev *hdev = conn->hdev;
1726 	struct hci_cp_le_create_big cp;
1727 	struct iso_list_data data;
1728 
1729 	memset(&cp, 0, sizeof(cp));
1730 
1731 	data.big = qos->bcast.big;
1732 	data.bis = qos->bcast.bis;
1733 	data.count = 0;
1734 
1735 	/* Create a BIS for each bound connection */
1736 	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1737 				 BT_BOUND, &data);
1738 
1739 	cp.handle = qos->bcast.big;
1740 	cp.adv_handle = qos->bcast.bis;
1741 	cp.num_bis  = data.count;
1742 	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1743 	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1744 	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1745 	cp.bis.rtn  = qos->bcast.out.rtn;
1746 	cp.bis.phy  = qos->bcast.out.phy;
1747 	cp.bis.packing = qos->bcast.packing;
1748 	cp.bis.framing = qos->bcast.framing;
1749 	cp.bis.encryption = qos->bcast.encryption;
1750 	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1751 
1752 	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1753 }
1754 
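/* (Re)program every CIS that belongs to the given CIG with a single
 * LE Set CIG Parameters command, collecting the per-CIS QoS from the
 * individual hci_conn objects.
 */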
1755 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1756 {
1757 	DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f);
1758 	u8 cig_id = PTR_UINT(data);
1759 	struct hci_conn *conn;
1760 	struct bt_iso_qos *qos;
1761 	u8 aux_num_cis = 0;
1762 	u8 cis_id;
1763 
1764 	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1765 	if (!conn)
1766 		return 0;
1767 
1768 	qos = &conn->iso_qos;
1769 	pdu->cig_id = cig_id;
1770 	hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval);
1771 	hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval);
1772 	pdu->sca = qos->ucast.sca;
1773 	pdu->packing = qos->ucast.packing;
1774 	pdu->framing = qos->ucast.framing;
1775 	pdu->c_latency = cpu_to_le16(qos->ucast.out.latency);
1776 	pdu->p_latency = cpu_to_le16(qos->ucast.in.latency);
1777 
1778 	/* Reprogram all CIS(s) with the same CIG, valid range are:
1779 	 * num_cis: 0x00 to 0x1F
1780 	 * cis_id: 0x00 to 0xEF
1781 	 */
1782 	for (cis_id = 0x00; cis_id < 0xf0 &&
1783 	     aux_num_cis < pdu->num_cis; cis_id++) {
1784 		struct hci_cis_params *cis;
1785 
1786 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1787 		if (!conn)
1788 			continue;
1789 
1790 		qos = &conn->iso_qos;
1791 
1792 		cis = &pdu->cis[aux_num_cis++];
1793 		cis->cis_id = cis_id;
1794 		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1795 		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1796 		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1797 			      qos->ucast.in.phy;
1798 		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1799 			      qos->ucast.out.phy;
1800 		cis->c_rtn  = qos->ucast.out.rtn;
1801 		cis->p_rtn  = qos->ucast.in.rtn;
1802 	}
1803 	pdu->num_cis = aux_num_cis;
1804 
1805 	if (!pdu->num_cis)
1806 		return 0;
1807 
1808 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1809 				     struct_size(pdu, cis, pdu->num_cis),
1810 				     pdu, HCI_CMD_TIMEOUT);
1811 }

static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate the first still-reconfigurable CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
			data.count = 0;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECT, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xf0)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
					     qos->ucast.cis))
			return false;
		goto done;
	}

	/* Allocate the first available CIS if not set */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
	     data.cis++) {
		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
					      data.cis)) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			break;
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
		return false;

done:
	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
			       UINT_PTR(qos->ucast.cig), NULL) < 0)
		return false;

	return true;
}
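
/* How the allocation above behaves, as a minimal sketch (illustrative
 * only; cis_conn and conn_qos are hypothetical caller variables):
 * leaving both identifiers unset requests the first reconfigurable CIG
 * and the first free CIS ID within it.
 *
 *	struct bt_iso_qos qos = *conn_qos;
 *
 *	qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;	// first free CIG
 *	qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;	// first free CIS
 *	if (!hci_le_set_cig_params(cis_conn, &qos))
 *		return -EINVAL;	// no free CIG/CIS or queueing failed
 */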

struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(cis))
			return cis;
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if the CIS has already been set up and the settings match */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If the output interval is not set use the input interval as it
	 * cannot be 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If the input interval is not set use the output interval as it
	 * cannot be 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If the output latency is not set use the input latency as it
	 * cannot be 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If the input latency is not set use the output latency as it
	 * cannot be 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	hci_conn_hold(cis);

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
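
/* The interval/latency mirroring above means a unidirectional setup
 * only needs one direction filled in; a sketch of the net effect
 * (illustrative, with hypothetical values):
 *
 *	qos->ucast.out.interval = 10000;	// 10 ms
 *	qos->ucast.in.interval = 0;		// unset
 *	cis = hci_bind_cis(hdev, dst, dst_type, qos);
 *	// on success qos->ucast.in.interval has been mirrored to 10000
 */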

bool hci_iso_setup_path(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_setup_iso_path cmd;

	memset(&cmd, 0, sizeof(cmd));

	if (conn->iso_qos.ucast.out.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x00; /* Input (Host to Controller) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	if (conn->iso_qos.ucast.in.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x01; /* Output (Controller to Host) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	return true;
}
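
/* The parameter values above follow the LE Setup ISO Data Path command:
 * direction 0x00 is host-to-controller (input), 0x01 is
 * controller-to-host (output), path 0x00 selects HCI as the data
 * transport, and codec 0x03 is Transparent Data.  A hand-rolled
 * equivalent for a single direction might look like this (sketch only;
 * conn is assumed valid with a set handle, remaining fields zeroed):
 *
 *	struct hci_cp_le_setup_iso_path cmd = {
 *		.handle    = cpu_to_le16(conn->handle),
 *		.direction = 0x00,	// Input (Host to Controller)
 *		.path      = 0x00,	// HCI
 *		.codec     = 0x03,	// Transparent Data
 *	};
 *
 *	hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), &cmd);
 */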

int hci_conn_check_create_cis(struct hci_conn *conn)
{
	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
		return -EINVAL;

	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
		return 1;

	return 0;
}

static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev);
}

int hci_le_create_cis_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	bool pending = false;

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
			rcu_read_unlock();
			return -EBUSY;
		}

		if (!hci_conn_check_create_cis(conn))
			pending = true;
	}

	rcu_read_unlock();

	if (!pending)
		return 0;

	/* Queue Create CIS */
	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
}

static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (!qos->sdu && qos->phy)
		qos->sdu = conn->mtu;

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set */
	if (!qos->interval)
		/* Convert the ACL interval from 1.25 ms units to us */
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
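
/* A worked example of the interval fallback above (illustrative): an
 * ACL connection interval of 0x0028 (40 * 1.25 ms = 50 ms) yields
 *
 *	qos->interval = 40 * 1250;	// 50000 us
 *
 * so an ISO stream created over that ACL defaults to a 50 ms SDU
 * interval unless the socket requested one explicitly.
 */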

static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;

	if (qos->bcast.bis)
		sync_interval = interval * 4;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}

static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_cp_le_pa_create_sync *cp = data;

	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(cp);
}

static int create_pa_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync *cp = data;
	int err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
	if (err) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return err;
	}

	return hci_update_passive_scan_sync(hdev);
}

struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
				    __u8 dst_type, __u8 sid,
				    struct bt_iso_qos *qos)
{
	struct hci_cp_le_pa_create_sync *cp;
	struct hci_conn *conn;
	int err;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return ERR_PTR(-EBUSY);

	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
	if (IS_ERR(conn))
		return conn;

	conn->iso_qos = *qos;
	conn->state = BT_LISTEN;

	hci_conn_hold(conn);

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		hci_conn_drop(conn);
		return ERR_PTR(-ENOMEM);
	}

	cp->options = qos->bcast.options;
	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);
	cp->skip = cpu_to_le16(qos->bcast.skip);
	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp->sync_cte_type = qos->bcast.sync_cte_type;

	/* Queue start pa_create_sync and scan */
	err = hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		kfree(cp);
		return ERR_PTR(err);
	}

	return conn;
}
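
/* Minimal caller sketch for the helper above (illustrative only): a
 * caller such as the ISO socket layer resolves sid/dst from the socket
 * address and queues the PA sync; only one PA create sync may be
 * pending at a time, hence the -EBUSY path.
 *
 *	struct hci_conn *pa;
 *
 *	pa = hci_pa_create_sync(hdev, dst, dst_type, sid, qos);
 *	if (IS_ERR(pa))
 *		return PTR_ERR(pa);	// e.g. -EBUSY while one is pending
 */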

int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
			   struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
	int err;

	if (num_bis < 0x01 || num_bis > pdu->num_bis)
		return -EINVAL;

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	if (hcon)
		hcon->iso_qos.bcast.big = qos->bcast.big;

	pdu->handle = qos->bcast.big;
	pdu->sync_handle = cpu_to_le16(sync_handle);
	pdu->encryption = qos->bcast.encryption;
	memcpy(pdu->bcode, qos->bcast.bcode, sizeof(pdu->bcode));
	pdu->mse = qos->bcast.mse;
	pdu->timeout = cpu_to_le16(qos->bcast.timeout);
	pdu->num_bis = num_bis;
	memcpy(pdu->bis, bis, num_bis);

	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    struct_size(pdu, bis, num_bis), pdu);
}
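
/* Illustrative use of the helper above once a Periodic Advertising
 * sync reports BIG Info: the listener picks the BIS indices it wants
 * (a sketch; hcon, qos and the host-order pa_sync_handle come from
 * the caller).
 *
 *	__u8 bis[] = { 0x01, 0x02 };
 *	int err;
 *
 *	err = hci_le_big_create_sync(hdev, hcon, qos, pa_sync_handle,
 *				     ARRAY_SIZE(bis), bis);
 *	if (err)
 *		return err;
 */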

static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (err) {
		bt_dev_err(hdev, "Unable to create BIG: %d", err);
		hci_connect_cfm(conn, err);
		hci_conn_del(conn);
	}
}

struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
			      struct bt_iso_qos *qos,
			      __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	struct hci_conn *parent;
	__u8 eir[HCI_MAX_PER_AD_LENGTH];
	struct hci_link *link;

	/* Look for any BIS that is open for rebinding */
	conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN);
	if (conn) {
		memcpy(qos, &conn->iso_qos, sizeof(*qos));
		conn->state = BT_CONNECTED;
		return conn;
	}

	if (base_len && base)
		base_len = eir_append_service_data(eir, 0, 0x1851,
						   base, base_len);

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
	if (IS_ERR(conn))
		return conn;

	/* Update LINK PHYs according to QoS preference */
	conn->le_tx_phy = qos->bcast.out.phy;

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		memcpy(conn->le_per_adv_data, eir, sizeof(eir));
		conn->le_per_adv_data_len = base_len;
	}

	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	conn->iso_qos = *qos;
	conn->state = BT_BOUND;

	/* Link BISes together */
	parent = hci_conn_hash_lookup_big(hdev,
					  conn->iso_qos.bcast.big);
	if (parent && parent != conn) {
		link = hci_conn_link(parent, conn);
		if (!link) {
			hci_conn_drop(conn);
			return ERR_PTR(-ENOLINK);
		}

		/* Link takes the refcount */
		hci_conn_drop(conn);
	}

	return conn;
}

static void bis_mark_per_adv(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.bcast.big ||
	    d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	set_bit(HCI_CONN_PER_ADV, &conn->flags);
}

struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;
	struct iso_list_data data;

	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
	if (IS_ERR(conn))
		return conn;

	if (conn->state == BT_CONNECTED)
		return conn;

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;

	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
	 * the start periodic advertising and create BIG commands have
	 * been queued
	 */
	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
				 BT_BOUND, &data);

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	return conn;
}
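
/* The broadcaster flow end to end, as a minimal sketch (illustrative
 * only; dst is typically BDADDR_ANY for a broadcaster): each BIS of
 * the BIG is bound, then the periodic advertising and LE Create BIG
 * commands are queued exactly once; on failure create_big_complete()
 * tears the connection down.
 *
 *	conn = hci_connect_bis(hdev, BDADDR_ANY, 0, qos, base_len, base);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */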

struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE, 0, 0);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return ERR_PTR(-ENOLINK);
	}

	/* Link takes the refcount */
	hci_conn_drop(cis);

	cis->state = BT_CONNECT;

	hci_le_create_cis_pending(hdev);

	return cis;
}
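
/* Central-side CIS flow in short (illustrative): the LE ACL link is
 * established or reused first, QoS gaps are filled from the ACL
 * parameters, the CIS is bound into a CIG, and finally LE Create CIS
 * is queued once no other Create CIS is in flight.
 *
 *	cis = hci_connect_cis(hdev, dst, dst_type, qos);
 *	if (IS_ERR(cis))
 *		return PTR_ERR(cis);
 */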

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	/* AES encryption is required for Level 4:
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
	 * page 1319:
	 *
	 * 128-bit equivalent strength for link and encryption keys
	 * required using FIPS approved algorithms (E0 not allowed,
	 * SAFER+ not allowed, and P-192 not allowed; encryption key
	 * not shortened)
	 */
	if (conn->sec_level == BT_SECURITY_FIPS &&
	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
		bt_dev_err(conn->hdev,
			   "Invalid security: Missing AES-CCM usage");
		return 0;
	}

	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* Set the ENCRYPT_PEND to trigger encryption after
		 * authentication.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;

		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	 * key.
	 */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	switch (conn->key_type) {
	case HCI_LK_AUTH_COMBINATION_P256:
		/* An authenticated FIPS approved combination key has
		 * sufficient security for security level 4 or lower.
		 */
		if (sec_level <= BT_SECURITY_FIPS)
			goto encrypt;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		/* An authenticated combination key has sufficient security for
		 * security level 3 or lower.
		 */
		if (sec_level <= BT_SECURITY_HIGH)
			goto encrypt;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		/* An unauthenticated combination key has sufficient security
		 * for security level 2 or lower.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM)
			goto encrypt;
		break;
	case HCI_LK_COMBINATION:
		/* A combination key always has sufficient security for
		 * security level 2 or lower. High security level requires
		 * that the combination key is generated using the maximum
		 * PIN code length (16). This applies to pre-2.1 units.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
			goto encrypt;
		break;
	default:
		break;
	}

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
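
/* Typical caller pattern, sketched (illustrative only; hcon, sec_level,
 * auth_type and initiator come from the caller): upper layers such as
 * L2CAP raise security and, on a 0 return, park the operation until the
 * auth/encrypt complete events re-drive it.
 *
 *	if (!hci_conn_security(hcon, sec_level, auth_type, initiator))
 *		return 0;	// pending; resumed from security events
 *
 *	// requirements already met, continue with the connection
 */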

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	/* Accept if non-secure or higher security level is required */
	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
		return 1;

	/* Accept if secure or higher security level is already present */
	if (conn->sec_level == BT_SECURITY_HIGH ||
	    conn->sec_level == BT_SECURITY_FIPS)
		return 1;

	/* Otherwise the link is not secure; reject it */
	return 0;
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;

		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	/* We should not traverse the list here, because hci_conn_del
	 * can remove extra links, which may cause the list traversal
	 * to hit items that have already been released.
	 */
	while ((conn = list_first_entry_or_null(head,
						struct hci_conn,
						list)) != NULL) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(conn);
	}
}

static u32 get_link_mode(struct hci_conn *conn)
{
	u32 link_mode = 0;

	if (conn->role == HCI_ROLE_MASTER)
		link_mode |= HCI_LM_MASTER;

	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		link_mode |= HCI_LM_ENCRYPT;

	if (test_bit(HCI_CONN_AUTH, &conn->flags))
		link_mode |= HCI_LM_AUTH;

	if (test_bit(HCI_CONN_SECURE, &conn->flags))
		link_mode |= HCI_LM_SECURE;

	if (test_bit(HCI_CONN_FIPS, &conn->flags))
		link_mode |= HCI_LM_FIPS;

	return link_mode;
}

int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
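
/* Userspace view, as a sketch (illustrative only; assumes a raw HCI
 * socket fd): this handler backs the HCIGETCONNLIST ioctl, so a tool
 * can dump active handles roughly like this (error handling elided):
 *
 *	struct hci_conn_list_req *cl;
 *
 *	cl = malloc(sizeof(*cl) + 10 * sizeof(struct hci_conn_info));
 *	cl->dev_id = 0;		// hci0
 *	cl->conn_num = 10;	// room for up to 10 entries
 *	ioctl(fd, HCIGETCONNLIST, cl);
 *	// cl->conn_num now holds the number of entries filled in
 */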

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	/* Prevent new hci_chan's from being created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}

struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}

u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
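
/* Decoding the returned mask, as a sketch (illustrative only): these
 * BT_PHY_* bits are the same ones userspace sees through the BT_PHY
 * socket option, so callers can test them directly.
 *
 *	u32 phys = hci_conn_get_phy(conn);
 *
 *	if (phys & BT_PHY_LE_2M_TX)
 *		;	// transmitting on the LE 2M PHY
 */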

static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_dev *hdev = conn->hdev;

	/* If abort_reason has already been set it means the connection is
	 * already being aborted, so don't attempt to overwrite it.
	 */
	if (conn->abort_reason)
		return 0;

	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);

	conn->abort_reason = reason;

	/* If the connection is pending, check the command opcode since it
	 * might be blocking on hci_cmd_sync_work while waiting for its
	 * respective event, in which case hci_cmd_sync_cancel() is needed
	 * to cancel it.
	 *
	 * hci_connect_le serializes the connection attempts so only one
	 * connection can be in BT_CONNECT at a time.
	 */
	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
		switch (hci_skb_event(hdev->sent_cmd)) {
		case HCI_EV_CONN_COMPLETE:
		case HCI_EV_LE_CONN_COMPLETE:
		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		case HCI_EVT_LE_CIS_ESTABLISHED:
			hci_cmd_sync_cancel(hdev, ECANCELED);
			break;
		}
	/* Cancel connect attempt if still queued/pending */
	} else if (!hci_cancel_connect_sync(hdev, conn)) {
		return 0;
	}

	/* Run immediately if on cmd_sync_work since this may be called
	 * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which already
	 * queues its callback on cmd_sync_work.
	 */
	return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
}