xref: /linux/net/bluetooth/hci_conn.c (revision e5763491237ffee22d9b554febc2d00669f81dee)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023-2024 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 #include <linux/errqueue.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/l2cap.h>
35 #include <net/bluetooth/iso.h>
36 #include <net/bluetooth/mgmt.h>
37 
38 #include "smp.h"
39 #include "eir.h"
40 
41 struct sco_param {
42 	u16 pkt_type;
43 	u16 max_latency;
44 	u8  retrans_effort;
45 };
46 
47 struct conn_handle_t {
48 	struct hci_conn *conn;
49 	__u16 handle;
50 };
51 
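/* The parameter tables below mirror the eSCO/SCO audio settings defined
 * by the HFP specification (S1-S3 and D0/D1 for CVSD, T1/T2 for mSBC).
 * max_latency is in milliseconds and retrans_effort uses the HCI
 * encoding (0x01 = optimize for power, 0x02 = optimize for link
 * quality, 0xff = don't care). Entries are ordered from most to least
 * preferred; conn->attempt selects the next entry on each retry.
 */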
52 static const struct sco_param esco_param_cvsd[] = {
53 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
54 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
55 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
56 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
57 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
58 };
59 
60 static const struct sco_param sco_param_cvsd[] = {
61 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
62 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
63 };
64 
65 static const struct sco_param esco_param_msbc[] = {
66 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
67 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
68 };
69 
70 /* This function requires the caller holds hdev->lock */
71 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
72 {
73 	struct hci_conn_params *params;
74 	struct hci_dev *hdev = conn->hdev;
75 	struct smp_irk *irk;
76 	bdaddr_t *bdaddr;
77 	u8 bdaddr_type;
78 
79 	bdaddr = &conn->dst;
80 	bdaddr_type = conn->dst_type;
81 
82 	/* Check if we need to convert to identity address */
83 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
84 	if (irk) {
85 		bdaddr = &irk->bdaddr;
86 		bdaddr_type = irk->addr_type;
87 	}
88 
89 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
90 					   bdaddr_type);
91 	if (!params)
92 		return;
93 
94 	if (params->conn) {
95 		hci_conn_drop(params->conn);
96 		hci_conn_put(params->conn);
97 		params->conn = NULL;
98 	}
99 
100 	if (!params->explicit_connect)
101 		return;
102 
103 	/* If the status indicates successful cancellation of
104 	 * the attempt (i.e. Unknown Connection Id) there's no point in
105 	 * notifying failure since we'll go back to keep trying to
106 	 * connect. The only exception is explicit connect requests
107 	 * where a timeout + cancel does indicate an actual failure.
108 	 */
109 	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
110 		mgmt_connect_failed(hdev, conn, status);
111 
112 	/* The connection attempt was scanning for a new RPA and is in
113 	 * the scan phase. If the params are not associated with any other
114 	 * autoconnect action, remove them completely. If they are, just
115 	 * unmark them as waiting for connection by clearing the explicit_connect field.
116 	 */
117 	params->explicit_connect = false;
118 
119 	hci_pend_le_list_del_init(params);
120 
121 	switch (params->auto_connect) {
122 	case HCI_AUTO_CONN_EXPLICIT:
123 		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
124 		/* return instead of break to avoid duplicate scan update */
125 		return;
126 	case HCI_AUTO_CONN_DIRECT:
127 	case HCI_AUTO_CONN_ALWAYS:
128 		hci_pend_le_list_add(params, &hdev->pend_le_conns);
129 		break;
130 	case HCI_AUTO_CONN_REPORT:
131 		hci_pend_le_list_add(params, &hdev->pend_le_reports);
132 		break;
133 	default:
134 		break;
135 	}
136 
137 	hci_update_passive_scan(hdev);
138 }
139 
140 static void hci_conn_cleanup(struct hci_conn *conn)
141 {
142 	struct hci_dev *hdev = conn->hdev;
143 
144 	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
145 		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
146 
147 	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
148 		hci_remove_link_key(hdev, &conn->dst);
149 
150 	hci_chan_list_flush(conn);
151 
152 	if (HCI_CONN_HANDLE_UNSET(conn->handle))
153 		ida_free(&hdev->unset_handle_ida, conn->handle);
154 
155 	if (conn->cleanup)
156 		conn->cleanup(conn);
157 
158 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
159 		switch (conn->setting & SCO_AIRMODE_MASK) {
160 		case SCO_AIRMODE_CVSD:
161 		case SCO_AIRMODE_TRANSP:
162 			if (hdev->notify)
163 				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
164 			break;
165 		}
166 	} else {
167 		if (hdev->notify)
168 			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
169 	}
170 
171 	debugfs_remove_recursive(conn->debugfs);
172 
173 	hci_conn_del_sysfs(conn);
174 
175 	hci_dev_put(hdev);
176 }
177 
178 int hci_disconnect(struct hci_conn *conn, __u8 reason)
179 {
180 	BT_DBG("hcon %p", conn);
181 
182 	/* When we are central of an established connection and it enters
183 	 * the disconnect timeout, then go ahead and try to read the
184 	 * current clock offset.  Processing of the result is done
185 	 * within the event handling and hci_clock_offset_evt function.
186 	 */
187 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
188 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
189 		struct hci_dev *hdev = conn->hdev;
190 		struct hci_cp_read_clock_offset clkoff_cp;
191 
192 		clkoff_cp.handle = cpu_to_le16(conn->handle);
193 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
194 			     &clkoff_cp);
195 	}
196 
197 	return hci_abort_conn(conn, reason);
198 }
199 
200 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
201 {
202 	struct hci_dev *hdev = conn->hdev;
203 	struct hci_cp_add_sco cp;
204 
205 	BT_DBG("hcon %p", conn);
206 
207 	conn->state = BT_CONNECT;
208 	conn->out = true;
209 
210 	conn->attempt++;
211 
212 	cp.handle   = cpu_to_le16(handle);
213 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
214 
215 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
216 }
217 
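/* Walk the parameter table from the current conn->attempt onwards,
 * skipping entries that require eSCO 2M support the remote side lacks.
 * Returns true if a usable entry remains; each setup failure retries
 * with attempt incremented, falling back to a less demanding entry.
 */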
218 static bool find_next_esco_param(struct hci_conn *conn,
219 				 const struct sco_param *esco_param, int size)
220 {
221 	if (!conn->parent)
222 		return false;
223 
224 	for (; conn->attempt <= size; conn->attempt++) {
225 		if (lmp_esco_2m_capable(conn->parent) ||
226 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
227 			break;
228 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
229 		       conn, conn->attempt);
230 	}
231 
232 	return conn->attempt <= size;
233 }
234 
235 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
236 {
237 	int err;
238 	__u8 vnd_len, *vnd_data = NULL;
239 	struct hci_op_configure_data_path *cmd = NULL;
240 
241 	/* Do not treat the two checks below as errors, since the first means
242 	 * the user does not want to use HFP offload mode and the second means
243 	 * the vendor controller does not need to send the HCI command below for offload mode.
244 	 */
245 	if (!codec->data_path || !hdev->get_codec_config_data)
246 		return 0;
247 
248 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
249 					  &vnd_data);
250 	if (err < 0)
251 		goto error;
252 
253 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
254 	if (!cmd) {
255 		err = -ENOMEM;
256 		goto error;
257 	}
258 
259 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
260 	if (err < 0)
261 		goto error;
262 
263 	cmd->vnd_len = vnd_len;
264 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
265 
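	/* Per the HCI spec, Data_Path_Direction 0x00 selects the input
	 * path (host to controller) and 0x01 the output path (controller
	 * to host), so the command is issued once per direction.
	 */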
266 	cmd->direction = 0x00;
267 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
268 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
269 
270 	cmd->direction = 0x01;
271 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
272 				    sizeof(*cmd) + vnd_len, cmd,
273 				    HCI_CMD_TIMEOUT);
274 error:
275 
276 	kfree(cmd);
277 	kfree(vnd_data);
278 	return err;
279 }
280 
281 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
282 {
283 	struct conn_handle_t *conn_handle = data;
284 	struct hci_conn *conn = conn_handle->conn;
285 	__u16 handle = conn_handle->handle;
286 	struct hci_cp_enhanced_setup_sync_conn cp;
287 	const struct sco_param *param;
288 
289 	kfree(conn_handle);
290 
291 	if (!hci_conn_valid(hdev, conn))
292 		return -ECANCELED;
293 
294 	bt_dev_dbg(hdev, "hcon %p", conn);
295 
296 	configure_datapath_sync(hdev, &conn->codec);
297 
298 	conn->state = BT_CONNECT;
299 	conn->out = true;
300 
301 	conn->attempt++;
302 
303 	memset(&cp, 0x00, sizeof(cp));
304 
305 	cp.handle   = cpu_to_le16(handle);
306 
307 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
308 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
309 
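	/* Coding format IDs below follow the Bluetooth Assigned Numbers:
	 * 0x02 = CVSD, 0x03 = Transparent, 0x04 = Linear PCM and
	 * 0x05 = mSBC.
	 */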
310 	switch (conn->codec.id) {
311 	case BT_CODEC_MSBC:
312 		if (!find_next_esco_param(conn, esco_param_msbc,
313 					  ARRAY_SIZE(esco_param_msbc)))
314 			return -EINVAL;
315 
316 		param = &esco_param_msbc[conn->attempt - 1];
317 		cp.tx_coding_format.id = 0x05;
318 		cp.rx_coding_format.id = 0x05;
319 		cp.tx_codec_frame_size = __cpu_to_le16(60);
320 		cp.rx_codec_frame_size = __cpu_to_le16(60);
321 		cp.in_bandwidth = __cpu_to_le32(32000);
322 		cp.out_bandwidth = __cpu_to_le32(32000);
323 		cp.in_coding_format.id = 0x04;
324 		cp.out_coding_format.id = 0x04;
325 		cp.in_coded_data_size = __cpu_to_le16(16);
326 		cp.out_coded_data_size = __cpu_to_le16(16);
327 		cp.in_pcm_data_format = 2;
328 		cp.out_pcm_data_format = 2;
329 		cp.in_pcm_sample_payload_msb_pos = 0;
330 		cp.out_pcm_sample_payload_msb_pos = 0;
331 		cp.in_data_path = conn->codec.data_path;
332 		cp.out_data_path = conn->codec.data_path;
333 		cp.in_transport_unit_size = 1;
334 		cp.out_transport_unit_size = 1;
335 		break;
336 
337 	case BT_CODEC_TRANSPARENT:
338 		if (!find_next_esco_param(conn, esco_param_msbc,
339 					  ARRAY_SIZE(esco_param_msbc)))
340 			return -EINVAL;
341 
342 		param = &esco_param_msbc[conn->attempt - 1];
343 		cp.tx_coding_format.id = 0x03;
344 		cp.rx_coding_format.id = 0x03;
345 		cp.tx_codec_frame_size = __cpu_to_le16(60);
346 		cp.rx_codec_frame_size = __cpu_to_le16(60);
347 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
348 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
349 		cp.in_coding_format.id = 0x03;
350 		cp.out_coding_format.id = 0x03;
351 		cp.in_coded_data_size = __cpu_to_le16(16);
352 		cp.out_coded_data_size = __cpu_to_le16(16);
353 		cp.in_pcm_data_format = 2;
354 		cp.out_pcm_data_format = 2;
355 		cp.in_pcm_sample_payload_msb_pos = 0;
356 		cp.out_pcm_sample_payload_msb_pos = 0;
357 		cp.in_data_path = conn->codec.data_path;
358 		cp.out_data_path = conn->codec.data_path;
359 		cp.in_transport_unit_size = 1;
360 		cp.out_transport_unit_size = 1;
361 		break;
362 
363 	case BT_CODEC_CVSD:
364 		if (conn->parent && lmp_esco_capable(conn->parent)) {
365 			if (!find_next_esco_param(conn, esco_param_cvsd,
366 						  ARRAY_SIZE(esco_param_cvsd)))
367 				return -EINVAL;
368 			param = &esco_param_cvsd[conn->attempt - 1];
369 		} else {
370 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
371 				return -EINVAL;
372 			param = &sco_param_cvsd[conn->attempt - 1];
373 		}
374 		cp.tx_coding_format.id = 2;
375 		cp.rx_coding_format.id = 2;
376 		cp.tx_codec_frame_size = __cpu_to_le16(60);
377 		cp.rx_codec_frame_size = __cpu_to_le16(60);
378 		cp.in_bandwidth = __cpu_to_le32(16000);
379 		cp.out_bandwidth = __cpu_to_le32(16000);
380 		cp.in_coding_format.id = 4;
381 		cp.out_coding_format.id = 4;
382 		cp.in_coded_data_size = __cpu_to_le16(16);
383 		cp.out_coded_data_size = __cpu_to_le16(16);
384 		cp.in_pcm_data_format = 2;
385 		cp.out_pcm_data_format = 2;
386 		cp.in_pcm_sample_payload_msb_pos = 0;
387 		cp.out_pcm_sample_payload_msb_pos = 0;
388 		cp.in_data_path = conn->codec.data_path;
389 		cp.out_data_path = conn->codec.data_path;
390 		cp.in_transport_unit_size = 16;
391 		cp.out_transport_unit_size = 16;
392 		break;
393 	default:
394 		return -EINVAL;
395 	}
396 
397 	cp.retrans_effort = param->retrans_effort;
398 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
399 	cp.max_latency = __cpu_to_le16(param->max_latency);
400 
401 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
402 		return -EIO;
403 
404 	return 0;
405 }
406 
407 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
408 {
409 	struct hci_dev *hdev = conn->hdev;
410 	struct hci_cp_setup_sync_conn cp;
411 	const struct sco_param *param;
412 
413 	bt_dev_dbg(hdev, "hcon %p", conn);
414 
415 	conn->state = BT_CONNECT;
416 	conn->out = true;
417 
418 	conn->attempt++;
419 
420 	cp.handle   = cpu_to_le16(handle);
421 
422 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
423 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
424 	cp.voice_setting  = cpu_to_le16(conn->setting);
425 
426 	switch (conn->setting & SCO_AIRMODE_MASK) {
427 	case SCO_AIRMODE_TRANSP:
428 		if (!find_next_esco_param(conn, esco_param_msbc,
429 					  ARRAY_SIZE(esco_param_msbc)))
430 			return false;
431 		param = &esco_param_msbc[conn->attempt - 1];
432 		break;
433 	case SCO_AIRMODE_CVSD:
434 		if (conn->parent && lmp_esco_capable(conn->parent)) {
435 			if (!find_next_esco_param(conn, esco_param_cvsd,
436 						  ARRAY_SIZE(esco_param_cvsd)))
437 				return false;
438 			param = &esco_param_cvsd[conn->attempt - 1];
439 		} else {
440 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
441 				return false;
442 			param = &sco_param_cvsd[conn->attempt - 1];
443 		}
444 		break;
445 	default:
446 		return false;
447 	}
448 
449 	cp.retrans_effort = param->retrans_effort;
450 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
451 	cp.max_latency = __cpu_to_le16(param->max_latency);
452 
453 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
454 		return false;
455 
456 	return true;
457 }
458 
459 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
460 {
461 	int result;
462 	struct conn_handle_t *conn_handle;
463 
464 	if (enhanced_sync_conn_capable(conn->hdev)) {
465 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
466 
467 		if (!conn_handle)
468 			return false;
469 
470 		conn_handle->conn = conn;
471 		conn_handle->handle = handle;
472 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
473 					    conn_handle, NULL);
474 		if (result < 0)
475 			kfree(conn_handle);
476 
477 		return result == 0;
478 	}
479 
480 	return hci_setup_sync_conn(conn, handle);
481 }
482 
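/* Update the stored connection parameters (when any exist for this
 * peer) and send LE Connection Update. Returns 0x01 if stored
 * parameters were found and updated, 0x00 otherwise.
 */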
483 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
484 		      u16 to_multiplier)
485 {
486 	struct hci_dev *hdev = conn->hdev;
487 	struct hci_conn_params *params;
488 	struct hci_cp_le_conn_update cp;
489 
490 	hci_dev_lock(hdev);
491 
492 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
493 	if (params) {
494 		params->conn_min_interval = min;
495 		params->conn_max_interval = max;
496 		params->conn_latency = latency;
497 		params->supervision_timeout = to_multiplier;
498 	}
499 
500 	hci_dev_unlock(hdev);
501 
502 	memset(&cp, 0, sizeof(cp));
503 	cp.handle		= cpu_to_le16(conn->handle);
504 	cp.conn_interval_min	= cpu_to_le16(min);
505 	cp.conn_interval_max	= cpu_to_le16(max);
506 	cp.conn_latency		= cpu_to_le16(latency);
507 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
508 	cp.min_ce_len		= cpu_to_le16(0x0000);
509 	cp.max_ce_len		= cpu_to_le16(0x0000);
510 
511 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
512 
513 	if (params)
514 		return 0x01;
515 
516 	return 0x00;
517 }
518 
519 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
520 		      __u8 ltk[16], __u8 key_size)
521 {
522 	struct hci_dev *hdev = conn->hdev;
523 	struct hci_cp_le_start_enc cp;
524 
525 	BT_DBG("hcon %p", conn);
526 
527 	memset(&cp, 0, sizeof(cp));
528 
529 	cp.handle = cpu_to_le16(conn->handle);
530 	cp.rand = rand;
531 	cp.ediv = ediv;
532 	memcpy(cp.ltk, ltk, key_size);
533 
534 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
535 }
536 
537 /* Device _must_ be locked */
538 void hci_sco_setup(struct hci_conn *conn, __u8 status)
539 {
540 	struct hci_link *link;
541 
542 	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
543 	if (!link || !link->conn)
544 		return;
545 
546 	BT_DBG("hcon %p", conn);
547 
548 	if (!status) {
549 		if (lmp_esco_capable(conn->hdev))
550 			hci_setup_sync(link->conn, conn->handle);
551 		else
552 			hci_add_sco(link->conn, conn->handle);
553 	} else {
554 		hci_connect_cfm(link->conn, status);
555 		hci_conn_del(link->conn);
556 	}
557 }
558 
559 static void hci_conn_timeout(struct work_struct *work)
560 {
561 	struct hci_conn *conn = container_of(work, struct hci_conn,
562 					     disc_work.work);
563 	int refcnt = atomic_read(&conn->refcnt);
564 
565 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
566 
567 	WARN_ON(refcnt < 0);
568 
569 	/* FIXME: It was observed that in pairing failed scenario, refcnt
570 	 * drops below 0. Probably this is because l2cap_conn_del calls
571 	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
572 	 * dropped. After that loop hci_chan_del is called which also drops
573 	 * conn. For now make sure that the ACL is alive if refcnt is higher than 0,
574 	 * otherwise drop it.
575 	 */
576 	if (refcnt > 0)
577 		return;
578 
579 	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
580 }
581 
582 /* Enter sniff mode */
583 static void hci_conn_idle(struct work_struct *work)
584 {
585 	struct hci_conn *conn = container_of(work, struct hci_conn,
586 					     idle_work.work);
587 	struct hci_dev *hdev = conn->hdev;
588 
589 	BT_DBG("hcon %p mode %d", conn, conn->mode);
590 
591 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
592 		return;
593 
594 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
595 		return;
596 
597 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
598 		struct hci_cp_sniff_subrate cp;
599 		cp.handle             = cpu_to_le16(conn->handle);
600 		cp.max_latency        = cpu_to_le16(0);
601 		cp.min_remote_timeout = cpu_to_le16(0);
602 		cp.min_local_timeout  = cpu_to_le16(0);
603 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
604 	}
605 
606 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
607 		struct hci_cp_sniff_mode cp;
608 		cp.handle       = cpu_to_le16(conn->handle);
609 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
610 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
611 		cp.attempt      = cpu_to_le16(4);
612 		cp.timeout      = cpu_to_le16(1);
613 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
614 	}
615 }
616 
617 static void hci_conn_auto_accept(struct work_struct *work)
618 {
619 	struct hci_conn *conn = container_of(work, struct hci_conn,
620 					     auto_accept_work.work);
621 
622 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
623 		     &conn->dst);
624 }
625 
626 static void le_disable_advertising(struct hci_dev *hdev)
627 {
628 	if (ext_adv_capable(hdev)) {
629 		struct hci_cp_le_set_ext_adv_enable cp;
630 
631 		cp.enable = 0x00;
632 		cp.num_of_sets = 0x00;
633 
634 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
635 			     &cp);
636 	} else {
637 		u8 enable = 0x00;
638 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
639 			     &enable);
640 	}
641 }
642 
643 static void le_conn_timeout(struct work_struct *work)
644 {
645 	struct hci_conn *conn = container_of(work, struct hci_conn,
646 					     le_conn_timeout.work);
647 	struct hci_dev *hdev = conn->hdev;
648 
649 	BT_DBG("");
650 
651 	/* We could end up here due to having done directed advertising,
652 	 * so clean up the state if necessary. This should however only
653 	 * happen with broken hardware or if low duty cycle was used
654 	 * (which doesn't have a timeout of its own).
655 	 */
656 	if (conn->role == HCI_ROLE_SLAVE) {
657 		/* Disable LE Advertising */
658 		le_disable_advertising(hdev);
659 		hci_dev_lock(hdev);
660 		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
661 		hci_dev_unlock(hdev);
662 		return;
663 	}
664 
665 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
666 }
667 
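/* Scratch data handed to the hci_conn_hash_list_*() iterators below;
 * the callbacks bump ->count for each matching connection so callers
 * can tell whether a BIG/CIG still has users before tearing it down.
 */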
668 struct iso_list_data {
669 	union {
670 		u8  cig;
671 		u8  big;
672 	};
673 	union {
674 		u8  cis;
675 		u8  bis;
676 		u16 sync_handle;
677 	};
678 	int count;
679 	bool big_term;
680 	bool pa_sync_term;
681 	bool big_sync_term;
682 };
683 
684 static void bis_list(struct hci_conn *conn, void *data)
685 {
686 	struct iso_list_data *d = data;
687 
688 	/* Skip if not broadcast/ANY address */
689 	if (bacmp(&conn->dst, BDADDR_ANY))
690 		return;
691 
692 	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
693 	    d->bis != conn->iso_qos.bcast.bis)
694 		return;
695 
696 	d->count++;
697 }
698 
699 static int terminate_big_sync(struct hci_dev *hdev, void *data)
700 {
701 	struct iso_list_data *d = data;
702 
703 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
704 
705 	hci_disable_per_advertising_sync(hdev, d->bis);
706 	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
707 
708 	/* Only terminate BIG if it has been created */
709 	if (!d->big_term)
710 		return 0;
711 
712 	return hci_le_terminate_big_sync(hdev, d->big,
713 					 HCI_ERROR_LOCAL_HOST_TERM);
714 }
715 
716 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
717 {
718 	kfree(data);
719 }
720 
721 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
722 {
723 	struct iso_list_data *d;
724 	int ret;
725 
726 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
727 		   conn->iso_qos.bcast.bis);
728 
729 	d = kzalloc(sizeof(*d), GFP_KERNEL);
730 	if (!d)
731 		return -ENOMEM;
732 
733 	d->big = conn->iso_qos.bcast.big;
734 	d->bis = conn->iso_qos.bcast.bis;
735 	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
736 
737 	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
738 				 terminate_big_destroy);
739 	if (ret)
740 		kfree(d);
741 
742 	return ret;
743 }
744 
745 static int big_terminate_sync(struct hci_dev *hdev, void *data)
746 {
747 	struct iso_list_data *d = data;
748 
749 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
750 		   d->sync_handle);
751 
752 	if (d->big_sync_term)
753 		hci_le_big_terminate_sync(hdev, d->big);
754 
755 	if (d->pa_sync_term)
756 		return hci_le_pa_terminate_sync(hdev, d->sync_handle);
757 
758 	return 0;
759 }
760 
761 static void find_bis(struct hci_conn *conn, void *data)
762 {
763 	struct iso_list_data *d = data;
764 
765 	/* Ignore if BIG doesn't match */
766 	if (d->big != conn->iso_qos.bcast.big)
767 		return;
768 
769 	d->count++;
770 }
771 
772 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
773 {
774 	struct iso_list_data *d;
775 	int ret;
776 
777 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
778 
779 	d = kzalloc(sizeof(*d), GFP_KERNEL);
780 	if (!d)
781 		return -ENOMEM;
782 
783 	d->big = big;
784 	d->sync_handle = conn->sync_handle;
785 
786 	if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
787 		hci_conn_hash_list_flag(hdev, find_bis, PA_LINK,
788 					HCI_CONN_PA_SYNC, d);
789 
790 		if (!d->count)
791 			d->pa_sync_term = true;
792 
793 		d->count = 0;
794 	}
795 
796 	if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
797 		hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK,
798 					HCI_CONN_BIG_SYNC, d);
799 
800 		if (!d->count)
801 			d->big_sync_term = true;
802 	}
803 
804 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
805 				 terminate_big_destroy);
806 	if (ret)
807 		kfree(d);
808 
809 	return ret;
810 }
811 
812 /* Cleanup BIS connection
813  *
814  * Detects if there are any BISes left connected in a BIG.
815  * broadcaster: Remove the advertising instance and terminate the BIG.
816  * broadcast receiver: Terminate the BIG sync and terminate the PA sync.
817  */
818 static void bis_cleanup(struct hci_conn *conn)
819 {
820 	struct hci_dev *hdev = conn->hdev;
821 	struct hci_conn *bis;
822 
823 	bt_dev_dbg(hdev, "conn %p", conn);
824 
825 	if (conn->role == HCI_ROLE_MASTER) {
826 		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
827 			return;
828 
829 		/* Check if ISO connection is a BIS and terminate advertising
830 		 * set and BIG if there are no other connections using it.
831 		 */
832 		bis = hci_conn_hash_lookup_big_state(hdev,
833 						     conn->iso_qos.bcast.big,
834 						     BT_CONNECTED,
835 						     HCI_ROLE_MASTER);
836 		if (bis)
837 			return;
838 
839 		bis = hci_conn_hash_lookup_big_state(hdev,
840 						     conn->iso_qos.bcast.big,
841 						     BT_CONNECT,
842 						     HCI_ROLE_MASTER);
843 		if (bis)
844 			return;
845 
846 		bis = hci_conn_hash_lookup_big_state(hdev,
847 						     conn->iso_qos.bcast.big,
848 						     BT_OPEN,
849 						     HCI_ROLE_MASTER);
850 		if (bis)
851 			return;
852 
853 		hci_le_terminate_big(hdev, conn);
854 	} else {
855 		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
856 				     conn);
857 	}
858 }
859 
860 static int remove_cig_sync(struct hci_dev *hdev, void *data)
861 {
862 	u8 handle = PTR_UINT(data);
863 
864 	return hci_le_remove_cig_sync(hdev, handle);
865 }
866 
867 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
868 {
869 	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
870 
871 	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
872 				  NULL);
873 }
874 
875 static void find_cis(struct hci_conn *conn, void *data)
876 {
877 	struct iso_list_data *d = data;
878 
879 	/* Ignore broadcast or if the CIG doesn't match */
880 	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
881 		return;
882 
883 	d->count++;
884 }
885 
886 /* Cleanup CIS connection:
887  *
888  * Detects whether any CISes are left connected in a CIG and removes the CIG if none remain.
889  */
890 static void cis_cleanup(struct hci_conn *conn)
891 {
892 	struct hci_dev *hdev = conn->hdev;
893 	struct iso_list_data d;
894 
895 	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
896 		return;
897 
898 	memset(&d, 0, sizeof(d));
899 	d.cig = conn->iso_qos.ucast.cig;
900 
901 	/* Check if ISO connection is a CIS and remove CIG if there are
902 	 * no other connections using it.
903 	 */
904 	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_BOUND, &d);
905 	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECT,
906 				 &d);
907 	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECTED,
908 				 &d);
909 	if (d.count)
910 		return;
911 
912 	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
913 }
914 
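/* Connections that do not yet have a controller-assigned handle get a
 * placeholder allocated from an IDA in (HCI_CONN_HANDLE_MAX, U16_MAX],
 * so it can never collide with a real handle; it is released again in
 * hci_conn_cleanup() or hci_conn_set_handle().
 */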
915 static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
916 {
917 	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
918 			       U16_MAX, GFP_ATOMIC);
919 }
920 
921 static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
922 				       u8 role, u16 handle)
923 {
924 	struct hci_conn *conn;
925 
926 	switch (type) {
927 	case ACL_LINK:
928 		if (!hdev->acl_mtu)
929 			return ERR_PTR(-ECONNREFUSED);
930 		break;
931 	case CIS_LINK:
932 	case BIS_LINK:
933 	case PA_LINK:
934 		if (!hdev->iso_mtu)
935 			return ERR_PTR(-ECONNREFUSED);
936 		break;
937 	case LE_LINK:
938 		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
939 			return ERR_PTR(-ECONNREFUSED);
940 		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
941 			return ERR_PTR(-ECONNREFUSED);
942 		break;
943 	case SCO_LINK:
944 	case ESCO_LINK:
945 		if (!hdev->sco_pkts)
946 			/* Controller does not support SCO or eSCO over HCI */
947 			return ERR_PTR(-ECONNREFUSED);
948 		break;
949 	default:
950 		return ERR_PTR(-ECONNREFUSED);
951 	}
952 
953 	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
954 
955 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
956 	if (!conn)
957 		return ERR_PTR(-ENOMEM);
958 
959 	bacpy(&conn->dst, dst);
960 	bacpy(&conn->src, &hdev->bdaddr);
961 	conn->handle = handle;
962 	conn->hdev  = hdev;
963 	conn->type  = type;
964 	conn->role  = role;
965 	conn->mode  = HCI_CM_ACTIVE;
966 	conn->state = BT_OPEN;
967 	conn->auth_type = HCI_AT_GENERAL_BONDING;
968 	conn->io_capability = hdev->io_capability;
969 	conn->remote_auth = 0xff;
970 	conn->key_type = 0xff;
971 	conn->rssi = HCI_RSSI_INVALID;
972 	conn->tx_power = HCI_TX_POWER_INVALID;
973 	conn->max_tx_power = HCI_TX_POWER_INVALID;
974 	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
975 	conn->sid = HCI_SID_INVALID;
976 
977 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
978 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
979 
980 	/* Set Default Authenticated payload timeout to 30s */
981 	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
982 
983 	if (conn->role == HCI_ROLE_MASTER)
984 		conn->out = true;
985 
986 	switch (type) {
987 	case ACL_LINK:
988 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
989 		conn->mtu = hdev->acl_mtu;
990 		break;
991 	case LE_LINK:
992 		/* conn->src should reflect the local identity address */
993 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
994 		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
995 		break;
996 	case CIS_LINK:
997 	case BIS_LINK:
998 	case PA_LINK:
999 		/* conn->src should reflect the local identity address */
1000 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
1001 
1002 		/* set proper cleanup function */
1003 		if (!bacmp(dst, BDADDR_ANY))
1004 			conn->cleanup = bis_cleanup;
1005 		else if (conn->role == HCI_ROLE_MASTER)
1006 			conn->cleanup = cis_cleanup;
1007 
1008 		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
1009 			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
1010 		break;
1011 	case SCO_LINK:
1012 		if (lmp_esco_capable(hdev))
1013 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
1014 					(hdev->esco_type & EDR_ESCO_MASK);
1015 		else
1016 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
1017 
1018 		conn->mtu = hdev->sco_mtu;
1019 		break;
1020 	case ESCO_LINK:
1021 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1022 		conn->mtu = hdev->sco_mtu;
1023 		break;
1024 	}
1025 
1026 	skb_queue_head_init(&conn->data_q);
1027 	skb_queue_head_init(&conn->tx_q.queue);
1028 
1029 	INIT_LIST_HEAD(&conn->chan_list);
1030 	INIT_LIST_HEAD(&conn->link_list);
1031 
1032 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1033 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1034 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1035 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1036 
1037 	atomic_set(&conn->refcnt, 0);
1038 
1039 	hci_dev_hold(hdev);
1040 
1041 	hci_conn_hash_add(hdev, conn);
1042 
1043 	/* The SCO and eSCO connections will only be notified when their
1044 	 * setup has been completed. This is different from ACL links, which
1045 	 * can be notified right away.
1046 	 */
1047 	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1048 		if (hdev->notify)
1049 			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1050 	}
1051 
1052 	hci_conn_init_sysfs(conn);
1053 	return conn;
1054 }
1055 
1056 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1057 				    bdaddr_t *dst, u8 role)
1058 {
1059 	int handle;
1060 
1061 	bt_dev_dbg(hdev, "dst %pMR", dst);
1062 
1063 	handle = hci_conn_hash_alloc_unset(hdev);
1064 	if (unlikely(handle < 0))
1065 		return ERR_PTR(-ECONNREFUSED);
1066 
1067 	return __hci_conn_add(hdev, type, dst, role, handle);
1068 }
1069 
1070 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
1071 			      u8 role, u16 handle)
1072 {
1073 	if (handle > HCI_CONN_HANDLE_MAX)
1074 		return ERR_PTR(-EINVAL);
1075 
1076 	return __hci_conn_add(hdev, type, dst, role, handle);
1077 }
1078 
1079 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1080 {
1081 	if (!reason)
1082 		reason = HCI_ERROR_REMOTE_USER_TERM;
1083 
1084 	/* Due to a race, the SCO/ISO conn might not be established yet at this point,
1085 	 * and nothing else will clean it up. In other cases it is done via HCI
1086 	 * events.
1087 	 */
1088 	switch (conn->type) {
1089 	case SCO_LINK:
1090 	case ESCO_LINK:
1091 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
1092 			hci_conn_failed(conn, reason);
1093 		break;
1094 	case CIS_LINK:
1095 	case BIS_LINK:
1096 	case PA_LINK:
1097 		if ((conn->state != BT_CONNECTED &&
1098 		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
1099 		    test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
1100 			hci_conn_failed(conn, reason);
1101 		break;
1102 	}
1103 }
1104 
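/* Unlink a connection from its parent/child relationships: for a
 * parent ACL this unlinks every child first, cleaning up children
 * whose setup never completed; for a child it removes the link and
 * drops the references held on the parent.
 */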
1105 static void hci_conn_unlink(struct hci_conn *conn)
1106 {
1107 	struct hci_dev *hdev = conn->hdev;
1108 
1109 	bt_dev_dbg(hdev, "hcon %p", conn);
1110 
1111 	if (!conn->parent) {
1112 		struct hci_link *link, *t;
1113 
1114 		list_for_each_entry_safe(link, t, &conn->link_list, list) {
1115 			struct hci_conn *child = link->conn;
1116 
1117 			hci_conn_unlink(child);
1118 
1119 			/* If hdev is down it means
1120 			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
1121 	 * and links don't need to be cleaned up as all connections
1122 	 * will be cleaned up.
1123 			 */
1124 			if (!test_bit(HCI_UP, &hdev->flags))
1125 				continue;
1126 
1127 			hci_conn_cleanup_child(child, conn->abort_reason);
1128 		}
1129 
1130 		return;
1131 	}
1132 
1133 	if (!conn->link)
1134 		return;
1135 
1136 	list_del_rcu(&conn->link->list);
1137 	synchronize_rcu();
1138 
1139 	hci_conn_drop(conn->parent);
1140 	hci_conn_put(conn->parent);
1141 	conn->parent = NULL;
1142 
1143 	kfree(conn->link);
1144 	conn->link = NULL;
1145 }
1146 
1147 void hci_conn_del(struct hci_conn *conn)
1148 {
1149 	struct hci_dev *hdev = conn->hdev;
1150 
1151 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1152 
1153 	hci_conn_unlink(conn);
1154 
1155 	disable_delayed_work_sync(&conn->disc_work);
1156 	disable_delayed_work_sync(&conn->auto_accept_work);
1157 	disable_delayed_work_sync(&conn->idle_work);
1158 
1159 	/* Remove the connection from the list so the unacked logic can detect when
1160 	 * a certain pool is not being utilized.
1161 	 */
1162 	hci_conn_hash_del(hdev, conn);
1163 
1164 	/* Handle unacked frames:
1165 	 *
1166 	 * - In case there are no connections, or if restoring the buffers
1167 	 *   considered in transit would overflow, restore all buffers to the
1168 	 *   pool.
1169 	 * - Otherwise restore just the buffers considered in transit for the
1170 	 *   hci_conn
1171 	 */
1172 	switch (conn->type) {
1173 	case ACL_LINK:
1174 		if (!hci_conn_num(hdev, ACL_LINK) ||
1175 		    hdev->acl_cnt + conn->sent > hdev->acl_pkts)
1176 			hdev->acl_cnt = hdev->acl_pkts;
1177 		else
1178 			hdev->acl_cnt += conn->sent;
1179 		break;
1180 	case LE_LINK:
1181 		cancel_delayed_work(&conn->le_conn_timeout);
1182 
1183 		if (hdev->le_pkts) {
1184 			if (!hci_conn_num(hdev, LE_LINK) ||
1185 			    hdev->le_cnt + conn->sent > hdev->le_pkts)
1186 				hdev->le_cnt = hdev->le_pkts;
1187 			else
1188 				hdev->le_cnt += conn->sent;
1189 		} else {
1190 			if ((!hci_conn_num(hdev, LE_LINK) &&
1191 			     !hci_conn_num(hdev, ACL_LINK)) ||
1192 			    hdev->acl_cnt + conn->sent > hdev->acl_pkts)
1193 				hdev->acl_cnt = hdev->acl_pkts;
1194 			else
1195 				hdev->acl_cnt += conn->sent;
1196 		}
1197 		break;
1198 	case CIS_LINK:
1199 	case BIS_LINK:
1200 	case PA_LINK:
1201 		if (!hci_iso_count(hdev) ||
1202 		    hdev->iso_cnt + conn->sent > hdev->iso_pkts)
1203 			hdev->iso_cnt = hdev->iso_pkts;
1204 		else
1205 			hdev->iso_cnt += conn->sent;
1206 		break;
1207 	}
1208 
1209 	skb_queue_purge(&conn->data_q);
1210 	skb_queue_purge(&conn->tx_q.queue);
1211 
1212 	/* Remove the connection from the list and cleanup its remaining
1213 	 * state. This is a separate function since for some cases like
1214 	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1215 	 * rest of hci_conn_del.
1216 	 */
1217 	hci_conn_cleanup(conn);
1218 
1219 	/* Dequeue callbacks using connection pointer as data */
1220 	hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
1221 }
1222 
1223 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1224 {
1225 	int use_src = bacmp(src, BDADDR_ANY);
1226 	struct hci_dev *hdev = NULL, *d;
1227 
1228 	BT_DBG("%pMR -> %pMR", src, dst);
1229 
1230 	read_lock(&hci_dev_list_lock);
1231 
1232 	list_for_each_entry(d, &hci_dev_list, list) {
1233 		if (!test_bit(HCI_UP, &d->flags) ||
1234 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
1235 			continue;
1236 
1237 		/* Simple routing:
1238 		 *   No source address - find interface with bdaddr != dst
1239 		 *   Source address    - find interface with bdaddr == src
1240 		 */
1241 
1242 		if (use_src) {
1243 			bdaddr_t id_addr;
1244 			u8 id_addr_type;
1245 
1246 			if (src_type == BDADDR_BREDR) {
1247 				if (!lmp_bredr_capable(d))
1248 					continue;
1249 				bacpy(&id_addr, &d->bdaddr);
1250 				id_addr_type = BDADDR_BREDR;
1251 			} else {
1252 				if (!lmp_le_capable(d))
1253 					continue;
1254 
1255 				hci_copy_identity_address(d, &id_addr,
1256 							  &id_addr_type);
1257 
1258 				/* Convert from HCI to three-value type */
1259 				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1260 					id_addr_type = BDADDR_LE_PUBLIC;
1261 				else
1262 					id_addr_type = BDADDR_LE_RANDOM;
1263 			}
1264 
1265 			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1266 				hdev = d; break;
1267 			}
1268 		} else {
1269 			if (bacmp(&d->bdaddr, dst)) {
1270 				hdev = d; break;
1271 			}
1272 		}
1273 	}
1274 
1275 	if (hdev)
1276 		hdev = hci_dev_hold(hdev);
1277 
1278 	read_unlock(&hci_dev_list_lock);
1279 	return hdev;
1280 }
1281 EXPORT_SYMBOL(hci_get_route);
1282 
1283 /* This function requires the caller holds hdev->lock */
1284 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1285 {
1286 	struct hci_dev *hdev = conn->hdev;
1287 
1288 	hci_connect_le_scan_cleanup(conn, status);
1289 
1290 	/* Enable advertising in case this was a failed connection
1291 	 * attempt as a peripheral.
1292 	 */
1293 	hci_enable_advertising(hdev);
1294 }
1295 
1296 /* This function requires the caller holds hdev->lock */
1297 void hci_conn_failed(struct hci_conn *conn, u8 status)
1298 {
1299 	struct hci_dev *hdev = conn->hdev;
1300 
1301 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1302 
1303 	switch (conn->type) {
1304 	case LE_LINK:
1305 		hci_le_conn_failed(conn, status);
1306 		break;
1307 	case ACL_LINK:
1308 		mgmt_connect_failed(hdev, conn, status);
1309 		break;
1310 	}
1311 
1312 	/* In case of BIG/PA sync failed, clear conn flags so that
1313 	 * the conns will be correctly cleaned up by ISO layer
1314 	 */
1315 	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1316 	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1317 
1318 	conn->state = BT_CLOSED;
1319 	hci_connect_cfm(conn, status);
1320 	hci_conn_del(conn);
1321 }
1322 
1323 /* This function requires the caller holds hdev->lock */
1324 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1325 {
1326 	struct hci_dev *hdev = conn->hdev;
1327 
1328 	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1329 
1330 	if (conn->handle == handle)
1331 		return 0;
1332 
1333 	if (handle > HCI_CONN_HANDLE_MAX) {
1334 		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1335 			   handle, HCI_CONN_HANDLE_MAX);
1336 		return HCI_ERROR_INVALID_PARAMETERS;
1337 	}
1338 
1339 	/* If abort_reason has been sent it means the connection is being
1340 	 * aborted and the handle shall not be changed.
1341 	 */
1342 	if (conn->abort_reason)
1343 		return conn->abort_reason;
1344 
1345 	if (HCI_CONN_HANDLE_UNSET(conn->handle))
1346 		ida_free(&hdev->unset_handle_ida, conn->handle);
1347 
1348 	conn->handle = handle;
1349 
1350 	return 0;
1351 }
1352 
1353 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1354 				u8 dst_type, bool dst_resolved, u8 sec_level,
1355 				u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
1356 {
1357 	struct hci_conn *conn;
1358 	struct smp_irk *irk;
1359 	int err;
1360 
1361 	/* Let's make sure that LE is enabled. */
1362 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1363 		if (lmp_le_capable(hdev))
1364 			return ERR_PTR(-ECONNREFUSED);
1365 
1366 		return ERR_PTR(-EOPNOTSUPP);
1367 	}
1368 
1369 	/* Since the controller supports only one LE connection attempt at a
1370 	 * time, we return -EBUSY if there is any connection attempt running.
1371 	 */
1372 	if (hci_lookup_le_connect(hdev))
1373 		return ERR_PTR(-EBUSY);
1374 
1375 	/* If there's already a connection object but it's not in
1376 	 * scanning state it means it must already be established, in
1377 	 * which case we can't do anything else except report a failure
1378 	 * to connect.
1379 	 */
1380 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1381 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1382 		return ERR_PTR(-EBUSY);
1383 	}
1384 
1385 	/* Check if the destination address has been resolved by the controller
1386 	 * since if it did then the identity address shall be used.
1387 	 */
1388 	if (!dst_resolved) {
1389 		/* When given an identity address with existing identity
1390 		 * resolving key, the connection needs to be established
1391 		 * to a resolvable random address.
1392 		 *
1393 		 * Storing the resolvable random address is required here
1394 		 * to handle connection failures. The address will later
1395 		 * be resolved back into the original identity address
1396 		 * from the connect request.
1397 		 */
1398 		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1399 		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1400 			dst = &irk->rpa;
1401 			dst_type = ADDR_LE_DEV_RANDOM;
1402 		}
1403 	}
1404 
1405 	if (conn) {
1406 		bacpy(&conn->dst, dst);
1407 	} else {
1408 		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1409 		if (IS_ERR(conn))
1410 			return conn;
1411 		hci_conn_hold(conn);
1412 		conn->pending_sec_level = sec_level;
1413 	}
1414 
1415 	conn->dst_type = dst_type;
1416 	conn->sec_level = BT_SECURITY_LOW;
1417 	conn->conn_timeout = conn_timeout;
1418 	conn->le_adv_phy = phy;
1419 	conn->le_adv_sec_phy = sec_phy;
1420 
1421 	err = hci_connect_le_sync(hdev, conn);
1422 	if (err) {
1423 		hci_conn_del(conn);
1424 		return ERR_PTR(err);
1425 	}
1426 
1427 	return conn;
1428 }
1429 
1430 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1431 {
1432 	struct hci_conn *conn;
1433 
1434 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1435 	if (!conn)
1436 		return false;
1437 
1438 	if (conn->state != BT_CONNECTED)
1439 		return false;
1440 
1441 	return true;
1442 }
1443 
1444 /* This function requires the caller holds hdev->lock */
1445 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1446 					bdaddr_t *addr, u8 addr_type)
1447 {
1448 	struct hci_conn_params *params;
1449 
1450 	if (is_connected(hdev, addr, addr_type))
1451 		return -EISCONN;
1452 
1453 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1454 	if (!params) {
1455 		params = hci_conn_params_add(hdev, addr, addr_type);
1456 		if (!params)
1457 			return -ENOMEM;
1458 
1459 		/* If we created new params, mark them to be deleted in
1460 		 * hci_connect_le_scan_cleanup. It's different case than
1461 		 * existing disabled params, those will stay after cleanup.
1462 		 */
1463 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1464 	}
1465 
1466 	/* We're trying to connect, so make sure params are at pend_le_conns */
1467 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1468 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1469 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1470 		hci_pend_le_list_del_init(params);
1471 		hci_pend_le_list_add(params, &hdev->pend_le_conns);
1472 	}
1473 
1474 	params->explicit_connect = true;
1475 
1476 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1477 	       params->auto_connect);
1478 
1479 	return 0;
1480 }
1481 
1482 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1483 {
1484 	struct hci_conn *conn;
1485 	u8  big;
1486 
1487 	/* Allocate a BIG if not set */
1488 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
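		/* Search the BIG handle space (0x00-0xEF per the spec)
		 * for a handle with no existing connections.
		 */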
1489 		for (big = 0x00; big < 0xef; big++) {
1490 
1491 			conn = hci_conn_hash_lookup_big(hdev, big);
1492 			if (!conn)
1493 				break;
1494 		}
1495 
1496 		if (big == 0xef)
1497 			return -EADDRNOTAVAIL;
1498 
1499 		/* Update BIG */
1500 		qos->bcast.big = big;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
1506 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1507 {
1508 	struct hci_conn *conn;
1509 	u8  bis;
1510 
1511 	/* Allocate BIS if not set */
1512 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1513 		if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
1514 			conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1515 
1516 			if (conn) {
1517 				/* If the BIG handle is already matched to an advertising
1518 				 * handle, do not allocate a new one.
1519 				 */
1520 				qos->bcast.bis = conn->iso_qos.bcast.bis;
1521 				return 0;
1522 			}
1523 		}
1524 
1525 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1526 		 * since it is reserved as the general purpose set.
1527 		 */
1528 		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1529 		     bis++) {
1530 
1531 			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1532 			if (!conn)
1533 				break;
1534 		}
1535 
1536 		if (bis == hdev->le_num_of_adv_sets)
1537 			return -EADDRNOTAVAIL;
1538 
1539 		/* Update BIS */
1540 		qos->bcast.bis = bis;
1541 	}
1542 
1543 	return 0;
1544 }
1545 
1546 /* This function requires the caller holds hdev->lock */
1547 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1548 				    __u8 sid, struct bt_iso_qos *qos,
1549 				    __u8 base_len, __u8 *base, u16 timeout)
1550 {
1551 	struct hci_conn *conn;
1552 	int err;
1553 
1554 	/* Let's make sure that LE is enabled. */
1555 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1556 		if (lmp_le_capable(hdev))
1557 			return ERR_PTR(-ECONNREFUSED);
1558 		return ERR_PTR(-EOPNOTSUPP);
1559 	}
1560 
1561 	err = qos_set_big(hdev, qos);
1562 	if (err)
1563 		return ERR_PTR(err);
1564 
1565 	err = qos_set_bis(hdev, qos);
1566 	if (err)
1567 		return ERR_PTR(err);
1568 
1569 	/* Check if the LE Create BIG command has already been sent */
1570 	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1571 						qos->bcast.bis);
1572 	if (conn)
1573 		return ERR_PTR(-EADDRINUSE);
1574 
1575 	/* Check BIS settings against other bound BISes, since all
1576 	 * BISes in a BIG must have the same value for all parameters
1577 	 */
1578 	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1579 
1580 	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1581 		     base_len != conn->le_per_adv_data_len ||
1582 		     memcmp(conn->le_per_adv_data, base, base_len)))
1583 		return ERR_PTR(-EADDRINUSE);
1584 
1585 	conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_MASTER);
1586 	if (IS_ERR(conn))
1587 		return conn;
1588 
1589 	conn->state = BT_CONNECT;
1590 	conn->sid = sid;
1591 	conn->conn_timeout = timeout;
1592 
1593 	hci_conn_hold(conn);
1594 	return conn;
1595 }
1596 
1597 /* This function requires the caller holds hdev->lock */
1598 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1599 				     u8 dst_type, u8 sec_level,
1600 				     u16 conn_timeout,
1601 				     enum conn_reasons conn_reason)
1602 {
1603 	struct hci_conn *conn;
1604 
1605 	/* Let's make sure that LE is enabled. */
1606 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1607 		if (lmp_le_capable(hdev))
1608 			return ERR_PTR(-ECONNREFUSED);
1609 
1610 		return ERR_PTR(-EOPNOTSUPP);
1611 	}
1612 
1613 	/* Some devices send ATT messages as soon as the physical link is
1614 	 * established. To be able to handle these ATT messages, the user-
1615 	 * space first establishes the connection and then starts the pairing
1616 	 * process.
1617 	 *
1618 	 * So if a hci_conn object already exists for the following connection
1619 	 * attempt, we simply update pending_sec_level and auth_type fields
1620 	 * and return the object found.
1621 	 */
1622 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1623 	if (conn) {
1624 		if (conn->pending_sec_level < sec_level)
1625 			conn->pending_sec_level = sec_level;
1626 		goto done;
1627 	}
1628 
1629 	BT_DBG("requesting refresh of dst_addr");
1630 
1631 	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1632 	if (IS_ERR(conn))
1633 		return conn;
1634 
1635 	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1636 		hci_conn_del(conn);
1637 		return ERR_PTR(-EBUSY);
1638 	}
1639 
1640 	conn->state = BT_CONNECT;
1641 	set_bit(HCI_CONN_SCANNING, &conn->flags);
1642 	conn->dst_type = dst_type;
1643 	conn->sec_level = BT_SECURITY_LOW;
1644 	conn->pending_sec_level = sec_level;
1645 	conn->conn_timeout = conn_timeout;
1646 	conn->conn_reason = conn_reason;
1647 
1648 	hci_update_passive_scan(hdev);
1649 
1650 done:
1651 	hci_conn_hold(conn);
1652 	return conn;
1653 }
1654 
1655 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1656 				 u8 sec_level, u8 auth_type,
1657 				 enum conn_reasons conn_reason, u16 timeout)
1658 {
1659 	struct hci_conn *acl;
1660 
1661 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1662 		if (lmp_bredr_capable(hdev))
1663 			return ERR_PTR(-ECONNREFUSED);
1664 
1665 		return ERR_PTR(-EOPNOTSUPP);
1666 	}
1667 
1668 	/* Reject outgoing connection to device with same BD ADDR against
1669 	 * CVE-2020-26555
1670 	 */
1671 	if (!bacmp(&hdev->bdaddr, dst)) {
1672 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1673 			   dst);
1674 		return ERR_PTR(-ECONNREFUSED);
1675 	}
1676 
1677 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1678 	if (!acl) {
1679 		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1680 		if (IS_ERR(acl))
1681 			return acl;
1682 	}
1683 
1684 	hci_conn_hold(acl);
1685 
1686 	acl->conn_reason = conn_reason;
1687 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1688 		int err;
1689 
1690 		acl->sec_level = BT_SECURITY_LOW;
1691 		acl->pending_sec_level = sec_level;
1692 		acl->auth_type = auth_type;
1693 		acl->conn_timeout = timeout;
1694 
1695 		err = hci_connect_acl_sync(hdev, acl);
1696 		if (err) {
1697 			hci_conn_del(acl);
1698 			return ERR_PTR(err);
1699 		}
1700 	}
1701 
1702 	return acl;
1703 }
1704 
1705 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1706 				      struct hci_conn *conn)
1707 {
1708 	struct hci_dev *hdev = parent->hdev;
1709 	struct hci_link *link;
1710 
1711 	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1712 
1713 	if (conn->link)
1714 		return conn->link;
1715 
1716 	if (conn->parent)
1717 		return NULL;
1718 
1719 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1720 	if (!link)
1721 		return NULL;
1722 
1723 	link->conn = hci_conn_hold(conn);
1724 	conn->link = link;
1725 	conn->parent = hci_conn_get(parent);
1726 
1727 	/* Use list_add_tail_rcu to append to the list */
1728 	list_add_tail_rcu(&link->list, &parent->link_list);
1729 
1730 	return link;
1731 }
1732 
1733 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1734 				 __u16 setting, struct bt_codec *codec,
1735 				 u16 timeout)
1736 {
1737 	struct hci_conn *acl;
1738 	struct hci_conn *sco;
1739 	struct hci_link *link;
1740 
1741 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1742 			      CONN_REASON_SCO_CONNECT, timeout);
1743 	if (IS_ERR(acl))
1744 		return acl;
1745 
1746 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1747 	if (!sco) {
1748 		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1749 		if (IS_ERR(sco)) {
1750 			hci_conn_drop(acl);
1751 			return sco;
1752 		}
1753 	}
1754 
1755 	link = hci_conn_link(acl, sco);
1756 	if (!link) {
1757 		hci_conn_drop(acl);
1758 		hci_conn_drop(sco);
1759 		return ERR_PTR(-ENOLINK);
1760 	}
1761 
1762 	sco->setting = setting;
1763 	sco->codec = *codec;
1764 
1765 	if (acl->state == BT_CONNECTED &&
1766 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1767 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1768 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1769 
1770 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1771 			/* defer SCO setup until mode change completed */
1772 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1773 			return sco;
1774 		}
1775 
1776 		hci_sco_setup(acl, 0x00);
1777 	}
1778 
1779 	return sco;
1780 }
1781 
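/* Build and send LE Create BIG: num_bis is derived by counting the
 * bound BIS connections sharing this BIG, while the QoS and broadcast
 * code parameters are taken from the supplied qos.
 */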
1782 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1783 {
1784 	struct hci_dev *hdev = conn->hdev;
1785 	struct hci_cp_le_create_big cp;
1786 	struct iso_list_data data;
1787 
1788 	memset(&cp, 0, sizeof(cp));
1789 
1790 	data.big = qos->bcast.big;
1791 	data.bis = qos->bcast.bis;
1792 	data.count = 0;
1793 
1794 	/* Create a BIS for each bound connection */
1795 	hci_conn_hash_list_state(hdev, bis_list, BIS_LINK,
1796 				 BT_BOUND, &data);
1797 
1798 	cp.handle = qos->bcast.big;
1799 	cp.adv_handle = qos->bcast.bis;
1800 	cp.num_bis  = data.count;
1801 	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1802 	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1803 	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1804 	cp.bis.rtn  = qos->bcast.out.rtn;
1805 	cp.bis.phy  = qos->bcast.out.phy;
1806 	cp.bis.packing = qos->bcast.packing;
1807 	cp.bis.framing = qos->bcast.framing;
1808 	cp.bis.encryption = qos->bcast.encryption;
1809 	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1810 
1811 	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1812 }
1813 
1814 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1815 {
1816 	DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f);
1817 	u8 cig_id = PTR_UINT(data);
1818 	struct hci_conn *conn;
1819 	struct bt_iso_qos *qos;
1820 	u8 aux_num_cis = 0;
1821 	u8 cis_id;
1822 
1823 	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1824 	if (!conn)
1825 		return 0;
1826 
1827 	qos = &conn->iso_qos;
1828 	pdu->cig_id = cig_id;
1829 	hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval);
1830 	hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval);
1831 	pdu->sca = qos->ucast.sca;
1832 	pdu->packing = qos->ucast.packing;
1833 	pdu->framing = qos->ucast.framing;
1834 	pdu->c_latency = cpu_to_le16(qos->ucast.out.latency);
1835 	pdu->p_latency = cpu_to_le16(qos->ucast.in.latency);
1836 
1837 	/* Reprogram all CIS(s) with the same CIG; the valid ranges are:
1838 	 * num_cis: 0x00 to 0x1F
1839 	 * cis_id: 0x00 to 0xEF
1840 	 */
1841 	for (cis_id = 0x00; cis_id < 0xf0 &&
1842 	     aux_num_cis < pdu->num_cis; cis_id++) {
1843 		struct hci_cis_params *cis;
1844 
1845 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1846 		if (!conn)
1847 			continue;
1848 
1849 		qos = &conn->iso_qos;
1850 
1851 		cis = &pdu->cis[aux_num_cis++];
1852 		cis->cis_id = cis_id;
1853 		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1854 		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1855 		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1856 			      qos->ucast.in.phy;
1857 		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1858 			      qos->ucast.out.phy;
1859 		cis->c_rtn  = qos->ucast.out.rtn;
1860 		cis->p_rtn  = qos->ucast.in.rtn;
1861 	}
1862 	pdu->num_cis = aux_num_cis;
1863 
1864 	if (!pdu->num_cis)
1865 		return 0;
1866 
1867 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1868 				     struct_size(pdu, cis, pdu->num_cis),
1869 				     pdu, HCI_CMD_TIMEOUT);
1870 }
1871 
1872 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1873 {
1874 	struct hci_dev *hdev = conn->hdev;
1875 	struct iso_list_data data;
1876 
1877 	memset(&data, 0, sizeof(data));
1878 
1879 	/* Allocate the first still-reconfigurable CIG if not set */
1880 	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1881 		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1882 			data.count = 0;
1883 
1884 			hci_conn_hash_list_state(hdev, find_cis, CIS_LINK,
1885 						 BT_CONNECT, &data);
1886 			if (data.count)
1887 				continue;
1888 
1889 			hci_conn_hash_list_state(hdev, find_cis, CIS_LINK,
1890 						 BT_CONNECTED, &data);
1891 			if (!data.count)
1892 				break;
1893 		}
1894 
1895 		if (data.cig == 0xf0)
1896 			return false;
1897 
1898 		/* Update CIG */
1899 		qos->ucast.cig = data.cig;
1900 	}
1901 
1902 	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1903 		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1904 					     qos->ucast.cis))
1905 			return false;
1906 		goto done;
1907 	}
1908 
1909 	/* Allocate first available CIS if not set */
1910 	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1911 	     data.cis++) {
1912 		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1913 					      data.cis)) {
1914 			/* Update CIS */
1915 			qos->ucast.cis = data.cis;
1916 			break;
1917 		}
1918 	}
1919 
1920 	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1921 		return false;
1922 
1923 done:
1924 	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1925 			       UINT_PTR(qos->ucast.cig), NULL) < 0)
1926 		return false;
1927 
1928 	return true;
1929 }
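
/* Illustrative sketch (annotation, not part of the original source): leaving
 * both identifiers unset makes the search loops above pick the first free
 * CIG/CIS in the 0x00-0xEF range; fixed values are only accepted when still
 * unused.
 *
 *	qos->ucast.cig = BT_ISO_QOS_CIG_UNSET;
 *	qos->ucast.cis = BT_ISO_QOS_CIS_UNSET;
 *	if (!hci_le_set_cig_params(cis, qos))
 *		return ERR_PTR(-EINVAL);
 */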
1930 
1931 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1932 			      __u8 dst_type, struct bt_iso_qos *qos,
1933 			      u16 timeout)
1934 {
1935 	struct hci_conn *cis;
1936 
1937 	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1938 				       qos->ucast.cis);
1939 	if (!cis) {
1940 		cis = hci_conn_add_unset(hdev, CIS_LINK, dst,
1941 					 HCI_ROLE_MASTER);
1942 		if (IS_ERR(cis))
1943 			return cis;
1944 		cis->cleanup = cis_cleanup;
1945 		cis->dst_type = dst_type;
1946 		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1947 		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1948 		cis->conn_timeout = timeout;
1949 	}
1950 
1951 	if (cis->state == BT_CONNECTED)
1952 		return cis;
1953 
1954 	/* Check if the CIS has been set and the settings match */
1955 	if (cis->state == BT_BOUND &&
1956 	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1957 		return cis;
1958 
1959 	/* Update LINK PHYs according to QoS preference */
1960 	cis->le_tx_phy = qos->ucast.out.phy;
1961 	cis->le_rx_phy = qos->ucast.in.phy;
1962 
1963 	/* If output interval is not set use the input interval as it cannot be
1964 	 * 0x000000.
1965 	 */
1966 	if (!qos->ucast.out.interval)
1967 		qos->ucast.out.interval = qos->ucast.in.interval;
1968 
1969 	/* If input interval is not set use the output interval as it cannot be
1970 	 * 0x000000.
1971 	 */
1972 	if (!qos->ucast.in.interval)
1973 		qos->ucast.in.interval = qos->ucast.out.interval;
1974 
1975 	/* If output latency is not set use the input latency as it cannot be
1976 	 * 0x0000.
1977 	 */
1978 	if (!qos->ucast.out.latency)
1979 		qos->ucast.out.latency = qos->ucast.in.latency;
1980 
1981 	/* If input latency is not set use the output latency as it cannot be
1982 	 * 0x0000.
1983 	 */
1984 	if (!qos->ucast.in.latency)
1985 		qos->ucast.in.latency = qos->ucast.out.latency;
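
	/* Worked example of the mirroring above (annotation, values made up):
	 * a unidirectional sink that only set in.interval = 10000 (10 ms) and
	 * in.latency = 10 ends up with out.interval = 10000 and
	 * out.latency = 10 as well, keeping the CIG parameters valid.
	 */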
1986 
1987 	if (!hci_le_set_cig_params(cis, qos)) {
1988 		hci_conn_drop(cis);
1989 		return ERR_PTR(-EINVAL);
1990 	}
1991 
1992 	hci_conn_hold(cis);
1993 
1994 	cis->iso_qos = *qos;
1995 	cis->state = BT_BOUND;
1996 
1997 	return cis;
1998 }
1999 
2000 bool hci_iso_setup_path(struct hci_conn *conn)
2001 {
2002 	struct hci_dev *hdev = conn->hdev;
2003 	struct hci_cp_le_setup_iso_path cmd;
2004 
2005 	memset(&cmd, 0, sizeof(cmd));
2006 
2007 	if (conn->iso_qos.ucast.out.sdu) {
2008 		cmd.handle = cpu_to_le16(conn->handle);
2009 		cmd.direction = 0x00; /* Input (Host to Controller) */
2010 		cmd.path = 0x00; /* HCI path if enabled */
2011 		cmd.codec = 0x03; /* Transparent Data */
2012 
2013 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2014 				 &cmd) < 0)
2015 			return false;
2016 	}
2017 
2018 	if (conn->iso_qos.ucast.in.sdu) {
2019 		cmd.handle = cpu_to_le16(conn->handle);
2020 		cmd.direction = 0x01; /* Output (Controller to Host) */
2021 		cmd.path = 0x00; /* HCI path if enabled */
2022 		cmd.codec = 0x03; /* Transparent Data */
2023 
2024 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2025 				 &cmd) < 0)
2026 			return false;
2027 	}
2028 
2029 	return true;
2030 }
2031 
2032 int hci_conn_check_create_cis(struct hci_conn *conn)
2033 {
2034 	if (conn->type != CIS_LINK)
2035 		return -EINVAL;
2036 
2037 	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
2038 	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
2039 		return 1;
2040 
2041 	return 0;
2042 }
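
/* Annotation: the return values above are 0 (ready to issue LE Create CIS),
 * 1 (not ready yet, e.g. the parent ACL is not connected or the handle is
 * still unset) and -EINVAL (not a CIS connection).
 */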
2043 
2044 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
2045 {
2046 	return hci_le_create_cis_sync(hdev);
2047 }
2048 
2049 int hci_le_create_cis_pending(struct hci_dev *hdev)
2050 {
2051 	struct hci_conn *conn;
2052 	bool pending = false;
2053 
2054 	rcu_read_lock();
2055 
2056 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2057 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
2058 			rcu_read_unlock();
2059 			return -EBUSY;
2060 		}
2061 
2062 		if (!hci_conn_check_create_cis(conn))
2063 			pending = true;
2064 	}
2065 
2066 	rcu_read_unlock();
2067 
2068 	if (!pending)
2069 		return 0;
2070 
2071 	/* Queue Create CIS */
2072 	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
2073 }
2074 
2075 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
2076 			      struct bt_iso_io_qos *qos, __u8 phy)
2077 {
2078 	/* Only set MTU if PHY is enabled */
2079 	if (!qos->sdu && qos->phy)
2080 		qos->sdu = conn->mtu;
2081 
2082 	/* Use the same PHY as ACL if set to any */
2083 	if (qos->phy == BT_ISO_PHY_ANY)
2084 		qos->phy = phy;
2085 
2086 	/* Use LE ACL connection interval if not set */
2087 	if (!qos->interval)
2088 		/* Convert the ACL interval from 1.25 ms units to us */
2089 		qos->interval = conn->le_conn_interval * 1250;
2090 
2091 	/* Use LE ACL connection latency if not set */
2092 	if (!qos->latency)
2093 		qos->latency = conn->le_conn_latency;
2094 }
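
/* Worked example (annotation, values made up): with an LE ACL using
 * le_conn_interval = 24 (24 * 1.25 ms = 30 ms), an ISO QoS that left
 * interval at 0 ends up with qos->interval = 24 * 1250 = 30000 us.
 */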
2095 
2096 static int create_big_sync(struct hci_dev *hdev, void *data)
2097 {
2098 	struct hci_conn *conn = data;
2099 	struct bt_iso_qos *qos = &conn->iso_qos;
2100 	u16 interval, sync_interval = 0;
2101 	u32 flags = 0;
2102 	int err;
2103 
2104 	if (qos->bcast.out.phy == 0x02)
2105 		flags |= MGMT_ADV_FLAG_SEC_2M;
2106 
2107 	/* Align intervals */
2108 	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2109 
2110 	if (qos->bcast.bis)
2111 		sync_interval = interval * 4;
2112 
2113 	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->sid,
2114 				     conn->le_per_adv_data_len,
2115 				     conn->le_per_adv_data, flags, interval,
2116 				     interval, sync_interval);
2117 	if (err)
2118 		return err;
2119 
2120 	return hci_le_create_big(conn, &conn->iso_qos);
2121 }
2122 
2123 struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
2124 				    __u8 dst_type, __u8 sid,
2125 				    struct bt_iso_qos *qos)
2126 {
2127 	struct hci_conn *conn;
2128 
2129 	bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid);
2130 
2131 	conn = hci_conn_add_unset(hdev, PA_LINK, dst, HCI_ROLE_SLAVE);
2132 	if (IS_ERR(conn))
2133 		return conn;
2134 
2135 	conn->iso_qos = *qos;
2136 	conn->dst_type = dst_type;
2137 	conn->sid = sid;
2138 	conn->state = BT_LISTEN;
2139 	conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);
2140 
2141 	hci_conn_hold(conn);
2142 
2143 	hci_connect_pa_sync(hdev, conn);
2144 
2145 	return conn;
2146 }
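
/* Annotation: qos->bcast.sync_timeout is in units of 10 ms, hence the
 * conversion above. E.g. a sync_timeout of 2000 (0x07d0) yields
 * msecs_to_jiffies(20000), i.e. a 20 second synchronization timeout.
 */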
2147 
2148 int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2149 			     struct bt_iso_qos *qos, __u16 sync_handle,
2150 			     __u8 num_bis, __u8 bis[])
2151 {
2152 	int err;
2153 
2154 	if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
2155 		return -EINVAL;
2156 
2157 	err = qos_set_big(hdev, qos);
2158 	if (err)
2159 		return err;
2160 
2161 	if (hcon) {
2162 		/* Update hcon QoS */
2163 		hcon->iso_qos = *qos;
2164 
2165 		hcon->num_bis = num_bis;
2166 		memcpy(hcon->bis, bis, num_bis);
2167 		hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
2168 	}
2169 
2170 	return hci_connect_big_sync(hdev, hcon);
2171 }
2172 
2173 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2174 {
2175 	struct hci_conn *conn = data;
2176 
2177 	bt_dev_dbg(hdev, "conn %p", conn);
2178 
2179 	if (err) {
2180 		bt_dev_err(hdev, "Unable to create BIG: %d", err);
2181 		hci_connect_cfm(conn, err);
2182 		hci_conn_del(conn);
2183 	}
2184 }
2185 
2186 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
2187 			      struct bt_iso_qos *qos,
2188 			      __u8 base_len, __u8 *base, u16 timeout)
2189 {
2190 	struct hci_conn *conn;
2191 	struct hci_conn *parent;
2192 	__u8 eir[HCI_MAX_PER_AD_LENGTH];
2193 	struct hci_link *link;
2194 
2195 	/* Look for any BIS that is open for rebinding */
2196 	conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN,
2197 					      HCI_ROLE_MASTER);
2198 	if (conn) {
2199 		memcpy(qos, &conn->iso_qos, sizeof(*qos));
2200 		conn->state = BT_CONNECTED;
2201 		return conn;
2202 	}
2203 
2204 	if (base_len && base)
2205 		base_len = eir_append_service_data(eir, 0,  0x1851,
2206 						   base, base_len);
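
	/* Annotation (assuming the usual EIR Service Data layout): the helper
	 * above should leave eir[] looking like:
	 *
	 *	eir[0] = base_len + 3		AD length
	 *	eir[1] = 0x16			Service Data - 16-bit UUID
	 *	eir[2] = 0x51, eir[3] = 0x18	UUID 0x1851, little endian
	 *	eir[4..]			BASE
	 */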
2207 
2208 	/* We need a hci_conn object using BDADDR_ANY as dst */
2209 	conn = hci_add_bis(hdev, dst, sid, qos, base_len, eir, timeout);
2210 	if (IS_ERR(conn))
2211 		return conn;
2212 
2213 	/* Update LINK PHYs according to QoS preference */
2214 	conn->le_tx_phy = qos->bcast.out.phy;
2216 
2217 	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
2218 	if (base_len && base) {
2219 		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2220 		conn->le_per_adv_data_len = base_len;
2221 	}
2222 
2223 	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2224 			  conn->le_tx_phy ? conn->le_tx_phy :
2225 			  hdev->le_tx_def_phys);
2226 
2227 	conn->iso_qos = *qos;
2228 	conn->state = BT_BOUND;
2229 
2230 	/* Link BISes together */
2231 	parent = hci_conn_hash_lookup_big(hdev,
2232 					  conn->iso_qos.bcast.big);
2233 	if (parent && parent != conn) {
2234 		link = hci_conn_link(parent, conn);
2235 		hci_conn_drop(conn);
2236 		if (!link)
2237 			return ERR_PTR(-ENOLINK);
2238 	}
2239 
2240 	return conn;
2241 }
2242 
2243 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2244 {
2245 	struct iso_list_data *d = data;
2246 
2247 	/* Skip if not broadcast/ANY address */
2248 	if (bacmp(&conn->dst, BDADDR_ANY))
2249 		return;
2250 
2251 	if (d->big != conn->iso_qos.bcast.big ||
2252 	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2253 	    d->bis != conn->iso_qos.bcast.bis)
2254 		return;
2255 
2256 	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2257 }
2258 
2259 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2260 				 __u8 dst_type, __u8 sid,
2261 				 struct bt_iso_qos *qos,
2262 				 __u8 base_len, __u8 *base, u16 timeout)
2263 {
2264 	struct hci_conn *conn;
2265 	int err;
2266 	struct iso_list_data data;
2267 
2268 	conn = hci_bind_bis(hdev, dst, sid, qos, base_len, base, timeout);
2269 	if (IS_ERR(conn))
2270 		return conn;
2271 
2272 	if (conn->state == BT_CONNECTED)
2273 		return conn;
2274 
2275 	/* Check if the SID needs to be allocated, then search for the first
2276 	 * available.
2277 	 */
2278 	if (conn->sid == HCI_SID_INVALID) {
2279 		u8 sid;
2280 
2281 		for (sid = 0; sid <= 0x0f; sid++) {
2282 			if (!hci_find_adv_sid(hdev, sid)) {
2283 				conn->sid = sid;
2284 				break;
2285 			}
2286 		}
2287 	}
2288 
2289 	data.big = qos->bcast.big;
2290 	data.bis = qos->bcast.bis;
2291 
2292 	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2293 	 * the start periodic advertising and create BIG commands have
2294 	 * been queued
2295 	 */
2296 	hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK,
2297 				 BT_BOUND, &data);
2298 
2299 	/* Queue start periodic advertising and create BIG */
2300 	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2301 				 create_big_complete);
2302 	if (err < 0) {
2303 		hci_conn_drop(conn);
2304 		return ERR_PTR(err);
2305 	}
2306 
2307 	return conn;
2308 }
2309 
2310 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2311 				 __u8 dst_type, struct bt_iso_qos *qos,
2312 				 u16 timeout)
2313 {
2314 	struct hci_conn *le;
2315 	struct hci_conn *cis;
2316 	struct hci_link *link;
2317 
2318 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2319 		le = hci_connect_le(hdev, dst, dst_type, false,
2320 				    BT_SECURITY_LOW,
2321 				    HCI_LE_CONN_TIMEOUT,
2322 				    HCI_ROLE_SLAVE, 0, 0);
2323 	else
2324 		le = hci_connect_le_scan(hdev, dst, dst_type,
2325 					 BT_SECURITY_LOW,
2326 					 HCI_LE_CONN_TIMEOUT,
2327 					 CONN_REASON_ISO_CONNECT);
2328 	if (IS_ERR(le))
2329 		return le;
2330 
2331 	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2332 			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2333 	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2334 			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2335 
2336 	cis = hci_bind_cis(hdev, dst, dst_type, qos, timeout);
2337 	if (IS_ERR(cis)) {
2338 		hci_conn_drop(le);
2339 		return cis;
2340 	}
2341 
2342 	link = hci_conn_link(le, cis);
2343 	hci_conn_drop(cis);
2344 	if (!link) {
2345 		hci_conn_drop(le);
2346 		return ERR_PTR(-ENOLINK);
2347 	}
2348 
2349 	cis->state = BT_CONNECT;
2350 
2351 	hci_le_create_cis_pending(hdev);
2352 
2353 	return cis;
2354 }
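
/* Illustrative sketch (annotation, not part of the original source): an
 * iso_sock_connect()-style caller; names from net/bluetooth/iso.c are used
 * for flavour only.
 *
 *	struct hci_conn *cis;
 *
 *	cis = hci_connect_cis(hdev, &dst, BDADDR_LE_PUBLIC,
 *			      &iso_pi(sk)->qos, timeout);
 *	if (IS_ERR(cis))
 *		return PTR_ERR(cis);
 */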
2355 
2356 /* Check link security requirement */
2357 int hci_conn_check_link_mode(struct hci_conn *conn)
2358 {
2359 	BT_DBG("hcon %p", conn);
2360 
2361 	/* In Secure Connections Only mode, it is required that Secure
2362 	 * Connections is used and the link is encrypted with AES-CCM
2363 	 * using a P-256 authenticated combination key.
2364 	 */
2365 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2366 		if (!hci_conn_sc_enabled(conn) ||
2367 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2368 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2369 			return 0;
2370 	}
2371 
2372 	/* AES encryption is required for Level 4:
2373 	 *
2374 	 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2375 	 * page 1319:
2376 	 *
2377 	 * 128-bit equivalent strength for link and encryption keys
2378 	 * required using FIPS approved algorithms (E0 not allowed,
2379 	 * SAFER+ not allowed, and P-192 not allowed; encryption key
2380 	 * not shortened)
2381 	 */
2382 	if (conn->sec_level == BT_SECURITY_FIPS &&
2383 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2384 		bt_dev_err(conn->hdev,
2385 			   "Invalid security: Missing AES-CCM usage");
2386 		return 0;
2387 	}
2388 
2389 	if (hci_conn_ssp_enabled(conn) &&
2390 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2391 		return 0;
2392 
2393 	return 1;
2394 }
2395 
2396 /* Authenticate remote device */
2397 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2398 {
2399 	BT_DBG("hcon %p", conn);
2400 
2401 	if (conn->pending_sec_level > sec_level)
2402 		sec_level = conn->pending_sec_level;
2403 
2404 	if (sec_level > conn->sec_level)
2405 		conn->pending_sec_level = sec_level;
2406 	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2407 		return 1;
2408 
2409 	/* Make sure we preserve an existing MITM requirement */
2410 	auth_type |= (conn->auth_type & 0x01);
2411 
2412 	conn->auth_type = auth_type;
2413 
2414 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2415 		struct hci_cp_auth_requested cp;
2416 
2417 		cp.handle = cpu_to_le16(conn->handle);
2418 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2419 			     sizeof(cp), &cp);
2420 
2421 		/* Set the ENCRYPT_PEND to trigger encryption after
2422 		 * authentication.
2423 		 */
2424 		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2425 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2426 	}
2427 
2428 	return 0;
2429 }
2430 
2431 /* Encrypt the link */
2432 static void hci_conn_encrypt(struct hci_conn *conn)
2433 {
2434 	BT_DBG("hcon %p", conn);
2435 
2436 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2437 		struct hci_cp_set_conn_encrypt cp;
2438 		cp.handle  = cpu_to_le16(conn->handle);
2439 		cp.encrypt = 0x01;
2440 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2441 			     &cp);
2442 	}
2443 }
2444 
2445 /* Enable security */
2446 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2447 		      bool initiator)
2448 {
2449 	BT_DBG("hcon %p", conn);
2450 
2451 	if (conn->type == LE_LINK)
2452 		return smp_conn_security(conn, sec_level);
2453 
2454 	/* For SDP we don't need the link key. */
2455 	if (sec_level == BT_SECURITY_SDP)
2456 		return 1;
2457 
2458 	/* For non-2.1 devices and low security levels we don't need the link
2459 	   key. */
2460 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2461 		return 1;
2462 
2463 	/* For other security levels we need the link key. */
2464 	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2465 		goto auth;
2466 
2467 	switch (conn->key_type) {
2468 	case HCI_LK_AUTH_COMBINATION_P256:
2469 		/* An authenticated FIPS approved combination key has
2470 		 * sufficient security for security level 4 or lower.
2471 		 */
2472 		if (sec_level <= BT_SECURITY_FIPS)
2473 			goto encrypt;
2474 		break;
2475 	case HCI_LK_AUTH_COMBINATION_P192:
2476 		/* An authenticated combination key has sufficient security for
2477 		 * security level 3 or lower.
2478 		 */
2479 		if (sec_level <= BT_SECURITY_HIGH)
2480 			goto encrypt;
2481 		break;
2482 	case HCI_LK_UNAUTH_COMBINATION_P192:
2483 	case HCI_LK_UNAUTH_COMBINATION_P256:
2484 		/* An unauthenticated combination key has sufficient security
2485 		 * for security level 2 or lower.
2486 		 */
2487 		if (sec_level <= BT_SECURITY_MEDIUM)
2488 			goto encrypt;
2489 		break;
2490 	case HCI_LK_COMBINATION:
2491 		/* A combination key always has sufficient security for
2492 		 * security level 2 or lower. The high security level
2493 		 * requires that the combination key was generated using
2494 		 * the maximum PIN code length (16). For pre-2.1 units.
2495 		 */
2496 		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2497 			goto encrypt;
2498 		break;
2499 	default:
2500 		break;
2501 	}
2502 
2503 auth:
2504 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2505 		return 0;
2506 
2507 	if (initiator)
2508 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2509 
2510 	if (!hci_conn_auth(conn, sec_level, auth_type))
2511 		return 0;
2512 
2513 encrypt:
2514 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2515 		/* Ensure that the encryption key size has been read,
2516 		 * otherwise stall the upper layer responses.
2517 		 */
2518 		if (!conn->enc_key_size)
2519 			return 0;
2520 
2521 		/* Nothing else needed, all requirements are met */
2522 		return 1;
2523 	}
2524 
2525 	hci_conn_encrypt(conn);
2526 	return 0;
2527 }
2528 EXPORT_SYMBOL(hci_conn_security);
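
/* Illustrative sketch (annotation, not part of the original source): callers
 * treat a return of 1 as "requirements already met" and 0 as "pending"; the
 * continuation helpers below are made up.
 *
 *	if (hci_conn_security(hcon, chan->sec_level, auth_type, true))
 *		proceed(chan);			(made-up continuation)
 *	else
 *		wait_for_security_cfm(chan);	(made-up continuation)
 */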
2529 
2530 /* Check secure link requirement */
2531 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2532 {
2533 	BT_DBG("hcon %p", conn);
2534 
2535 	/* Accept if non-secure or higher security level is required */
2536 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2537 		return 1;
2538 
2539 	/* Accept if secure or higher security level is already present */
2540 	if (conn->sec_level == BT_SECURITY_HIGH ||
2541 	    conn->sec_level == BT_SECURITY_FIPS)
2542 		return 1;
2543 
2544 	/* Reject not secure link */
2545 	return 0;
2546 }
2547 EXPORT_SYMBOL(hci_conn_check_secure);
2548 
2549 /* Switch role */
2550 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2551 {
2552 	BT_DBG("hcon %p", conn);
2553 
2554 	if (role == conn->role)
2555 		return 1;
2556 
2557 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2558 		struct hci_cp_switch_role cp;
2559 		bacpy(&cp.bdaddr, &conn->dst);
2560 		cp.role = role;
2561 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2562 	}
2563 
2564 	return 0;
2565 }
2566 EXPORT_SYMBOL(hci_conn_switch_role);
2567 
2568 /* Enter active mode */
2569 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2570 {
2571 	struct hci_dev *hdev = conn->hdev;
2572 
2573 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2574 
2575 	if (conn->mode != HCI_CM_SNIFF)
2576 		goto timer;
2577 
2578 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2579 		goto timer;
2580 
2581 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2582 		struct hci_cp_exit_sniff_mode cp;
2583 		cp.handle = cpu_to_le16(conn->handle);
2584 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2585 	}
2586 
2587 timer:
2588 	if (hdev->idle_timeout > 0)
2589 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2590 				   msecs_to_jiffies(hdev->idle_timeout));
2591 }
2592 
2593 /* Drop all connections on the device */
2594 void hci_conn_hash_flush(struct hci_dev *hdev)
2595 {
2596 	struct list_head *head = &hdev->conn_hash.list;
2597 	struct hci_conn *conn;
2598 
2599 	BT_DBG("hdev %s", hdev->name);
2600 
2601 	/* We should not traverse the list here, because hci_conn_del
2602 	 * can remove extra links, which may cause the list traversal
2603 	 * to hit items that have already been released.
2604 	 */
2605 	while ((conn = list_first_entry_or_null(head,
2606 						struct hci_conn,
2607 						list)) != NULL) {
2608 		conn->state = BT_CLOSED;
2609 		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2610 		hci_conn_del(conn);
2611 	}
2612 }
2613 
2614 static u32 get_link_mode(struct hci_conn *conn)
2615 {
2616 	u32 link_mode = 0;
2617 
2618 	if (conn->role == HCI_ROLE_MASTER)
2619 		link_mode |= HCI_LM_MASTER;
2620 
2621 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2622 		link_mode |= HCI_LM_ENCRYPT;
2623 
2624 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2625 		link_mode |= HCI_LM_AUTH;
2626 
2627 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2628 		link_mode |= HCI_LM_SECURE;
2629 
2630 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2631 		link_mode |= HCI_LM_FIPS;
2632 
2633 	return link_mode;
2634 }
2635 
2636 int hci_get_conn_list(void __user *arg)
2637 {
2638 	struct hci_conn *c;
2639 	struct hci_conn_list_req req, *cl;
2640 	struct hci_conn_info *ci;
2641 	struct hci_dev *hdev;
2642 	int n = 0, size, err;
2643 
2644 	if (copy_from_user(&req, arg, sizeof(req)))
2645 		return -EFAULT;
2646 
2647 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2648 		return -EINVAL;
2649 
2650 	size = sizeof(req) + req.conn_num * sizeof(*ci);
2651 
2652 	cl = kmalloc(size, GFP_KERNEL);
2653 	if (!cl)
2654 		return -ENOMEM;
2655 
2656 	hdev = hci_dev_get(req.dev_id);
2657 	if (!hdev) {
2658 		kfree(cl);
2659 		return -ENODEV;
2660 	}
2661 
2662 	ci = cl->conn_info;
2663 
2664 	hci_dev_lock(hdev);
2665 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2666 		bacpy(&(ci + n)->bdaddr, &c->dst);
2667 		(ci + n)->handle = c->handle;
2668 		(ci + n)->type  = c->type;
2669 		(ci + n)->out   = c->out;
2670 		(ci + n)->state = c->state;
2671 		(ci + n)->link_mode = get_link_mode(c);
2672 		if (++n >= req.conn_num)
2673 			break;
2674 	}
2675 	hci_dev_unlock(hdev);
2676 
2677 	cl->dev_id = hdev->id;
2678 	cl->conn_num = n;
2679 	size = sizeof(req) + n * sizeof(*ci);
2680 
2681 	hci_dev_put(hdev);
2682 
2683 	err = copy_to_user(arg, cl, size);
2684 	kfree(cl);
2685 
2686 	return err ? -EFAULT : 0;
2687 }
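
/* Illustrative userspace sketch (annotation, not part of the original
 * source): this path is reachable via the HCIGETCONNLIST ioctl on a raw
 * BTPROTO_HCI socket; buffer sizing is the caller's responsibility and the
 * values below are examples only.
 *
 *	struct {
 *		struct hci_conn_list_req req;
 *		struct hci_conn_info ci[8];
 *	} buf = { .req = { .dev_id = 0, .conn_num = 8 } };
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(dd, HCIGETCONNLIST, &buf);
 */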
2688 
2689 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2690 {
2691 	struct hci_conn_info_req req;
2692 	struct hci_conn_info ci;
2693 	struct hci_conn *conn;
2694 	char __user *ptr = arg + sizeof(req);
2695 
2696 	if (copy_from_user(&req, arg, sizeof(req)))
2697 		return -EFAULT;
2698 
2699 	hci_dev_lock(hdev);
2700 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2701 	if (conn) {
2702 		bacpy(&ci.bdaddr, &conn->dst);
2703 		ci.handle = conn->handle;
2704 		ci.type  = conn->type;
2705 		ci.out   = conn->out;
2706 		ci.state = conn->state;
2707 		ci.link_mode = get_link_mode(conn);
2708 	}
2709 	hci_dev_unlock(hdev);
2710 
2711 	if (!conn)
2712 		return -ENOENT;
2713 
2714 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2715 }
2716 
2717 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2718 {
2719 	struct hci_auth_info_req req;
2720 	struct hci_conn *conn;
2721 
2722 	if (copy_from_user(&req, arg, sizeof(req)))
2723 		return -EFAULT;
2724 
2725 	hci_dev_lock(hdev);
2726 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2727 	if (conn)
2728 		req.type = conn->auth_type;
2729 	hci_dev_unlock(hdev);
2730 
2731 	if (!conn)
2732 		return -ENOENT;
2733 
2734 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2735 }
2736 
2737 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2738 {
2739 	struct hci_dev *hdev = conn->hdev;
2740 	struct hci_chan *chan;
2741 
2742 	BT_DBG("%s hcon %p", hdev->name, conn);
2743 
2744 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2745 		BT_DBG("Refusing to create new hci_chan");
2746 		return NULL;
2747 	}
2748 
2749 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2750 	if (!chan)
2751 		return NULL;
2752 
2753 	chan->conn = hci_conn_get(conn);
2754 	skb_queue_head_init(&chan->data_q);
2755 	chan->state = BT_CONNECTED;
2756 
2757 	list_add_rcu(&chan->list, &conn->chan_list);
2758 
2759 	return chan;
2760 }
2761 
2762 void hci_chan_del(struct hci_chan *chan)
2763 {
2764 	struct hci_conn *conn = chan->conn;
2765 	struct hci_dev *hdev = conn->hdev;
2766 
2767 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2768 
2769 	list_del_rcu(&chan->list);
2770 
2771 	synchronize_rcu();
2772 
2773 	/* Prevent new hci_chan's from being created for this hci_conn */
2774 	set_bit(HCI_CONN_DROP, &conn->flags);
2775 
2776 	hci_conn_put(conn);
2777 
2778 	skb_queue_purge(&chan->data_q);
2779 	kfree(chan);
2780 }
2781 
2782 void hci_chan_list_flush(struct hci_conn *conn)
2783 {
2784 	struct hci_chan *chan, *n;
2785 
2786 	BT_DBG("hcon %p", conn);
2787 
2788 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2789 		hci_chan_del(chan);
2790 }
2791 
2792 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2793 						 __u16 handle)
2794 {
2795 	struct hci_chan *hchan;
2796 
2797 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2798 		if (hchan->handle == handle)
2799 			return hchan;
2800 	}
2801 
2802 	return NULL;
2803 }
2804 
2805 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2806 {
2807 	struct hci_conn_hash *h = &hdev->conn_hash;
2808 	struct hci_conn *hcon;
2809 	struct hci_chan *hchan = NULL;
2810 
2811 	rcu_read_lock();
2812 
2813 	list_for_each_entry_rcu(hcon, &h->list, list) {
2814 		hchan = __hci_chan_lookup_handle(hcon, handle);
2815 		if (hchan)
2816 			break;
2817 	}
2818 
2819 	rcu_read_unlock();
2820 
2821 	return hchan;
2822 }
2823 
2824 u32 hci_conn_get_phy(struct hci_conn *conn)
2825 {
2826 	u32 phys = 0;
2827 
2828 	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2829 	 * Table 6.2: Packets defined for synchronous, asynchronous, and
2830 	 * CPB logical transport types.
2831 	 */
2832 	switch (conn->type) {
2833 	case SCO_LINK:
2834 		/* SCO logical transport (1 Mb/s):
2835 		 * HV1, HV2, HV3 and DV.
2836 		 */
2837 		phys |= BT_PHY_BR_1M_1SLOT;
2838 
2839 		break;
2840 
2841 	case ACL_LINK:
2842 		/* ACL logical transport (1 Mb/s) ptt=0:
2843 		 * DH1, DM3, DH3, DM5 and DH5.
2844 		 */
2845 		phys |= BT_PHY_BR_1M_1SLOT;
2846 
2847 		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2848 			phys |= BT_PHY_BR_1M_3SLOT;
2849 
2850 		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2851 			phys |= BT_PHY_BR_1M_5SLOT;
2852 
2853 		/* ACL logical transport (2 Mb/s) ptt=1:
2854 		 * 2-DH1, 2-DH3 and 2-DH5.
2855 		 */
2856 		if (!(conn->pkt_type & HCI_2DH1))
2857 			phys |= BT_PHY_EDR_2M_1SLOT;
2858 
2859 		if (!(conn->pkt_type & HCI_2DH3))
2860 			phys |= BT_PHY_EDR_2M_3SLOT;
2861 
2862 		if (!(conn->pkt_type & HCI_2DH5))
2863 			phys |= BT_PHY_EDR_2M_5SLOT;
2864 
2865 		/* ACL logical transport (3 Mb/s) ptt=1:
2866 		 * 3-DH1, 3-DH3 and 3-DH5.
2867 		 */
2868 		if (!(conn->pkt_type & HCI_3DH1))
2869 			phys |= BT_PHY_EDR_3M_1SLOT;
2870 
2871 		if (!(conn->pkt_type & HCI_3DH3))
2872 			phys |= BT_PHY_EDR_3M_3SLOT;
2873 
2874 		if (!(conn->pkt_type & HCI_3DH5))
2875 			phys |= BT_PHY_EDR_3M_5SLOT;
2876 
2877 		break;
2878 
2879 	case ESCO_LINK:
2880 		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2881 		phys |= BT_PHY_BR_1M_1SLOT;
2882 
2883 		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2884 			phys |= BT_PHY_BR_1M_3SLOT;
2885 
2886 		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2887 		if (!(conn->pkt_type & ESCO_2EV3))
2888 			phys |= BT_PHY_EDR_2M_1SLOT;
2889 
2890 		if (!(conn->pkt_type & ESCO_2EV5))
2891 			phys |= BT_PHY_EDR_2M_3SLOT;
2892 
2893 		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2894 		if (!(conn->pkt_type & ESCO_3EV3))
2895 			phys |= BT_PHY_EDR_3M_1SLOT;
2896 
2897 		if (!(conn->pkt_type & ESCO_3EV5))
2898 			phys |= BT_PHY_EDR_3M_3SLOT;
2899 
2900 		break;
2901 
2902 	case LE_LINK:
2903 		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2904 			phys |= BT_PHY_LE_1M_TX;
2905 
2906 		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2907 			phys |= BT_PHY_LE_1M_RX;
2908 
2909 		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2910 			phys |= BT_PHY_LE_2M_TX;
2911 
2912 		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2913 			phys |= BT_PHY_LE_2M_RX;
2914 
2915 		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2916 			phys |= BT_PHY_LE_CODED_TX;
2917 
2918 		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2919 			phys |= BT_PHY_LE_CODED_RX;
2920 
2921 		break;
2922 	}
2923 
2924 	return phys;
2925 }
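
/* Annotation: for the EDR packet-type bits tested above, a set bit means the
 * packet type shall NOT be used, hence the negated tests. E.g. with the T2
 * eSCO parameters (pkt_type = EDR_ESCO_MASK & ~ESCO_2EV3) only 2-EV3 stays
 * allowed, so the result contains BT_PHY_EDR_2M_1SLOT but none of the other
 * EDR bits.
 */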
2926 
2927 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2928 {
2929 	struct hci_conn *conn = data;
2930 
2931 	if (!hci_conn_valid(hdev, conn))
2932 		return -ECANCELED;
2933 
2934 	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2935 }
2936 
2937 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2938 {
2939 	struct hci_dev *hdev = conn->hdev;
2940 
2941 	/* If abort_reason has already been set it means the connection is
2942 	 * already being aborted so don't attempt to overwrite it.
2943 	 */
2944 	if (conn->abort_reason)
2945 		return 0;
2946 
2947 	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2948 
2949 	conn->abort_reason = reason;
2950 
2951 	/* If the connection is pending check the command opcode since that
2952 	 * might be blocking on hci_cmd_sync_work while waiting for its
2953 	 * respective event, so we need hci_cmd_sync_cancel to cancel it.
2954 	 *
2955 	 * hci_connect_le serializes the connection attempts so only one
2956 	 * connection can be in BT_CONNECT at a time.
2957 	 */
2958 	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2959 		switch (hci_skb_event(hdev->sent_cmd)) {
2960 		case HCI_EV_CONN_COMPLETE:
2961 		case HCI_EV_LE_CONN_COMPLETE:
2962 		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2963 		case HCI_EVT_LE_CIS_ESTABLISHED:
2964 			hci_cmd_sync_cancel(hdev, ECANCELED);
2965 			break;
2966 		}
2967 	/* Cancel connect attempt if still queued/pending */
2968 	} else if (!hci_cancel_connect_sync(hdev, conn)) {
2969 		return 0;
2970 	}
2971 
2972 	/* Run immediately if on cmd_sync_work since this may be called
2973 	 * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which already
2974 	 * queues its callback on cmd_sync_work.
2975 	 */
2976 	return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
2977 }
2978 
2979 void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
2980 			    const struct sockcm_cookie *sockc)
2981 {
2982 	struct sock *sk = skb ? skb->sk : NULL;
2983 	int key;
2984 
2985 	/* This shall be called on a single skb of those generated by user
2986 	 * sendmsg(), and only when the sendmsg() does not return an error to
2987 	 * the user. This is required to keep the tskey that increments here in
2988 	 * sync with possible sendmsg() counting by the user.
2989 	 *
2990 	 * Stream sockets shall set key_offset to the sendmsg() length in bytes
2991 	 * and call this with the last fragment; other sockets shall set it to 1
2992 	 * and call with the first fragment.
2993 
2994 	if (!skb || !sockc || !sk || !key_offset)
2995 		return;
2996 
2997 	sock_tx_timestamp(sk, sockc, &skb_shinfo(skb)->tx_flags);
2998 
2999 	if (sk->sk_type == SOCK_STREAM)
3000 		key = atomic_add_return(key_offset, &sk->sk_tskey);
3001 
3002 	if (sockc->tsflags & SOF_TIMESTAMPING_OPT_ID &&
3003 	    sockc->tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) {
3004 		if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID) {
3005 			skb_shinfo(skb)->tskey = sockc->ts_opt_id;
3006 		} else {
3007 			if (sk->sk_type != SOCK_STREAM)
3008 				key = atomic_inc_return(&sk->sk_tskey);
3009 			skb_shinfo(skb)->tskey = key - 1;
3010 		}
3011 	}
3012 }
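
/* Illustrative userspace sketch (annotation, not part of the original
 * source): requesting completion timestamps on an ISO or L2CAP socket so
 * that the tskey accounting above applies.
 *
 *	unsigned int val = SOF_TIMESTAMPING_SOFTWARE |
 *			   SOF_TIMESTAMPING_TX_COMPLETION |
 *			   SOF_TIMESTAMPING_OPT_ID;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 */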
3013 
3014 void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb)
3015 {
3016 	struct tx_queue *comp = &conn->tx_q;
3017 	bool track = false;
3018 
3019 	/* Emit SND now, i.e. just before sending to the driver */
3020 	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3021 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SND);
3022 
3023 	/* COMPLETION tstamp is emitted for tracked skb later in Number of
3024 	 * Completed Packets event. Available only for flow controlled cases.
3025 	 *
3026 	 * TODO: SCO support without flowctl (needs to be done in drivers)
3027 	 */
3028 	switch (conn->type) {
3029 	case CIS_LINK:
3030 	case BIS_LINK:
3031 	case PA_LINK:
3032 	case ACL_LINK:
3033 	case LE_LINK:
3034 		break;
3035 	case SCO_LINK:
3036 	case ESCO_LINK:
3037 		if (!hci_dev_test_flag(conn->hdev, HCI_SCO_FLOWCTL))
3038 			return;
3039 		break;
3040 	default:
3041 		return;
3042 	}
3043 
3044 	if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP))
3045 		track = true;
3046 
3047 	/* If nothing is tracked, just count extra skbs at the queue head */
3048 	if (!track && !comp->tracked) {
3049 		comp->extra++;
3050 		return;
3051 	}
3052 
3053 	if (track) {
3054 		skb = skb_clone_sk(skb);
3055 		if (!skb)
3056 			goto count_only;
3057 
3058 		comp->tracked++;
3059 	} else {
3060 		skb = skb_clone(skb, GFP_KERNEL);
3061 		if (!skb)
3062 			goto count_only;
3063 	}
3064 
3065 	skb_queue_tail(&comp->queue, skb);
3066 	return;
3067 
3068 count_only:
3069 	/* Stop tracking skbs, and only count. This will not emit timestamps for
3070 	 * the packets, but if we get here something is more seriously wrong.
3071 	 */
3072 	comp->tracked = 0;
3073 	comp->extra += skb_queue_len(&comp->queue) + 1;
3074 	skb_queue_purge(&comp->queue);
3075 }
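
/* Worked example (annotation, values made up): three untracked skbs followed
 * by one tracked skb leave comp->extra = 3 and one clone on comp->queue; the
 * first three dequeues then consume the extras and the fourth emits
 * SCM_TSTAMP_COMPLETION for the tracked skb, matching transmission order.
 */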
3076 
3077 void hci_conn_tx_dequeue(struct hci_conn *conn)
3078 {
3079 	struct tx_queue *comp = &conn->tx_q;
3080 	struct sk_buff *skb;
3081 
3082 	/* If there are tracked skbs, the counted extras go before dequeuing real
3083 	 * skbs, to keep ordering. When nothing is tracked, the ordering doesn't
3084 	 * matter so dequeue real skbs first to get rid of them ASAP.
3085 	 */
3086 	if (comp->extra && (comp->tracked || skb_queue_empty(&comp->queue))) {
3087 		comp->extra--;
3088 		return;
3089 	}
3090 
3091 	skb = skb_dequeue(&comp->queue);
3092 	if (!skb)
3093 		return;
3094 
3095 	if (skb->sk) {
3096 		comp->tracked--;
3097 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk,
3098 				SCM_TSTAMP_COMPLETION);
3099 	}
3100 
3101 	kfree_skb(skb);
3102 }
3103 
3104 u8 *hci_conn_key_enc_size(struct hci_conn *conn)
3105 {
3106 	if (conn->type == ACL_LINK) {
3107 		struct link_key *key;
3108 
3109 		key = hci_find_link_key(conn->hdev, &conn->dst);
3110 		if (!key)
3111 			return NULL;
3112 
3113 		return &key->pin_len;
3114 	} else if (conn->type == LE_LINK) {
3115 		struct smp_ltk *ltk;
3116 
3117 		ltk = hci_find_ltk(conn->hdev, &conn->dst, conn->dst_type,
3118 				   conn->role);
3119 		if (!ltk)
3120 			return NULL;
3121 
3122 		return &ltk->enc_size;
3123 	}
3124 
3125 	return NULL;
3126 }
3127 
3128 int hci_ethtool_ts_info(unsigned int index, int sk_proto,
3129 			struct kernel_ethtool_ts_info *info)
3130 {
3131 	struct hci_dev *hdev;
3132 
3133 	hdev = hci_dev_get(index);
3134 	if (!hdev)
3135 		return -ENODEV;
3136 
3137 	info->so_timestamping =
3138 		SOF_TIMESTAMPING_RX_SOFTWARE |
3139 		SOF_TIMESTAMPING_SOFTWARE;
3140 	info->phc_index = -1;
3141 	info->tx_types = BIT(HWTSTAMP_TX_OFF);
3142 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3143 
3144 	switch (sk_proto) {
3145 	case BTPROTO_ISO:
3146 	case BTPROTO_L2CAP:
3147 		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
3148 		info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION;
3149 		break;
3150 	case BTPROTO_SCO:
3151 		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
3152 		if (hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3153 			info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION;
3154 		break;
3155 	}
3156 
3157 	hci_dev_put(hdev);
3158 	return 0;
3159 }
3160