xref: /linux/net/bluetooth/hci_conn.c (revision bc1a59cff9f797bfbf8f3104507584d89e9ecf2e)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023-2024 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 #include <linux/errqueue.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/l2cap.h>
35 #include <net/bluetooth/iso.h>
36 #include <net/bluetooth/mgmt.h>
37 
38 #include "smp.h"
39 #include "eir.h"
40 
41 struct sco_param {
42 	u16 pkt_type;
43 	u16 max_latency;
44 	u8  retrans_effort;
45 };
46 
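/* Pairs an hci_conn with the ACL handle it is being set up on so both can
 * be passed as a single context pointer to hci_cmd_sync_queue() callbacks.
 */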
47 struct conn_handle_t {
48 	struct hci_conn *conn;
49 	__u16 handle;
50 };
51 
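/* (e)SCO parameter sets, tried in the order listed via conn->attempt. The
 * labels (S1-S3, D0-D1, T1-T2) refer to the audio parameter settings
 * defined in the HFP specification.
 */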
52 static const struct sco_param esco_param_cvsd[] = {
53 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
54 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
55 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
56 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
57 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
58 };
59 
60 static const struct sco_param sco_param_cvsd[] = {
61 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
62 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
63 };
64 
65 static const struct sco_param esco_param_msbc[] = {
66 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
67 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
68 };
69 
70 /* This function requires the caller holds hdev->lock */
71 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
72 {
73 	struct hci_conn_params *params;
74 	struct hci_dev *hdev = conn->hdev;
75 	struct smp_irk *irk;
76 	bdaddr_t *bdaddr;
77 	u8 bdaddr_type;
78 
79 	bdaddr = &conn->dst;
80 	bdaddr_type = conn->dst_type;
81 
82 	/* Check if we need to convert to identity address */
83 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
84 	if (irk) {
85 		bdaddr = &irk->bdaddr;
86 		bdaddr_type = irk->addr_type;
87 	}
88 
89 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
90 					   bdaddr_type);
91 	if (!params)
92 		return;
93 
94 	if (params->conn) {
95 		hci_conn_drop(params->conn);
96 		hci_conn_put(params->conn);
97 		params->conn = NULL;
98 	}
99 
100 	if (!params->explicit_connect)
101 		return;
102 
103 	/* If the status indicates successful cancellation of
104 	 * the attempt (i.e. Unknown Connection Id) there's no point of
105 	 * notifying failure since we'll go back to keep trying to
106 	 * connect. The only exception is explicit connect requests
107 	 * where a timeout + cancel does indicate an actual failure.
108 	 */
109 	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
110 		mgmt_connect_failed(hdev, conn, status);
111 
112 	/* The connection attempt was doing a scan for a new RPA, and is
113 	 * in the scan phase. If the params are not associated with any other
114 	 * autoconnect action, remove them completely. If they are, just unmark
115 	 * them as waiting for connection by clearing the explicit_connect field.
116 	 */
117 	params->explicit_connect = false;
118 
119 	hci_pend_le_list_del_init(params);
120 
121 	switch (params->auto_connect) {
122 	case HCI_AUTO_CONN_EXPLICIT:
123 		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
124 		/* return instead of break to avoid duplicate scan update */
125 		return;
126 	case HCI_AUTO_CONN_DIRECT:
127 	case HCI_AUTO_CONN_ALWAYS:
128 		hci_pend_le_list_add(params, &hdev->pend_le_conns);
129 		break;
130 	case HCI_AUTO_CONN_REPORT:
131 		hci_pend_le_list_add(params, &hdev->pend_le_reports);
132 		break;
133 	default:
134 		break;
135 	}
136 
137 	hci_update_passive_scan(hdev);
138 }
139 
140 static void hci_conn_cleanup(struct hci_conn *conn)
141 {
142 	struct hci_dev *hdev = conn->hdev;
143 
144 	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
145 		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
146 
147 	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
148 		hci_remove_link_key(hdev, &conn->dst);
149 
150 	hci_chan_list_flush(conn);
151 
152 	hci_conn_hash_del(hdev, conn);
153 
154 	if (HCI_CONN_HANDLE_UNSET(conn->handle))
155 		ida_free(&hdev->unset_handle_ida, conn->handle);
156 
157 	if (conn->cleanup)
158 		conn->cleanup(conn);
159 
160 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
161 		switch (conn->setting & SCO_AIRMODE_MASK) {
162 		case SCO_AIRMODE_CVSD:
163 		case SCO_AIRMODE_TRANSP:
164 			if (hdev->notify)
165 				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
166 			break;
167 		}
168 	} else {
169 		if (hdev->notify)
170 			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
171 	}
172 
173 	debugfs_remove_recursive(conn->debugfs);
174 
175 	hci_conn_del_sysfs(conn);
176 
177 	hci_dev_put(hdev);
178 }
179 
180 int hci_disconnect(struct hci_conn *conn, __u8 reason)
181 {
182 	BT_DBG("hcon %p", conn);
183 
184 	/* When we are the central of an established connection and it
185 	 * enters the disconnect timeout, go ahead and try to read the
186 	 * current clock offset. Processing of the result is done within
187 	 * the event handling and the hci_clock_offset_evt function.
188 	 */
189 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
190 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
191 		struct hci_dev *hdev = conn->hdev;
192 		struct hci_cp_read_clock_offset clkoff_cp;
193 
194 		clkoff_cp.handle = cpu_to_le16(conn->handle);
195 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
196 			     &clkoff_cp);
197 	}
198 
199 	return hci_abort_conn(conn, reason);
200 }
201 
202 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
203 {
204 	struct hci_dev *hdev = conn->hdev;
205 	struct hci_cp_add_sco cp;
206 
207 	BT_DBG("hcon %p", conn);
208 
209 	conn->state = BT_CONNECT;
210 	conn->out = true;
211 
212 	conn->attempt++;
213 
214 	cp.handle   = cpu_to_le16(handle);
215 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
216 
217 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
218 }
219 
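/* Skip parameter sets that use EDR 2-EV3 packets when the parent ACL's
 * remote side does not support eSCO 2M, advancing conn->attempt past them.
 * Returns true if a usable parameter set remains.
 */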
220 static bool find_next_esco_param(struct hci_conn *conn,
221 				 const struct sco_param *esco_param, int size)
222 {
223 	if (!conn->parent)
224 		return false;
225 
226 	for (; conn->attempt <= size; conn->attempt++) {
227 		if (lmp_esco_2m_capable(conn->parent) ||
228 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
229 			break;
230 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
231 		       conn, conn->attempt);
232 	}
233 
234 	return conn->attempt <= size;
235 }
236 
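/* Configure the vendor offload data path for an eSCO link in both
 * directions (0x00 = input, 0x01 = output) using vendor-specific codec
 * config data. Returns 0 when offload is not requested or not supported.
 */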
237 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
238 {
239 	int err;
240 	__u8 vnd_len, *vnd_data = NULL;
241 	struct hci_op_configure_data_path *cmd = NULL;
242 
243 	/* Do not treat the two checks below as errors: the first means the
244 	 * user does not want HFP offload mode and the second means the vendor
245 	 * controller does not need the HCI command below for offload mode.
246 	 */
247 	if (!codec->data_path || !hdev->get_codec_config_data)
248 		return 0;
249 
250 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
251 					  &vnd_data);
252 	if (err < 0)
253 		goto error;
254 
255 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
256 	if (!cmd) {
257 		err = -ENOMEM;
258 		goto error;
259 	}
260 
261 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
262 	if (err < 0)
263 		goto error;
264 
265 	cmd->vnd_len = vnd_len;
266 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
267 
268 	cmd->direction = 0x00;
269 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
270 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
271 
272 	cmd->direction = 0x01;
273 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
274 				    sizeof(*cmd) + vnd_len, cmd,
275 				    HCI_CMD_TIMEOUT);
276 error:
277 
278 	kfree(cmd);
279 	kfree(vnd_data);
280 	return err;
281 }
282 
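/* hci_cmd_sync work: send Enhanced Setup Synchronous Connection with
 * codec-specific parameters (mSBC, transparent or CVSD). The conn_handle_t
 * context is freed here, and the hcon is validated before use since it may
 * have been deleted while the work was queued.
 */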
283 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
284 {
285 	struct conn_handle_t *conn_handle = data;
286 	struct hci_conn *conn = conn_handle->conn;
287 	__u16 handle = conn_handle->handle;
288 	struct hci_cp_enhanced_setup_sync_conn cp;
289 	const struct sco_param *param;
290 
291 	kfree(conn_handle);
292 
293 	if (!hci_conn_valid(hdev, conn))
294 		return -ECANCELED;
295 
296 	bt_dev_dbg(hdev, "hcon %p", conn);
297 
298 	configure_datapath_sync(hdev, &conn->codec);
299 
300 	conn->state = BT_CONNECT;
301 	conn->out = true;
302 
303 	conn->attempt++;
304 
305 	memset(&cp, 0x00, sizeof(cp));
306 
307 	cp.handle   = cpu_to_le16(handle);
308 
309 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
310 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
311 
312 	switch (conn->codec.id) {
313 	case BT_CODEC_MSBC:
314 		if (!find_next_esco_param(conn, esco_param_msbc,
315 					  ARRAY_SIZE(esco_param_msbc)))
316 			return -EINVAL;
317 
318 		param = &esco_param_msbc[conn->attempt - 1];
319 		cp.tx_coding_format.id = 0x05;
320 		cp.rx_coding_format.id = 0x05;
321 		cp.tx_codec_frame_size = __cpu_to_le16(60);
322 		cp.rx_codec_frame_size = __cpu_to_le16(60);
323 		cp.in_bandwidth = __cpu_to_le32(32000);
324 		cp.out_bandwidth = __cpu_to_le32(32000);
325 		cp.in_coding_format.id = 0x04;
326 		cp.out_coding_format.id = 0x04;
327 		cp.in_coded_data_size = __cpu_to_le16(16);
328 		cp.out_coded_data_size = __cpu_to_le16(16);
329 		cp.in_pcm_data_format = 2;
330 		cp.out_pcm_data_format = 2;
331 		cp.in_pcm_sample_payload_msb_pos = 0;
332 		cp.out_pcm_sample_payload_msb_pos = 0;
333 		cp.in_data_path = conn->codec.data_path;
334 		cp.out_data_path = conn->codec.data_path;
335 		cp.in_transport_unit_size = 1;
336 		cp.out_transport_unit_size = 1;
337 		break;
338 
339 	case BT_CODEC_TRANSPARENT:
340 		if (!find_next_esco_param(conn, esco_param_msbc,
341 					  ARRAY_SIZE(esco_param_msbc)))
342 			return -EINVAL;
343 
344 		param = &esco_param_msbc[conn->attempt - 1];
345 		cp.tx_coding_format.id = 0x03;
346 		cp.rx_coding_format.id = 0x03;
347 		cp.tx_codec_frame_size = __cpu_to_le16(60);
348 		cp.rx_codec_frame_size = __cpu_to_le16(60);
349 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
350 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
351 		cp.in_coding_format.id = 0x03;
352 		cp.out_coding_format.id = 0x03;
353 		cp.in_coded_data_size = __cpu_to_le16(16);
354 		cp.out_coded_data_size = __cpu_to_le16(16);
355 		cp.in_pcm_data_format = 2;
356 		cp.out_pcm_data_format = 2;
357 		cp.in_pcm_sample_payload_msb_pos = 0;
358 		cp.out_pcm_sample_payload_msb_pos = 0;
359 		cp.in_data_path = conn->codec.data_path;
360 		cp.out_data_path = conn->codec.data_path;
361 		cp.in_transport_unit_size = 1;
362 		cp.out_transport_unit_size = 1;
363 		break;
364 
365 	case BT_CODEC_CVSD:
366 		if (conn->parent && lmp_esco_capable(conn->parent)) {
367 			if (!find_next_esco_param(conn, esco_param_cvsd,
368 						  ARRAY_SIZE(esco_param_cvsd)))
369 				return -EINVAL;
370 			param = &esco_param_cvsd[conn->attempt - 1];
371 		} else {
372 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
373 				return -EINVAL;
374 			param = &sco_param_cvsd[conn->attempt - 1];
375 		}
376 		cp.tx_coding_format.id = 2;
377 		cp.rx_coding_format.id = 2;
378 		cp.tx_codec_frame_size = __cpu_to_le16(60);
379 		cp.rx_codec_frame_size = __cpu_to_le16(60);
380 		cp.in_bandwidth = __cpu_to_le32(16000);
381 		cp.out_bandwidth = __cpu_to_le32(16000);
382 		cp.in_coding_format.id = 4;
383 		cp.out_coding_format.id = 4;
384 		cp.in_coded_data_size = __cpu_to_le16(16);
385 		cp.out_coded_data_size = __cpu_to_le16(16);
386 		cp.in_pcm_data_format = 2;
387 		cp.out_pcm_data_format = 2;
388 		cp.in_pcm_sample_payload_msb_pos = 0;
389 		cp.out_pcm_sample_payload_msb_pos = 0;
390 		cp.in_data_path = conn->codec.data_path;
391 		cp.out_data_path = conn->codec.data_path;
392 		cp.in_transport_unit_size = 16;
393 		cp.out_transport_unit_size = 16;
394 		break;
395 	default:
396 		return -EINVAL;
397 	}
398 
399 	cp.retrans_effort = param->retrans_effort;
400 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
401 	cp.max_latency = __cpu_to_le16(param->max_latency);
402 
403 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
404 		return -EIO;
405 
406 	return 0;
407 }
408 
409 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
410 {
411 	struct hci_dev *hdev = conn->hdev;
412 	struct hci_cp_setup_sync_conn cp;
413 	const struct sco_param *param;
414 
415 	bt_dev_dbg(hdev, "hcon %p", conn);
416 
417 	conn->state = BT_CONNECT;
418 	conn->out = true;
419 
420 	conn->attempt++;
421 
422 	cp.handle   = cpu_to_le16(handle);
423 
424 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
425 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
426 	cp.voice_setting  = cpu_to_le16(conn->setting);
427 
428 	switch (conn->setting & SCO_AIRMODE_MASK) {
429 	case SCO_AIRMODE_TRANSP:
430 		if (!find_next_esco_param(conn, esco_param_msbc,
431 					  ARRAY_SIZE(esco_param_msbc)))
432 			return false;
433 		param = &esco_param_msbc[conn->attempt - 1];
434 		break;
435 	case SCO_AIRMODE_CVSD:
436 		if (conn->parent && lmp_esco_capable(conn->parent)) {
437 			if (!find_next_esco_param(conn, esco_param_cvsd,
438 						  ARRAY_SIZE(esco_param_cvsd)))
439 				return false;
440 			param = &esco_param_cvsd[conn->attempt - 1];
441 		} else {
442 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
443 				return false;
444 			param = &sco_param_cvsd[conn->attempt - 1];
445 		}
446 		break;
447 	default:
448 		return false;
449 	}
450 
451 	cp.retrans_effort = param->retrans_effort;
452 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
453 	cp.max_latency = __cpu_to_le16(param->max_latency);
454 
455 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
456 		return false;
457 
458 	return true;
459 }
460 
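/* Set up an (e)SCO link on top of the ACL identified by handle. Uses the
 * Enhanced Setup Synchronous Connection command via the cmd_sync queue when
 * the controller supports it, and falls back to the legacy command
 * otherwise.
 */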
461 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
462 {
463 	int result;
464 	struct conn_handle_t *conn_handle;
465 
466 	if (enhanced_sync_conn_capable(conn->hdev)) {
467 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
468 
469 		if (!conn_handle)
470 			return false;
471 
472 		conn_handle->conn = conn;
473 		conn_handle->handle = handle;
474 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
475 					    conn_handle, NULL);
476 		if (result < 0)
477 			kfree(conn_handle);
478 
479 		return result == 0;
480 	}
481 
482 	return hci_setup_sync_conn(conn, handle);
483 }
484 
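/* Send LE Connection Update for the given parameters and mirror them into
 * any stored hci_conn_params. Returns 0x01 if stored parameters were
 * updated, 0x00 otherwise.
 */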
485 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
486 		      u16 to_multiplier)
487 {
488 	struct hci_dev *hdev = conn->hdev;
489 	struct hci_conn_params *params;
490 	struct hci_cp_le_conn_update cp;
491 
492 	hci_dev_lock(hdev);
493 
494 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
495 	if (params) {
496 		params->conn_min_interval = min;
497 		params->conn_max_interval = max;
498 		params->conn_latency = latency;
499 		params->supervision_timeout = to_multiplier;
500 	}
501 
502 	hci_dev_unlock(hdev);
503 
504 	memset(&cp, 0, sizeof(cp));
505 	cp.handle		= cpu_to_le16(conn->handle);
506 	cp.conn_interval_min	= cpu_to_le16(min);
507 	cp.conn_interval_max	= cpu_to_le16(max);
508 	cp.conn_latency		= cpu_to_le16(latency);
509 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
510 	cp.min_ce_len		= cpu_to_le16(0x0000);
511 	cp.max_ce_len		= cpu_to_le16(0x0000);
512 
513 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
514 
515 	if (params)
516 		return 0x01;
517 
518 	return 0x00;
519 }
520 
521 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
522 		      __u8 ltk[16], __u8 key_size)
523 {
524 	struct hci_dev *hdev = conn->hdev;
525 	struct hci_cp_le_start_enc cp;
526 
527 	BT_DBG("hcon %p", conn);
528 
529 	memset(&cp, 0, sizeof(cp));
530 
531 	cp.handle = cpu_to_le16(conn->handle);
532 	cp.rand = rand;
533 	cp.ediv = ediv;
534 	memcpy(cp.ltk, ltk, key_size);
535 
536 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
537 }
538 
539 /* Device _must_ be locked */
540 void hci_sco_setup(struct hci_conn *conn, __u8 status)
541 {
542 	struct hci_link *link;
543 
544 	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
545 	if (!link || !link->conn)
546 		return;
547 
548 	BT_DBG("hcon %p", conn);
549 
550 	if (!status) {
551 		if (lmp_esco_capable(conn->hdev))
552 			hci_setup_sync(link->conn, conn->handle);
553 		else
554 			hci_add_sco(link->conn, conn->handle);
555 	} else {
556 		hci_connect_cfm(link->conn, status);
557 		hci_conn_del(link->conn);
558 	}
559 }
560 
561 static void hci_conn_timeout(struct work_struct *work)
562 {
563 	struct hci_conn *conn = container_of(work, struct hci_conn,
564 					     disc_work.work);
565 	int refcnt = atomic_read(&conn->refcnt);
566 
567 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
568 
569 	WARN_ON(refcnt < 0);
570 
571 	/* FIXME: It was observed that in a failed pairing scenario the refcnt
572 	 * drops below 0. Probably this is because l2cap_conn_del calls
573 	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
574 	 * dropped. After that loop hci_chan_del is called, which also drops
575 	 * conn. For now make sure that the ACL is alive if refcnt is higher
576 	 * than 0, otherwise drop it.
577 	 */
578 	if (refcnt > 0)
579 		return;
580 
581 	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
582 }
583 
584 /* Enter sniff mode */
585 static void hci_conn_idle(struct work_struct *work)
586 {
587 	struct hci_conn *conn = container_of(work, struct hci_conn,
588 					     idle_work.work);
589 	struct hci_dev *hdev = conn->hdev;
590 
591 	BT_DBG("hcon %p mode %d", conn, conn->mode);
592 
593 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
594 		return;
595 
596 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
597 		return;
598 
599 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
600 		struct hci_cp_sniff_subrate cp;
601 		cp.handle             = cpu_to_le16(conn->handle);
602 		cp.max_latency        = cpu_to_le16(0);
603 		cp.min_remote_timeout = cpu_to_le16(0);
604 		cp.min_local_timeout  = cpu_to_le16(0);
605 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
606 	}
607 
608 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
609 		struct hci_cp_sniff_mode cp;
610 		cp.handle       = cpu_to_le16(conn->handle);
611 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
612 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
613 		cp.attempt      = cpu_to_le16(4);
614 		cp.timeout      = cpu_to_le16(1);
615 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
616 	}
617 }
618 
619 static void hci_conn_auto_accept(struct work_struct *work)
620 {
621 	struct hci_conn *conn = container_of(work, struct hci_conn,
622 					     auto_accept_work.work);
623 
624 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
625 		     &conn->dst);
626 }
627 
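/* Disable advertising, using the extended command when the controller
 * supports extended advertising and the legacy command otherwise.
 */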
628 static void le_disable_advertising(struct hci_dev *hdev)
629 {
630 	if (ext_adv_capable(hdev)) {
631 		struct hci_cp_le_set_ext_adv_enable cp;
632 
633 		cp.enable = 0x00;
634 		cp.num_of_sets = 0x00;
635 
636 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
637 			     &cp);
638 	} else {
639 		u8 enable = 0x00;
640 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
641 			     &enable);
642 	}
643 }
644 
645 static void le_conn_timeout(struct work_struct *work)
646 {
647 	struct hci_conn *conn = container_of(work, struct hci_conn,
648 					     le_conn_timeout.work);
649 	struct hci_dev *hdev = conn->hdev;
650 
651 	BT_DBG("");
652 
653 	/* We could end up here due to having done directed advertising,
654 	 * so clean up the state if necessary. This should however only
655 	 * happen with broken hardware or if low duty cycle was used
656 	 * (which doesn't have a timeout of its own).
657 	 */
658 	if (conn->role == HCI_ROLE_SLAVE) {
659 		/* Disable LE Advertising */
660 		le_disable_advertising(hdev);
661 		hci_dev_lock(hdev);
662 		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
663 		hci_dev_unlock(hdev);
664 		return;
665 	}
666 
667 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
668 }
669 
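/* Context for iterating ISO connections in the connection hash: the
 * CIG/BIG (and CIS/BIS or PA sync handle) to match, the number of matches
 * found, and which terminate operations are still pending.
 */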
670 struct iso_list_data {
671 	union {
672 		u8  cig;
673 		u8  big;
674 	};
675 	union {
676 		u8  cis;
677 		u8  bis;
678 		u16 sync_handle;
679 	};
680 	int count;
681 	bool big_term;
682 	bool pa_sync_term;
683 	bool big_sync_term;
684 };
685 
686 static void bis_list(struct hci_conn *conn, void *data)
687 {
688 	struct iso_list_data *d = data;
689 
690 	/* Skip if not broadcast/ANY address */
691 	if (bacmp(&conn->dst, BDADDR_ANY))
692 		return;
693 
694 	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
695 	    d->bis != conn->iso_qos.bcast.bis)
696 		return;
697 
698 	d->count++;
699 }
700 
701 static int terminate_big_sync(struct hci_dev *hdev, void *data)
702 {
703 	struct iso_list_data *d = data;
704 
705 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
706 
707 	hci_disable_per_advertising_sync(hdev, d->bis);
708 	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
709 
710 	/* Only terminate BIG if it has been created */
711 	if (!d->big_term)
712 		return 0;
713 
714 	return hci_le_terminate_big_sync(hdev, d->big,
715 					 HCI_ERROR_LOCAL_HOST_TERM);
716 }
717 
718 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
719 {
720 	kfree(data);
721 }
722 
723 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
724 {
725 	struct iso_list_data *d;
726 	int ret;
727 
728 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
729 		   conn->iso_qos.bcast.bis);
730 
731 	d = kzalloc(sizeof(*d), GFP_KERNEL);
732 	if (!d)
733 		return -ENOMEM;
734 
735 	d->big = conn->iso_qos.bcast.big;
736 	d->bis = conn->iso_qos.bcast.bis;
737 	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
738 
739 	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
740 				 terminate_big_destroy);
741 	if (ret)
742 		kfree(d);
743 
744 	return ret;
745 }
746 
747 static int big_terminate_sync(struct hci_dev *hdev, void *data)
748 {
749 	struct iso_list_data *d = data;
750 
751 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
752 		   d->sync_handle);
753 
754 	if (d->big_sync_term)
755 		hci_le_big_terminate_sync(hdev, d->big);
756 
757 	if (d->pa_sync_term)
758 		return hci_le_pa_terminate_sync(hdev, d->sync_handle);
759 
760 	return 0;
761 }
762 
763 static void find_bis(struct hci_conn *conn, void *data)
764 {
765 	struct iso_list_data *d = data;
766 
767 	/* Ignore if BIG doesn't match */
768 	if (d->big != conn->iso_qos.bcast.big)
769 		return;
770 
771 	d->count++;
772 }
773 
774 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
775 {
776 	struct iso_list_data *d;
777 	int ret;
778 
779 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
780 
781 	d = kzalloc(sizeof(*d), GFP_KERNEL);
782 	if (!d)
783 		return -ENOMEM;
784 
785 	d->big = big;
786 	d->sync_handle = conn->sync_handle;
787 
788 	if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
789 		hci_conn_hash_list_flag(hdev, find_bis, PA_LINK,
790 					HCI_CONN_PA_SYNC, d);
791 
792 		if (!d->count)
793 			d->pa_sync_term = true;
794 
795 		d->count = 0;
796 	}
797 
798 	if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
799 		hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK,
800 					HCI_CONN_BIG_SYNC, d);
801 
802 		if (!d->count)
803 			d->big_sync_term = true;
804 	}
805 
806 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
807 				 terminate_big_destroy);
808 	if (ret)
809 		kfree(d);
810 
811 	return ret;
812 }
813 
814 /* Cleanup BIS connection
815  *
816  * Detects if there are any BISes left connected in a BIG.
817  * broadcaster: Remove the advertising instance and terminate the BIG.
818  * broadcast receiver: Terminate BIG sync and terminate PA sync.
819  */
820 static void bis_cleanup(struct hci_conn *conn)
821 {
822 	struct hci_dev *hdev = conn->hdev;
823 	struct hci_conn *bis;
824 
825 	bt_dev_dbg(hdev, "conn %p", conn);
826 
827 	if (conn->role == HCI_ROLE_MASTER) {
828 		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
829 			return;
830 
831 		/* Check if ISO connection is a BIS and terminate advertising
832 		 * set and BIG if there are no other connections using it.
833 		 */
834 		bis = hci_conn_hash_lookup_big_state(hdev,
835 						     conn->iso_qos.bcast.big,
836 						     BT_CONNECTED,
837 						     HCI_ROLE_MASTER);
838 		if (bis)
839 			return;
840 
841 		bis = hci_conn_hash_lookup_big_state(hdev,
842 						     conn->iso_qos.bcast.big,
843 						     BT_CONNECT,
844 						     HCI_ROLE_MASTER);
845 		if (bis)
846 			return;
847 
848 		hci_le_terminate_big(hdev, conn);
849 	} else {
850 		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
851 				     conn);
852 	}
853 }
854 
855 static int remove_cig_sync(struct hci_dev *hdev, void *data)
856 {
857 	u8 handle = PTR_UINT(data);
858 
859 	return hci_le_remove_cig_sync(hdev, handle);
860 }
861 
862 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
863 {
864 	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
865 
866 	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
867 				  NULL);
868 }
869 
870 static void find_cis(struct hci_conn *conn, void *data)
871 {
872 	struct iso_list_data *d = data;
873 
874 	/* Ignore broadcast or if the CIG doesn't match */
875 	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
876 		return;
877 
878 	d->count++;
879 }
880 
881 /* Cleanup CIS connection:
882  *
883  * Detects if there are any CISes left connected in a CIG and removes the CIG if not.
884  */
885 static void cis_cleanup(struct hci_conn *conn)
886 {
887 	struct hci_dev *hdev = conn->hdev;
888 	struct iso_list_data d;
889 
890 	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
891 		return;
892 
893 	memset(&d, 0, sizeof(d));
894 	d.cig = conn->iso_qos.ucast.cig;
895 
896 	/* Check if ISO connection is a CIS and remove CIG if there are
897 	 * no other connections using it.
898 	 */
899 	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_BOUND, &d);
900 	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECT,
901 				 &d);
902 	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECTED,
903 				 &d);
904 	if (d.count)
905 		return;
906 
907 	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
908 }
909 
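/* Allocate a placeholder handle above HCI_CONN_HANDLE_MAX for a connection
 * that has no controller-assigned handle yet; it is returned to the IDA in
 * hci_conn_cleanup() or hci_conn_set_handle().
 */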
910 static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
911 {
912 	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
913 			       U16_MAX, GFP_ATOMIC);
914 }
915 
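/* Allocate and initialize an hci_conn, refusing link types for which the
 * controller reports no usable buffers, and add it to the connection hash.
 */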
916 static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
917 				       u8 role, u16 handle)
918 {
919 	struct hci_conn *conn;
920 
921 	switch (type) {
922 	case ACL_LINK:
923 		if (!hdev->acl_mtu)
924 			return ERR_PTR(-ECONNREFUSED);
925 		break;
926 	case CIS_LINK:
927 	case BIS_LINK:
928 	case PA_LINK:
929 		if (hdev->iso_mtu)
930 			/* Dedicated ISO Buffer exists */
931 			break;
932 		fallthrough;
933 	case LE_LINK:
934 		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
935 			return ERR_PTR(-ECONNREFUSED);
936 		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
937 			return ERR_PTR(-ECONNREFUSED);
938 		break;
939 	case SCO_LINK:
940 	case ESCO_LINK:
941 		if (!hdev->sco_pkts)
942 			/* Controller does not support SCO or eSCO over HCI */
943 			return ERR_PTR(-ECONNREFUSED);
944 		break;
945 	default:
946 		return ERR_PTR(-ECONNREFUSED);
947 	}
948 
949 	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
950 
951 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
952 	if (!conn)
953 		return ERR_PTR(-ENOMEM);
954 
955 	bacpy(&conn->dst, dst);
956 	bacpy(&conn->src, &hdev->bdaddr);
957 	conn->handle = handle;
958 	conn->hdev  = hdev;
959 	conn->type  = type;
960 	conn->role  = role;
961 	conn->mode  = HCI_CM_ACTIVE;
962 	conn->state = BT_OPEN;
963 	conn->auth_type = HCI_AT_GENERAL_BONDING;
964 	conn->io_capability = hdev->io_capability;
965 	conn->remote_auth = 0xff;
966 	conn->key_type = 0xff;
967 	conn->rssi = HCI_RSSI_INVALID;
968 	conn->tx_power = HCI_TX_POWER_INVALID;
969 	conn->max_tx_power = HCI_TX_POWER_INVALID;
970 	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
971 	conn->sid = HCI_SID_INVALID;
972 
973 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
974 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
975 
976 	/* Set Default Authenticated payload timeout to 30s */
977 	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
978 
979 	if (conn->role == HCI_ROLE_MASTER)
980 		conn->out = true;
981 
982 	switch (type) {
983 	case ACL_LINK:
984 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
985 		conn->mtu = hdev->acl_mtu;
986 		break;
987 	case LE_LINK:
988 		/* conn->src should reflect the local identity address */
989 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
990 		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
991 		break;
992 	case CIS_LINK:
993 	case BIS_LINK:
994 	case PA_LINK:
995 		/* conn->src should reflect the local identity address */
996 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
997 
998 		/* set proper cleanup function */
999 		if (!bacmp(dst, BDADDR_ANY))
1000 			conn->cleanup = bis_cleanup;
1001 		else if (conn->role == HCI_ROLE_MASTER)
1002 			conn->cleanup = cis_cleanup;
1003 
1004 		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
1005 			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
1006 		break;
1007 	case SCO_LINK:
1008 		if (lmp_esco_capable(hdev))
1009 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
1010 					(hdev->esco_type & EDR_ESCO_MASK);
1011 		else
1012 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
1013 
1014 		conn->mtu = hdev->sco_mtu;
1015 		break;
1016 	case ESCO_LINK:
1017 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1018 		conn->mtu = hdev->sco_mtu;
1019 		break;
1020 	}
1021 
1022 	skb_queue_head_init(&conn->data_q);
1023 	skb_queue_head_init(&conn->tx_q.queue);
1024 
1025 	INIT_LIST_HEAD(&conn->chan_list);
1026 	INIT_LIST_HEAD(&conn->link_list);
1027 
1028 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1029 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1030 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1031 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1032 
1033 	atomic_set(&conn->refcnt, 0);
1034 
1035 	hci_dev_hold(hdev);
1036 
1037 	hci_conn_hash_add(hdev, conn);
1038 
1039 	/* The SCO and eSCO connections will only be notified when their
1040 	 * setup has been completed. This is different from ACL links, which
1041 	 * can be notified right away.
1042 	 */
1043 	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1044 		if (hdev->notify)
1045 			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1046 	}
1047 
1048 	hci_conn_init_sysfs(conn);
1049 	return conn;
1050 }
1051 
1052 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1053 				    bdaddr_t *dst, u8 role)
1054 {
1055 	int handle;
1056 
1057 	bt_dev_dbg(hdev, "dst %pMR", dst);
1058 
1059 	handle = hci_conn_hash_alloc_unset(hdev);
1060 	if (unlikely(handle < 0))
1061 		return ERR_PTR(-ECONNREFUSED);
1062 
1063 	return __hci_conn_add(hdev, type, dst, role, handle);
1064 }
1065 
1066 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
1067 			      u8 role, u16 handle)
1068 {
1069 	if (handle > HCI_CONN_HANDLE_MAX)
1070 		return ERR_PTR(-EINVAL);
1071 
1072 	return __hci_conn_add(hdev, type, dst, role, handle);
1073 }
1074 
1075 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1076 {
1077 	if (!reason)
1078 		reason = HCI_ERROR_REMOTE_USER_TERM;
1079 
1080 	/* Due to a race, the SCO/ISO conn might not be established yet at
1081 	 * this point, and nothing else will clean it up. In other cases
1082 	 * cleanup is done via HCI events.
1083 	 */
1084 	switch (conn->type) {
1085 	case SCO_LINK:
1086 	case ESCO_LINK:
1087 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
1088 			hci_conn_failed(conn, reason);
1089 		break;
1090 	case CIS_LINK:
1091 	case BIS_LINK:
1092 	case PA_LINK:
1093 		if ((conn->state != BT_CONNECTED &&
1094 		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
1095 		    test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
1096 			hci_conn_failed(conn, reason);
1097 		break;
1098 	}
1099 }
1100 
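/* Detach a connection from its parent/child links: a parent unlinks all of
 * its children (cleaning them up unless the device is going down), while a
 * child drops the references it holds on its parent.
 */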
1101 static void hci_conn_unlink(struct hci_conn *conn)
1102 {
1103 	struct hci_dev *hdev = conn->hdev;
1104 
1105 	bt_dev_dbg(hdev, "hcon %p", conn);
1106 
1107 	if (!conn->parent) {
1108 		struct hci_link *link, *t;
1109 
1110 		list_for_each_entry_safe(link, t, &conn->link_list, list) {
1111 			struct hci_conn *child = link->conn;
1112 
1113 			hci_conn_unlink(child);
1114 
1115 			/* If hdev is down it means
1116 			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
1117 			 * and the links don't need to be cleaned up as all
1118 			 * connections will be cleaned up there.
1119 			 */
1120 			if (!test_bit(HCI_UP, &hdev->flags))
1121 				continue;
1122 
1123 			hci_conn_cleanup_child(child, conn->abort_reason);
1124 		}
1125 
1126 		return;
1127 	}
1128 
1129 	if (!conn->link)
1130 		return;
1131 
1132 	list_del_rcu(&conn->link->list);
1133 	synchronize_rcu();
1134 
1135 	hci_conn_drop(conn->parent);
1136 	hci_conn_put(conn->parent);
1137 	conn->parent = NULL;
1138 
1139 	kfree(conn->link);
1140 	conn->link = NULL;
1141 }
1142 
1143 void hci_conn_del(struct hci_conn *conn)
1144 {
1145 	struct hci_dev *hdev = conn->hdev;
1146 
1147 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1148 
1149 	hci_conn_unlink(conn);
1150 
1151 	disable_delayed_work_sync(&conn->disc_work);
1152 	disable_delayed_work_sync(&conn->auto_accept_work);
1153 	disable_delayed_work_sync(&conn->idle_work);
1154 
1155 	if (conn->type == ACL_LINK) {
1156 		/* Unacked frames */
1157 		hdev->acl_cnt += conn->sent;
1158 	} else if (conn->type == LE_LINK) {
1159 		cancel_delayed_work(&conn->le_conn_timeout);
1160 
1161 		if (hdev->le_pkts)
1162 			hdev->le_cnt += conn->sent;
1163 		else
1164 			hdev->acl_cnt += conn->sent;
1165 	} else {
1166 		/* Unacked ISO frames */
1167 		if (conn->type == CIS_LINK ||
1168 		    conn->type == BIS_LINK ||
1169 		    conn->type == PA_LINK) {
1170 			if (hdev->iso_pkts)
1171 				hdev->iso_cnt += conn->sent;
1172 			else if (hdev->le_pkts)
1173 				hdev->le_cnt += conn->sent;
1174 			else
1175 				hdev->acl_cnt += conn->sent;
1176 		}
1177 	}
1178 
1179 	skb_queue_purge(&conn->data_q);
1180 	skb_queue_purge(&conn->tx_q.queue);
1181 
1182 	/* Remove the connection from the list and clean up its remaining
1183 	 * state. This is a separate function since for some cases like
1184 	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1185 	 * rest of hci_conn_del.
1186 	 */
1187 	hci_conn_cleanup(conn);
1188 
1189 	/* Dequeue callbacks using connection pointer as data */
1190 	hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
1191 }
1192 
1193 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1194 {
1195 	int use_src = bacmp(src, BDADDR_ANY);
1196 	struct hci_dev *hdev = NULL, *d;
1197 
1198 	BT_DBG("%pMR -> %pMR", src, dst);
1199 
1200 	read_lock(&hci_dev_list_lock);
1201 
1202 	list_for_each_entry(d, &hci_dev_list, list) {
1203 		if (!test_bit(HCI_UP, &d->flags) ||
1204 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
1205 			continue;
1206 
1207 		/* Simple routing:
1208 		 *   No source address - find interface with bdaddr != dst
1209 		 *   Source address    - find interface with bdaddr == src
1210 		 */
1211 
1212 		if (use_src) {
1213 			bdaddr_t id_addr;
1214 			u8 id_addr_type;
1215 
1216 			if (src_type == BDADDR_BREDR) {
1217 				if (!lmp_bredr_capable(d))
1218 					continue;
1219 				bacpy(&id_addr, &d->bdaddr);
1220 				id_addr_type = BDADDR_BREDR;
1221 			} else {
1222 				if (!lmp_le_capable(d))
1223 					continue;
1224 
1225 				hci_copy_identity_address(d, &id_addr,
1226 							  &id_addr_type);
1227 
1228 				/* Convert from HCI to three-value type */
1229 				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1230 					id_addr_type = BDADDR_LE_PUBLIC;
1231 				else
1232 					id_addr_type = BDADDR_LE_RANDOM;
1233 			}
1234 
1235 			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1236 				hdev = d; break;
1237 			}
1238 		} else {
1239 			if (bacmp(&d->bdaddr, dst)) {
1240 				hdev = d; break;
1241 			}
1242 		}
1243 	}
1244 
1245 	if (hdev)
1246 		hdev = hci_dev_hold(hdev);
1247 
1248 	read_unlock(&hci_dev_list_lock);
1249 	return hdev;
1250 }
1251 EXPORT_SYMBOL(hci_get_route);
1252 
1253 /* This function requires the caller holds hdev->lock */
1254 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1255 {
1256 	struct hci_dev *hdev = conn->hdev;
1257 
1258 	hci_connect_le_scan_cleanup(conn, status);
1259 
1260 	/* Enable advertising in case this was a failed connection
1261 	 * attempt as a peripheral.
1262 	 */
1263 	hci_enable_advertising(hdev);
1264 }
1265 
1266 /* This function requires the caller holds hdev->lock */
1267 void hci_conn_failed(struct hci_conn *conn, u8 status)
1268 {
1269 	struct hci_dev *hdev = conn->hdev;
1270 
1271 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1272 
1273 	switch (conn->type) {
1274 	case LE_LINK:
1275 		hci_le_conn_failed(conn, status);
1276 		break;
1277 	case ACL_LINK:
1278 		mgmt_connect_failed(hdev, conn, status);
1279 		break;
1280 	}
1281 
1282 	/* In case BIG/PA sync failed, clear the conn flags so that the
1283 	 * conns will be correctly cleaned up by the ISO layer
1284 	 */
1285 	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1286 	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1287 
1288 	conn->state = BT_CLOSED;
1289 	hci_connect_cfm(conn, status);
1290 	hci_conn_del(conn);
1291 }
1292 
1293 /* This function requires the caller holds hdev->lock */
1294 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1295 {
1296 	struct hci_dev *hdev = conn->hdev;
1297 
1298 	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1299 
1300 	if (conn->handle == handle)
1301 		return 0;
1302 
1303 	if (handle > HCI_CONN_HANDLE_MAX) {
1304 		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1305 			   handle, HCI_CONN_HANDLE_MAX);
1306 		return HCI_ERROR_INVALID_PARAMETERS;
1307 	}
1308 
1309 	/* If abort_reason has been set it means the connection is being
1310 	 * aborted and the handle shall not be changed.
1311 	 */
1312 	if (conn->abort_reason)
1313 		return conn->abort_reason;
1314 
1315 	if (HCI_CONN_HANDLE_UNSET(conn->handle))
1316 		ida_free(&hdev->unset_handle_ida, conn->handle);
1317 
1318 	conn->handle = handle;
1319 
1320 	return 0;
1321 }
1322 
1323 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1324 				u8 dst_type, bool dst_resolved, u8 sec_level,
1325 				u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
1326 {
1327 	struct hci_conn *conn;
1328 	struct smp_irk *irk;
1329 	int err;
1330 
1331 	/* Let's make sure that LE is enabled. */
1332 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1333 		if (lmp_le_capable(hdev))
1334 			return ERR_PTR(-ECONNREFUSED);
1335 
1336 		return ERR_PTR(-EOPNOTSUPP);
1337 	}
1338 
1339 	/* Since the controller supports only one LE connection attempt at a
1340 	 * time, we return -EBUSY if there is any connection attempt running.
1341 	 */
1342 	if (hci_lookup_le_connect(hdev))
1343 		return ERR_PTR(-EBUSY);
1344 
1345 	/* If there's already a connection object but it's not in
1346 	 * scanning state it means it must already be established, in
1347 	 * which case we can't do anything else except report a failure
1348 	 * to connect.
1349 	 */
1350 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1351 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags))
1352 		return ERR_PTR(-EBUSY);
1354 
1355 	/* Check if the destination address has been resolved by the controller
1356 	 * since if it has then the identity address shall be used.
1357 	 */
1358 	if (!dst_resolved) {
1359 		/* When given an identity address with existing identity
1360 		 * resolving key, the connection needs to be established
1361 		 * to a resolvable random address.
1362 		 *
1363 		 * Storing the resolvable random address is required here
1364 		 * to handle connection failures. The address will later
1365 		 * be resolved back into the original identity address
1366 		 * from the connect request.
1367 		 */
1368 		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1369 		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1370 			dst = &irk->rpa;
1371 			dst_type = ADDR_LE_DEV_RANDOM;
1372 		}
1373 	}
1374 
1375 	if (conn) {
1376 		bacpy(&conn->dst, dst);
1377 	} else {
1378 		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1379 		if (IS_ERR(conn))
1380 			return conn;
1381 		hci_conn_hold(conn);
1382 		conn->pending_sec_level = sec_level;
1383 	}
1384 
1385 	conn->dst_type = dst_type;
1386 	conn->sec_level = BT_SECURITY_LOW;
1387 	conn->conn_timeout = conn_timeout;
1388 	conn->le_adv_phy = phy;
1389 	conn->le_adv_sec_phy = sec_phy;
1390 
1391 	err = hci_connect_le_sync(hdev, conn);
1392 	if (err) {
1393 		hci_conn_del(conn);
1394 		return ERR_PTR(err);
1395 	}
1396 
1397 	return conn;
1398 }
1399 
1400 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1401 {
1402 	struct hci_conn *conn;
1403 
1404 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1405 	if (!conn)
1406 		return false;
1407 
1408 	if (conn->state != BT_CONNECTED)
1409 		return false;
1410 
1411 	return true;
1412 }
1413 
1414 /* This function requires the caller holds hdev->lock */
1415 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1416 					bdaddr_t *addr, u8 addr_type)
1417 {
1418 	struct hci_conn_params *params;
1419 
1420 	if (is_connected(hdev, addr, addr_type))
1421 		return -EISCONN;
1422 
1423 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1424 	if (!params) {
1425 		params = hci_conn_params_add(hdev, addr, addr_type);
1426 		if (!params)
1427 			return -ENOMEM;
1428 
1429 		/* If we created new params, mark them to be deleted in
1430 		 * hci_connect_le_scan_cleanup. It's a different case from
1431 		 * existing disabled params; those will stay after cleanup.
1432 		 */
1433 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1434 	}
1435 
1436 	/* We're trying to connect, so make sure params are at pend_le_conns */
1437 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1438 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1439 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1440 		hci_pend_le_list_del_init(params);
1441 		hci_pend_le_list_add(params, &hdev->pend_le_conns);
1442 	}
1443 
1444 	params->explicit_connect = true;
1445 
1446 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1447 	       params->auto_connect);
1448 
1449 	return 0;
1450 }
1451 
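/* Pick the first BIG handle without existing connections if the QoS left
 * it unset.
 */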
1452 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1453 {
1454 	struct hci_conn *conn;
1455 	u8  big;
1456 
1457 	/* Allocate a BIG if not set */
1458 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1459 		for (big = 0x00; big < 0xef; big++) {
1460 
1461 			conn = hci_conn_hash_lookup_big(hdev, big);
1462 			if (!conn)
1463 				break;
1464 		}
1465 
1466 		if (big == 0xef)
1467 			return -EADDRNOTAVAIL;
1468 
1469 		/* Update BIG */
1470 		qos->bcast.big = big;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1477 {
1478 	struct hci_conn *conn;
1479 	u8  bis;
1480 
1481 	/* Allocate BIS if not set */
1482 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1483 		if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
1484 			conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1485 
1486 			if (conn) {
1487 				/* If the BIG handle is already matched to an advertising
1488 				 * handle, do not allocate a new one.
1489 				 */
1490 				qos->bcast.bis = conn->iso_qos.bcast.bis;
1491 				return 0;
1492 			}
1493 		}
1494 
1495 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1496 		 * since it is reserved as the general-purpose set.
1497 		 */
1498 		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1499 		     bis++) {
1500 
1501 			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1502 			if (!conn)
1503 				break;
1504 		}
1505 
1506 		if (bis == hdev->le_num_of_adv_sets)
1507 			return -EADDRNOTAVAIL;
1508 
1509 		/* Update BIS */
1510 		qos->bcast.bis = bis;
1511 	}
1512 
1513 	return 0;
1514 }
1515 
1516 /* This function requires the caller holds hdev->lock */
1517 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1518 				    __u8 sid, struct bt_iso_qos *qos,
1519 				    __u8 base_len, __u8 *base)
1520 {
1521 	struct hci_conn *conn;
1522 	int err;
1523 
1524 	/* Let's make sure that LE is enabled. */
1525 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1526 		if (lmp_le_capable(hdev))
1527 			return ERR_PTR(-ECONNREFUSED);
1528 		return ERR_PTR(-EOPNOTSUPP);
1529 	}
1530 
1531 	err = qos_set_big(hdev, qos);
1532 	if (err)
1533 		return ERR_PTR(err);
1534 
1535 	err = qos_set_bis(hdev, qos);
1536 	if (err)
1537 		return ERR_PTR(err);
1538 
1539 	/* Check if the LE Create BIG command has already been sent */
1540 	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1541 						qos->bcast.big);
1542 	if (conn)
1543 		return ERR_PTR(-EADDRINUSE);
1544 
1545 	/* Check BIS settings against other bound BISes, since all
1546 	 * BISes in a BIG must have the same value for all parameters
1547 	 */
1548 	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1549 
1550 	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1551 		     base_len != conn->le_per_adv_data_len ||
1552 		     memcmp(conn->le_per_adv_data, base, base_len)))
1553 		return ERR_PTR(-EADDRINUSE);
1554 
1555 	conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_MASTER);
1556 	if (IS_ERR(conn))
1557 		return conn;
1558 
1559 	conn->state = BT_CONNECT;
1560 	conn->sid = sid;
1561 
1562 	hci_conn_hold(conn);
1563 	return conn;
1564 }
1565 
1566 /* This function requires the caller holds hdev->lock */
1567 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1568 				     u8 dst_type, u8 sec_level,
1569 				     u16 conn_timeout,
1570 				     enum conn_reasons conn_reason)
1571 {
1572 	struct hci_conn *conn;
1573 
1574 	/* Let's make sure that LE is enabled. */
1575 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1576 		if (lmp_le_capable(hdev))
1577 			return ERR_PTR(-ECONNREFUSED);
1578 
1579 		return ERR_PTR(-EOPNOTSUPP);
1580 	}
1581 
1582 	/* Some devices send ATT messages as soon as the physical link is
1583 	 * established. To be able to handle these ATT messages, the user-
1584 	 * space first establishes the connection and then starts the pairing
1585 	 * process.
1586 	 *
1587 	 * So if an hci_conn object already exists for the following connection
1588 	 * attempt, we simply update the pending_sec_level field and return
1589 	 * the object found.
1590 	 */
1591 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1592 	if (conn) {
1593 		if (conn->pending_sec_level < sec_level)
1594 			conn->pending_sec_level = sec_level;
1595 		goto done;
1596 	}
1597 
1598 	BT_DBG("requesting refresh of dst_addr");
1599 
1600 	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1601 	if (IS_ERR(conn))
1602 		return conn;
1603 
1604 	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1605 		hci_conn_del(conn);
1606 		return ERR_PTR(-EBUSY);
1607 	}
1608 
1609 	conn->state = BT_CONNECT;
1610 	set_bit(HCI_CONN_SCANNING, &conn->flags);
1611 	conn->dst_type = dst_type;
1612 	conn->sec_level = BT_SECURITY_LOW;
1613 	conn->pending_sec_level = sec_level;
1614 	conn->conn_timeout = conn_timeout;
1615 	conn->conn_reason = conn_reason;
1616 
1617 	hci_update_passive_scan(hdev);
1618 
1619 done:
1620 	hci_conn_hold(conn);
1621 	return conn;
1622 }
1623 
1624 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1625 				 u8 sec_level, u8 auth_type,
1626 				 enum conn_reasons conn_reason, u16 timeout)
1627 {
1628 	struct hci_conn *acl;
1629 
1630 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1631 		if (lmp_bredr_capable(hdev))
1632 			return ERR_PTR(-ECONNREFUSED);
1633 
1634 		return ERR_PTR(-EOPNOTSUPP);
1635 	}
1636 
1637 	/* Reject an outgoing connection to a device with the same BD_ADDR
1638 	 * to guard against CVE-2020-26555.
1639 	 */
1640 	if (!bacmp(&hdev->bdaddr, dst)) {
1641 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1642 			   dst);
1643 		return ERR_PTR(-ECONNREFUSED);
1644 	}
1645 
1646 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1647 	if (!acl) {
1648 		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1649 		if (IS_ERR(acl))
1650 			return acl;
1651 	}
1652 
1653 	hci_conn_hold(acl);
1654 
1655 	acl->conn_reason = conn_reason;
1656 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1657 		int err;
1658 
1659 		acl->sec_level = BT_SECURITY_LOW;
1660 		acl->pending_sec_level = sec_level;
1661 		acl->auth_type = auth_type;
1662 		acl->conn_timeout = timeout;
1663 
1664 		err = hci_connect_acl_sync(hdev, acl);
1665 		if (err) {
1666 			hci_conn_del(acl);
1667 			return ERR_PTR(err);
1668 		}
1669 	}
1670 
1671 	return acl;
1672 }
1673 
1674 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1675 				      struct hci_conn *conn)
1676 {
1677 	struct hci_dev *hdev = parent->hdev;
1678 	struct hci_link *link;
1679 
1680 	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1681 
1682 	if (conn->link)
1683 		return conn->link;
1684 
1685 	if (conn->parent)
1686 		return NULL;
1687 
1688 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1689 	if (!link)
1690 		return NULL;
1691 
1692 	link->conn = hci_conn_hold(conn);
1693 	conn->link = link;
1694 	conn->parent = hci_conn_get(parent);
1695 
1696 	/* Use list_add_tail_rcu to append to the list */
1697 	list_add_tail_rcu(&link->list, &parent->link_list);
1698 
1699 	return link;
1700 }
1701 
1702 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1703 				 __u16 setting, struct bt_codec *codec,
1704 				 u16 timeout)
1705 {
1706 	struct hci_conn *acl;
1707 	struct hci_conn *sco;
1708 	struct hci_link *link;
1709 
1710 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1711 			      CONN_REASON_SCO_CONNECT, timeout);
1712 	if (IS_ERR(acl))
1713 		return acl;
1714 
1715 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1716 	if (!sco) {
1717 		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1718 		if (IS_ERR(sco)) {
1719 			hci_conn_drop(acl);
1720 			return sco;
1721 		}
1722 	}
1723 
1724 	link = hci_conn_link(acl, sco);
1725 	if (!link) {
1726 		hci_conn_drop(acl);
1727 		hci_conn_drop(sco);
1728 		return ERR_PTR(-ENOLINK);
1729 	}
1730 
1731 	sco->setting = setting;
1732 	sco->codec = *codec;
1733 
1734 	if (acl->state == BT_CONNECTED &&
1735 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1736 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1737 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1738 
1739 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1740 			/* Defer SCO setup until the mode change is completed */
1741 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1742 			return sco;
1743 		}
1744 
1745 		hci_sco_setup(acl, 0x00);
1746 	}
1747 
1748 	return sco;
1749 }
1750 
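/* Send LE Create BIG covering every BIS currently bound to this BIG, using
 * the broadcast QoS for SDU interval, latency, PHY, packing, framing and
 * encryption.
 */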
1751 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1752 {
1753 	struct hci_dev *hdev = conn->hdev;
1754 	struct hci_cp_le_create_big cp;
1755 	struct iso_list_data data;
1756 
1757 	memset(&cp, 0, sizeof(cp));
1758 
1759 	data.big = qos->bcast.big;
1760 	data.bis = qos->bcast.bis;
1761 	data.count = 0;
1762 
1763 	/* Create a BIS for each bound connection */
1764 	hci_conn_hash_list_state(hdev, bis_list, BIS_LINK,
1765 				 BT_BOUND, &data);
1766 
1767 	cp.handle = qos->bcast.big;
1768 	cp.adv_handle = qos->bcast.bis;
1769 	cp.num_bis  = data.count;
1770 	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1771 	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1772 	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1773 	cp.bis.rtn  = qos->bcast.out.rtn;
1774 	cp.bis.phy  = qos->bcast.out.phy;
1775 	cp.bis.packing = qos->bcast.packing;
1776 	cp.bis.framing = qos->bcast.framing;
1777 	cp.bis.encryption = qos->bcast.encryption;
1778 	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1779 
1780 	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1781 }
1782 
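/* hci_cmd_sync work: reprogram the CIG identified by the context with the
 * QoS of every CIS currently bound to it.
 */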
1783 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1784 {
1785 	DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f);
1786 	u8 cig_id = PTR_UINT(data);
1787 	struct hci_conn *conn;
1788 	struct bt_iso_qos *qos;
1789 	u8 aux_num_cis = 0;
1790 	u8 cis_id;
1791 
1792 	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1793 	if (!conn)
1794 		return 0;
1795 
1796 	qos = &conn->iso_qos;
1797 	pdu->cig_id = cig_id;
1798 	hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval);
1799 	hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval);
1800 	pdu->sca = qos->ucast.sca;
1801 	pdu->packing = qos->ucast.packing;
1802 	pdu->framing = qos->ucast.framing;
1803 	pdu->c_latency = cpu_to_le16(qos->ucast.out.latency);
1804 	pdu->p_latency = cpu_to_le16(qos->ucast.in.latency);
1805 
1806 	/* Reprogram all CISes with the same CIG; the valid ranges are:
1807 	 * num_cis: 0x00 to 0x1F
1808 	 * cis_id: 0x00 to 0xEF
1809 	 */
1810 	for (cis_id = 0x00; cis_id < 0xf0 &&
1811 	     aux_num_cis < pdu->num_cis; cis_id++) {
1812 		struct hci_cis_params *cis;
1813 
1814 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1815 		if (!conn)
1816 			continue;
1817 
1818 		qos = &conn->iso_qos;
1819 
1820 		cis = &pdu->cis[aux_num_cis++];
1821 		cis->cis_id = cis_id;
1822 		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1823 		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1824 		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1825 			      qos->ucast.in.phy;
1826 		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1827 			      qos->ucast.out.phy;
1828 		cis->c_rtn  = qos->ucast.out.rtn;
1829 		cis->p_rtn  = qos->ucast.in.rtn;
1830 	}
1831 	pdu->num_cis = aux_num_cis;
1832 
1833 	if (!pdu->num_cis)
1834 		return 0;
1835 
1836 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1837 				     struct_size(pdu, cis, pdu->num_cis),
1838 				     pdu, HCI_CMD_TIMEOUT);
1839 }
1840 
1841 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1842 {
1843 	struct hci_dev *hdev = conn->hdev;
1844 	struct iso_list_data data;
1845 
1846 	memset(&data, 0, sizeof(data));
1847 
1848 	/* Allocate the first still-reconfigurable CIG if not set */
1849 	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1850 		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1851 			data.count = 0;
1852 
1853 			hci_conn_hash_list_state(hdev, find_cis, CIS_LINK,
1854 						 BT_CONNECT, &data);
1855 			if (data.count)
1856 				continue;
1857 
1858 			hci_conn_hash_list_state(hdev, find_cis, CIS_LINK,
1859 						 BT_CONNECTED, &data);
1860 			if (!data.count)
1861 				break;
1862 		}
1863 
1864 		if (data.cig == 0xf0)
1865 			return false;
1866 
1867 		/* Update CIG */
1868 		qos->ucast.cig = data.cig;
1869 	}
1870 
1871 	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1872 		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1873 					     qos->ucast.cis))
1874 			return false;
1875 		goto done;
1876 	}
1877 
1878 	/* Allocate first available CIS if not set */
1879 	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1880 	     data.cis++) {
1881 		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1882 					      data.cis)) {
1883 			/* Update CIS */
1884 			qos->ucast.cis = data.cis;
1885 			break;
1886 		}
1887 	}
1888 
1889 	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1890 		return false;
1891 
1892 done:
1893 	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1894 			       UINT_PTR(qos->ucast.cig), NULL) < 0)
1895 		return false;
1896 
1897 	return true;
1898 }
1899 
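/* Bind a CIS to the given destination and QoS: reuse a matching
 * hci_conn if one already exists, otherwise create one, fill in any
 * unset QoS values from their counterparts and queue the Set CIG
 * Parameters command.
 */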
1900 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1901 			      __u8 dst_type, struct bt_iso_qos *qos)
1902 {
1903 	struct hci_conn *cis;
1904 
1905 	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1906 				       qos->ucast.cis);
1907 	if (!cis) {
1908 		cis = hci_conn_add_unset(hdev, CIS_LINK, dst,
1909 					 HCI_ROLE_MASTER);
1910 		if (IS_ERR(cis))
1911 			return cis;
1912 		cis->cleanup = cis_cleanup;
1913 		cis->dst_type = dst_type;
1914 		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1915 		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1916 	}
1917 
1918 	if (cis->state == BT_CONNECTED)
1919 		return cis;
1920 
	/* Check if the CIS has been set and the settings match */
1922 	if (cis->state == BT_BOUND &&
1923 	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1924 		return cis;
1925 
1926 	/* Update LINK PHYs according to QoS preference */
1927 	cis->le_tx_phy = qos->ucast.out.phy;
1928 	cis->le_rx_phy = qos->ucast.in.phy;
1929 
1930 	/* If output interval is not set use the input interval as it cannot be
1931 	 * 0x000000.
1932 	 */
1933 	if (!qos->ucast.out.interval)
1934 		qos->ucast.out.interval = qos->ucast.in.interval;
1935 
1936 	/* If input interval is not set use the output interval as it cannot be
1937 	 * 0x000000.
1938 	 */
1939 	if (!qos->ucast.in.interval)
1940 		qos->ucast.in.interval = qos->ucast.out.interval;
1941 
1942 	/* If output latency is not set use the input latency as it cannot be
1943 	 * 0x0000.
1944 	 */
1945 	if (!qos->ucast.out.latency)
1946 		qos->ucast.out.latency = qos->ucast.in.latency;
1947 
1948 	/* If input latency is not set use the output latency as it cannot be
1949 	 * 0x0000.
1950 	 */
1951 	if (!qos->ucast.in.latency)
1952 		qos->ucast.in.latency = qos->ucast.out.latency;
1953 
1954 	if (!hci_le_set_cig_params(cis, qos)) {
1955 		hci_conn_drop(cis);
1956 		return ERR_PTR(-EINVAL);
1957 	}
1958 
1959 	hci_conn_hold(cis);
1960 
1961 	cis->iso_qos = *qos;
1962 	cis->state = BT_BOUND;
1963 
1964 	return cis;
1965 }
1966 
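/* Set up the HCI data path for each direction of an ISO connection
 * that has an SDU size configured, using the transparent (0x03) codec.
 */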
1967 bool hci_iso_setup_path(struct hci_conn *conn)
1968 {
1969 	struct hci_dev *hdev = conn->hdev;
1970 	struct hci_cp_le_setup_iso_path cmd;
1971 
1972 	memset(&cmd, 0, sizeof(cmd));
1973 
1974 	if (conn->iso_qos.ucast.out.sdu) {
1975 		cmd.handle = cpu_to_le16(conn->handle);
1976 		cmd.direction = 0x00; /* Input (Host to Controller) */
1977 		cmd.path = 0x00; /* HCI path if enabled */
1978 		cmd.codec = 0x03; /* Transparent Data */
1979 
1980 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1981 				 &cmd) < 0)
1982 			return false;
1983 	}
1984 
1985 	if (conn->iso_qos.ucast.in.sdu) {
1986 		cmd.handle = cpu_to_le16(conn->handle);
1987 		cmd.direction = 0x01; /* Output (Controller to Host) */
1988 		cmd.path = 0x00; /* HCI path if enabled */
1989 		cmd.codec = 0x03; /* Transparent Data */
1990 
1991 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1992 				 &cmd) < 0)
1993 			return false;
1994 	}
1995 
1996 	return true;
1997 }
1998 
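/* Return 0 if the CIS is ready for the Create CIS procedure, 1 if it
 * is not ready yet, or -EINVAL if the connection is not a CIS at all.
 */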
1999 int hci_conn_check_create_cis(struct hci_conn *conn)
2000 {
2001 	if (conn->type != CIS_LINK)
2002 		return -EINVAL;
2003 
2004 	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
2005 	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
2006 		return 1;
2007 
2008 	return 0;
2009 }
2010 
2011 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
2012 {
2013 	return hci_le_create_cis_sync(hdev);
2014 }
2015 
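/* Queue the Create CIS procedure if at least one CIS is ready for it.
 * Returns -EBUSY if a Create CIS is already in progress, i.e. some
 * connection has HCI_CONN_CREATE_CIS set.
 */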
2016 int hci_le_create_cis_pending(struct hci_dev *hdev)
2017 {
2018 	struct hci_conn *conn;
2019 	bool pending = false;
2020 
2021 	rcu_read_lock();
2022 
2023 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2024 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
2025 			rcu_read_unlock();
2026 			return -EBUSY;
2027 		}
2028 
2029 		if (!hci_conn_check_create_cis(conn))
2030 			pending = true;
2031 	}
2032 
2033 	rcu_read_unlock();
2034 
2035 	if (!pending)
2036 		return 0;
2037 
2038 	/* Queue Create CIS */
2039 	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
2040 }
2041 
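/* Fill in any unset I/O QoS values with defaults derived from the ACL
 * connection: the connection MTU for the SDU size, the ACL PHY, and
 * the LE connection interval and latency.
 */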
2042 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
2043 			      struct bt_iso_io_qos *qos, __u8 phy)
2044 {
	/* Only default the SDU to the connection MTU if the PHY is enabled */
2046 	if (!qos->sdu && qos->phy)
2047 		qos->sdu = conn->mtu;
2048 
2049 	/* Use the same PHY as ACL if set to any */
2050 	if (qos->phy == BT_ISO_PHY_ANY)
2051 		qos->phy = phy;
2052 
2053 	/* Use LE ACL connection interval if not set */
2054 	if (!qos->interval)
		/* The ACL interval is in 1.25 ms units; convert to us */
2056 		qos->interval = conn->le_conn_interval * 1250;
2057 
2058 	/* Use LE ACL connection latency if not set */
2059 	if (!qos->latency)
2060 		qos->latency = conn->le_conn_latency;
2061 }
2062 
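/* Start periodic advertising for the BIG and then create it with
 * HCI_OP_LE_CREATE_BIG; runs from the cmd_sync queue, queued by
 * hci_connect_bis().
 */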
2063 static int create_big_sync(struct hci_dev *hdev, void *data)
2064 {
2065 	struct hci_conn *conn = data;
2066 	struct bt_iso_qos *qos = &conn->iso_qos;
2067 	u16 interval, sync_interval = 0;
2068 	u32 flags = 0;
2069 	int err;
2070 
2071 	if (qos->bcast.out.phy == 0x02)
2072 		flags |= MGMT_ADV_FLAG_SEC_2M;
2073 
2074 	/* Align intervals */
2075 	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2076 
2077 	if (qos->bcast.bis)
2078 		sync_interval = interval * 4;
2079 
2080 	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->sid,
2081 				     conn->le_per_adv_data_len,
2082 				     conn->le_per_adv_data, flags, interval,
2083 				     interval, sync_interval);
2084 	if (err)
2085 		return err;
2086 
2087 	return hci_le_create_big(conn, &conn->iso_qos);
2088 }
2089 
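/* Create a hci_conn tracking a Periodic Advertising sync to the given
 * destination and start establishing the sync via
 * hci_connect_pa_sync().
 */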
2090 struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
2091 				    __u8 dst_type, __u8 sid,
2092 				    struct bt_iso_qos *qos)
2093 {
2094 	struct hci_conn *conn;
2095 
2096 	bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid);
2097 
2098 	conn = hci_conn_add_unset(hdev, PA_LINK, dst, HCI_ROLE_SLAVE);
2099 	if (IS_ERR(conn))
2100 		return conn;
2101 
2102 	conn->iso_qos = *qos;
2103 	conn->dst_type = dst_type;
2104 	conn->sid = sid;
2105 	conn->state = BT_LISTEN;
2106 	conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);
2107 
2108 	hci_conn_hold(conn);
2109 
2110 	hci_connect_pa_sync(hdev, conn);
2111 
2112 	return conn;
2113 }
2114 
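/* Create a sync to the BISes of a broadcaster: validate @num_bis,
 * apply the BIG QoS settings, update @hcon (if given) and hand off to
 * hci_connect_big_sync().
 */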
2115 int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2116 			     struct bt_iso_qos *qos, __u16 sync_handle,
2117 			     __u8 num_bis, __u8 bis[])
2118 {
2119 	int err;
2120 
2121 	if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
2122 		return -EINVAL;
2123 
2124 	err = qos_set_big(hdev, qos);
2125 	if (err)
2126 		return err;
2127 
2128 	if (hcon) {
2129 		/* Update hcon QoS */
2130 		hcon->iso_qos = *qos;
2131 
2132 		hcon->num_bis = num_bis;
2133 		memcpy(hcon->bis, bis, num_bis);
2134 		hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
2135 	}
2136 
2137 	return hci_connect_big_sync(hdev, hcon);
2138 }
2139 
2140 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2141 {
2142 	struct hci_conn *conn = data;
2143 
2144 	bt_dev_dbg(hdev, "conn %p", conn);
2145 
2146 	if (err) {
2147 		bt_dev_err(hdev, "Unable to create BIG: %d", err);
2148 		hci_connect_cfm(conn, err);
2149 		hci_conn_del(conn);
2150 	}
2151 }
2152 
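/* Bind a BIS: reuse a BIG that is still open for rebinding if one
 * exists, otherwise create the hci_conn, append the BASE to the
 * periodic advertising data and link the BIS to any existing parent of
 * the same BIG.
 */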
2153 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
2154 			      struct bt_iso_qos *qos,
2155 			      __u8 base_len, __u8 *base)
2156 {
2157 	struct hci_conn *conn;
2158 	struct hci_conn *parent;
2159 	__u8 eir[HCI_MAX_PER_AD_LENGTH];
2160 	struct hci_link *link;
2161 
2162 	/* Look for any BIS that is open for rebinding */
2163 	conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN,
2164 					      HCI_ROLE_MASTER);
2165 	if (conn) {
2166 		memcpy(qos, &conn->iso_qos, sizeof(*qos));
2167 		conn->state = BT_CONNECTED;
2168 		return conn;
2169 	}
2170 
2171 	if (base_len && base)
2172 		base_len = eir_append_service_data(eir, 0,  0x1851,
2173 						   base, base_len);
2174 
	/* We need a hci_conn object using BDADDR_ANY as dst */
2176 	conn = hci_add_bis(hdev, dst, sid, qos, base_len, eir);
2177 	if (IS_ERR(conn))
2178 		return conn;
2179 
2180 	/* Update LINK PHYs according to QoS preference */
2181 	conn->le_tx_phy = qos->bcast.out.phy;
2183 
	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
2185 	if (base_len && base) {
2186 		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2187 		conn->le_per_adv_data_len = base_len;
2188 	}
2189 
2190 	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2191 			  conn->le_tx_phy ? conn->le_tx_phy :
2192 			  hdev->le_tx_def_phys);
2193 
2194 	conn->iso_qos = *qos;
2195 	conn->state = BT_BOUND;
2196 
2197 	/* Link BISes together */
2198 	parent = hci_conn_hash_lookup_big(hdev,
2199 					  conn->iso_qos.bcast.big);
2200 	if (parent && parent != conn) {
2201 		link = hci_conn_link(parent, conn);
2202 		hci_conn_drop(conn);
2203 		if (!link)
2204 			return ERR_PTR(-ENOLINK);
2205 	}
2206 
2207 	return conn;
2208 }
2209 
2210 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2211 {
2212 	struct iso_list_data *d = data;
2213 
2214 	/* Skip if not broadcast/ANY address */
2215 	if (bacmp(&conn->dst, BDADDR_ANY))
2216 		return;
2217 
2218 	if (d->big != conn->iso_qos.bcast.big ||
2219 	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2220 	    d->bis != conn->iso_qos.bcast.bis)
2221 		return;
2222 
2223 	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2224 }
2225 
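/* Bind a BIS and queue the commands needed to start broadcasting:
 * allocate an advertising SID if needed, mark the bound connections of
 * the BIG with HCI_CONN_PER_ADV and queue the periodic advertising and
 * Create BIG commands.
 */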
2226 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2227 				 __u8 dst_type, __u8 sid,
2228 				 struct bt_iso_qos *qos,
2229 				 __u8 base_len, __u8 *base)
2230 {
2231 	struct hci_conn *conn;
2232 	int err;
2233 	struct iso_list_data data;
2234 
2235 	conn = hci_bind_bis(hdev, dst, sid, qos, base_len, base);
2236 	if (IS_ERR(conn))
2237 		return conn;
2238 
2239 	if (conn->state == BT_CONNECTED)
2240 		return conn;
2241 
2242 	/* Check if SID needs to be allocated then search for the first
2243 	 * available.
2244 	 */
2245 	if (conn->sid == HCI_SID_INVALID) {
2246 		u8 sid;
2247 
2248 		for (sid = 0; sid <= 0x0f; sid++) {
2249 			if (!hci_find_adv_sid(hdev, sid)) {
2250 				conn->sid = sid;
2251 				break;
2252 			}
2253 		}
2254 	}
2255 
2256 	data.big = qos->bcast.big;
2257 	data.bis = qos->bcast.bis;
2258 
2259 	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2260 	 * the start periodic advertising and create BIG commands have
2261 	 * been queued
2262 	 */
2263 	hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK,
2264 				 BT_BOUND, &data);
2265 
2266 	/* Queue start periodic advertising and create BIG */
2267 	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2268 				 create_big_complete);
2269 	if (err < 0) {
2270 		hci_conn_drop(conn);
2271 		return ERR_PTR(err);
2272 	}
2273 
2274 	return conn;
2275 }
2276 
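/* Establish a CIS: create or reuse the underlying LE ACL connection,
 * apply the QoS defaults, bind the CIS to the ACL and queue the Create
 * CIS procedure.
 */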
2277 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2278 				 __u8 dst_type, struct bt_iso_qos *qos)
2279 {
2280 	struct hci_conn *le;
2281 	struct hci_conn *cis;
2282 	struct hci_link *link;
2283 
2284 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2285 		le = hci_connect_le(hdev, dst, dst_type, false,
2286 				    BT_SECURITY_LOW,
2287 				    HCI_LE_CONN_TIMEOUT,
2288 				    HCI_ROLE_SLAVE, 0, 0);
2289 	else
2290 		le = hci_connect_le_scan(hdev, dst, dst_type,
2291 					 BT_SECURITY_LOW,
2292 					 HCI_LE_CONN_TIMEOUT,
2293 					 CONN_REASON_ISO_CONNECT);
2294 	if (IS_ERR(le))
2295 		return le;
2296 
2297 	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2298 			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2299 	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2300 			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2301 
2302 	cis = hci_bind_cis(hdev, dst, dst_type, qos);
2303 	if (IS_ERR(cis)) {
2304 		hci_conn_drop(le);
2305 		return cis;
2306 	}
2307 
2308 	link = hci_conn_link(le, cis);
2309 	hci_conn_drop(cis);
2310 	if (!link) {
2311 		hci_conn_drop(le);
2312 		return ERR_PTR(-ENOLINK);
2313 	}
2314 
2315 	cis->state = BT_CONNECT;
2316 
2317 	hci_le_create_cis_pending(hdev);
2318 
2319 	return cis;
2320 }
2321 
2322 /* Check link security requirement */
2323 int hci_conn_check_link_mode(struct hci_conn *conn)
2324 {
2325 	BT_DBG("hcon %p", conn);
2326 
2327 	/* In Secure Connections Only mode, it is required that Secure
2328 	 * Connections is used and the link is encrypted with AES-CCM
2329 	 * using a P-256 authenticated combination key.
2330 	 */
2331 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2332 		if (!hci_conn_sc_enabled(conn) ||
2333 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2334 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2335 			return 0;
2336 	}
2337 
	/* AES encryption is required for Level 4:
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
	 * page 1319:
	 *
	 * 128-bit equivalent strength for link and encryption keys
	 * required using FIPS approved algorithms (E0 not allowed,
	 * SAFER+ not allowed, and P-192 not allowed; encryption key
	 * not shortened)
	 */
2348 	if (conn->sec_level == BT_SECURITY_FIPS &&
2349 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2350 		bt_dev_err(conn->hdev,
2351 			   "Invalid security: Missing AES-CCM usage");
2352 		return 0;
2353 	}
2354 
2355 	if (hci_conn_ssp_enabled(conn) &&
2356 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2357 		return 0;
2358 
2359 	return 1;
2360 }
2361 
2362 /* Authenticate remote device */
2363 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2364 {
2365 	BT_DBG("hcon %p", conn);
2366 
2367 	if (conn->pending_sec_level > sec_level)
2368 		sec_level = conn->pending_sec_level;
2369 
2370 	if (sec_level > conn->sec_level)
2371 		conn->pending_sec_level = sec_level;
2372 	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2373 		return 1;
2374 
	/* Make sure we preserve an existing MITM requirement */
2376 	auth_type |= (conn->auth_type & 0x01);
2377 
2378 	conn->auth_type = auth_type;
2379 
2380 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2381 		struct hci_cp_auth_requested cp;
2382 
2383 		cp.handle = cpu_to_le16(conn->handle);
2384 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2385 			     sizeof(cp), &cp);
2386 
2387 		/* Set the ENCRYPT_PEND to trigger encryption after
2388 		 * authentication.
2389 		 */
2390 		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2391 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2392 	}
2393 
2394 	return 0;
2395 }
2396 
2397 /* Encrypt the link */
2398 static void hci_conn_encrypt(struct hci_conn *conn)
2399 {
2400 	BT_DBG("hcon %p", conn);
2401 
2402 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2403 		struct hci_cp_set_conn_encrypt cp;
2404 		cp.handle  = cpu_to_le16(conn->handle);
2405 		cp.encrypt = 0x01;
2406 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2407 			     &cp);
2408 	}
2409 }
2410 
2411 /* Enable security */
2412 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2413 		      bool initiator)
2414 {
2415 	BT_DBG("hcon %p", conn);
2416 
2417 	if (conn->type == LE_LINK)
2418 		return smp_conn_security(conn, sec_level);
2419 
	/* For SDP we don't need the link key. */
2421 	if (sec_level == BT_SECURITY_SDP)
2422 		return 1;
2423 
	/* For non-2.1 devices and low security level we don't need the link
	 * key.
	 */
2426 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2427 		return 1;
2428 
2429 	/* For other security levels we need the link key. */
2430 	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2431 		goto auth;
2432 
2433 	switch (conn->key_type) {
2434 	case HCI_LK_AUTH_COMBINATION_P256:
2435 		/* An authenticated FIPS approved combination key has
2436 		 * sufficient security for security level 4 or lower.
2437 		 */
2438 		if (sec_level <= BT_SECURITY_FIPS)
2439 			goto encrypt;
2440 		break;
2441 	case HCI_LK_AUTH_COMBINATION_P192:
2442 		/* An authenticated combination key has sufficient security for
2443 		 * security level 3 or lower.
2444 		 */
2445 		if (sec_level <= BT_SECURITY_HIGH)
2446 			goto encrypt;
2447 		break;
2448 	case HCI_LK_UNAUTH_COMBINATION_P192:
2449 	case HCI_LK_UNAUTH_COMBINATION_P256:
2450 		/* An unauthenticated combination key has sufficient security
2451 		 * for security level 2 or lower.
2452 		 */
2453 		if (sec_level <= BT_SECURITY_MEDIUM)
2454 			goto encrypt;
2455 		break;
2456 	case HCI_LK_COMBINATION:
		/* A combination key always has sufficient security for
		 * security level 2 or lower. A high security level requires
		 * that the combination key was generated using the maximum
		 * PIN code length (16). Used by pre-2.1 units.
2461 		 */
2462 		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2463 			goto encrypt;
2464 		break;
2465 	default:
2466 		break;
2467 	}
2468 
2469 auth:
2470 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2471 		return 0;
2472 
2473 	if (initiator)
2474 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2475 
2476 	if (!hci_conn_auth(conn, sec_level, auth_type))
2477 		return 0;
2478 
2479 encrypt:
2480 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2481 		/* Ensure that the encryption key size has been read,
2482 		 * otherwise stall the upper layer responses.
2483 		 */
2484 		if (!conn->enc_key_size)
2485 			return 0;
2486 
2487 		/* Nothing else needed, all requirements are met */
2488 		return 1;
2489 	}
2490 
2491 	hci_conn_encrypt(conn);
2492 	return 0;
2493 }
2494 EXPORT_SYMBOL(hci_conn_security);
2495 
2496 /* Check secure link requirement */
2497 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2498 {
2499 	BT_DBG("hcon %p", conn);
2500 
2501 	/* Accept if non-secure or higher security level is required */
2502 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2503 		return 1;
2504 
2505 	/* Accept if secure or higher security level is already present */
2506 	if (conn->sec_level == BT_SECURITY_HIGH ||
2507 	    conn->sec_level == BT_SECURITY_FIPS)
2508 		return 1;
2509 
	/* Reject if the link is not secure */
2511 	return 0;
2512 }
2513 EXPORT_SYMBOL(hci_conn_check_secure);
2514 
2515 /* Switch role */
2516 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2517 {
2518 	BT_DBG("hcon %p", conn);
2519 
2520 	if (role == conn->role)
2521 		return 1;
2522 
2523 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2524 		struct hci_cp_switch_role cp;
2525 		bacpy(&cp.bdaddr, &conn->dst);
2526 		cp.role = role;
2527 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2528 	}
2529 
2530 	return 0;
2531 }
2532 EXPORT_SYMBOL(hci_conn_switch_role);
2533 
2534 /* Enter active mode */
2535 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2536 {
2537 	struct hci_dev *hdev = conn->hdev;
2538 
2539 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2540 
2541 	if (conn->mode != HCI_CM_SNIFF)
2542 		goto timer;
2543 
2544 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2545 		goto timer;
2546 
2547 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2548 		struct hci_cp_exit_sniff_mode cp;
2549 		cp.handle = cpu_to_le16(conn->handle);
2550 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2551 	}
2552 
2553 timer:
2554 	if (hdev->idle_timeout > 0)
2555 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2556 				   msecs_to_jiffies(hdev->idle_timeout));
2557 }
2558 
/* Drop all connections on the device */
2560 void hci_conn_hash_flush(struct hci_dev *hdev)
2561 {
2562 	struct list_head *head = &hdev->conn_hash.list;
2563 	struct hci_conn *conn;
2564 
2565 	BT_DBG("hdev %s", hdev->name);
2566 
2567 	/* We should not traverse the list here, because hci_conn_del
2568 	 * can remove extra links, which may cause the list traversal
2569 	 * to hit items that have already been released.
2570 	 */
2571 	while ((conn = list_first_entry_or_null(head,
2572 						struct hci_conn,
2573 						list)) != NULL) {
2574 		conn->state = BT_CLOSED;
2575 		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2576 		hci_conn_del(conn);
2577 	}
2578 }
2579 
2580 static u32 get_link_mode(struct hci_conn *conn)
2581 {
2582 	u32 link_mode = 0;
2583 
2584 	if (conn->role == HCI_ROLE_MASTER)
2585 		link_mode |= HCI_LM_MASTER;
2586 
2587 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2588 		link_mode |= HCI_LM_ENCRYPT;
2589 
2590 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2591 		link_mode |= HCI_LM_AUTH;
2592 
2593 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2594 		link_mode |= HCI_LM_SECURE;
2595 
2596 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2597 		link_mode |= HCI_LM_FIPS;
2598 
2599 	return link_mode;
2600 }
2601 
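/* Handle the HCIGETCONNLIST ioctl: copy a snapshot of the connection
 * list of the requested device to userspace.
 */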
2602 int hci_get_conn_list(void __user *arg)
2603 {
2604 	struct hci_conn *c;
2605 	struct hci_conn_list_req req, *cl;
2606 	struct hci_conn_info *ci;
2607 	struct hci_dev *hdev;
2608 	int n = 0, size, err;
2609 
2610 	if (copy_from_user(&req, arg, sizeof(req)))
2611 		return -EFAULT;
2612 
2613 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2614 		return -EINVAL;
2615 
2616 	size = sizeof(req) + req.conn_num * sizeof(*ci);
2617 
2618 	cl = kmalloc(size, GFP_KERNEL);
2619 	if (!cl)
2620 		return -ENOMEM;
2621 
2622 	hdev = hci_dev_get(req.dev_id);
2623 	if (!hdev) {
2624 		kfree(cl);
2625 		return -ENODEV;
2626 	}
2627 
2628 	ci = cl->conn_info;
2629 
2630 	hci_dev_lock(hdev);
2631 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2632 		bacpy(&(ci + n)->bdaddr, &c->dst);
2633 		(ci + n)->handle = c->handle;
2634 		(ci + n)->type  = c->type;
2635 		(ci + n)->out   = c->out;
2636 		(ci + n)->state = c->state;
2637 		(ci + n)->link_mode = get_link_mode(c);
2638 		if (++n >= req.conn_num)
2639 			break;
2640 	}
2641 	hci_dev_unlock(hdev);
2642 
2643 	cl->dev_id = hdev->id;
2644 	cl->conn_num = n;
2645 	size = sizeof(req) + n * sizeof(*ci);
2646 
2647 	hci_dev_put(hdev);
2648 
2649 	err = copy_to_user(arg, cl, size);
2650 	kfree(cl);
2651 
2652 	return err ? -EFAULT : 0;
2653 }
2654 
2655 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2656 {
2657 	struct hci_conn_info_req req;
2658 	struct hci_conn_info ci;
2659 	struct hci_conn *conn;
2660 	char __user *ptr = arg + sizeof(req);
2661 
2662 	if (copy_from_user(&req, arg, sizeof(req)))
2663 		return -EFAULT;
2664 
2665 	hci_dev_lock(hdev);
2666 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2667 	if (conn) {
2668 		bacpy(&ci.bdaddr, &conn->dst);
2669 		ci.handle = conn->handle;
2670 		ci.type  = conn->type;
2671 		ci.out   = conn->out;
2672 		ci.state = conn->state;
2673 		ci.link_mode = get_link_mode(conn);
2674 	}
2675 	hci_dev_unlock(hdev);
2676 
2677 	if (!conn)
2678 		return -ENOENT;
2679 
2680 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2681 }
2682 
2683 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2684 {
2685 	struct hci_auth_info_req req;
2686 	struct hci_conn *conn;
2687 
2688 	if (copy_from_user(&req, arg, sizeof(req)))
2689 		return -EFAULT;
2690 
2691 	hci_dev_lock(hdev);
2692 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2693 	if (conn)
2694 		req.type = conn->auth_type;
2695 	hci_dev_unlock(hdev);
2696 
2697 	if (!conn)
2698 		return -ENOENT;
2699 
2700 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2701 }
2702 
2703 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2704 {
2705 	struct hci_dev *hdev = conn->hdev;
2706 	struct hci_chan *chan;
2707 
2708 	BT_DBG("%s hcon %p", hdev->name, conn);
2709 
2710 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2711 		BT_DBG("Refusing to create new hci_chan");
2712 		return NULL;
2713 	}
2714 
2715 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2716 	if (!chan)
2717 		return NULL;
2718 
2719 	chan->conn = hci_conn_get(conn);
2720 	skb_queue_head_init(&chan->data_q);
2721 	chan->state = BT_CONNECTED;
2722 
2723 	list_add_rcu(&chan->list, &conn->chan_list);
2724 
2725 	return chan;
2726 }
2727 
2728 void hci_chan_del(struct hci_chan *chan)
2729 {
2730 	struct hci_conn *conn = chan->conn;
2731 	struct hci_dev *hdev = conn->hdev;
2732 
2733 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2734 
2735 	list_del_rcu(&chan->list);
2736 
2737 	synchronize_rcu();
2738 
	/* Prevent new hci_chan's from being created for this hci_conn */
2740 	set_bit(HCI_CONN_DROP, &conn->flags);
2741 
2742 	hci_conn_put(conn);
2743 
2744 	skb_queue_purge(&chan->data_q);
2745 	kfree(chan);
2746 }
2747 
2748 void hci_chan_list_flush(struct hci_conn *conn)
2749 {
2750 	struct hci_chan *chan, *n;
2751 
2752 	BT_DBG("hcon %p", conn);
2753 
2754 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2755 		hci_chan_del(chan);
2756 }
2757 
2758 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2759 						 __u16 handle)
2760 {
2761 	struct hci_chan *hchan;
2762 
2763 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2764 		if (hchan->handle == handle)
2765 			return hchan;
2766 	}
2767 
2768 	return NULL;
2769 }
2770 
2771 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2772 {
2773 	struct hci_conn_hash *h = &hdev->conn_hash;
2774 	struct hci_conn *hcon;
2775 	struct hci_chan *hchan = NULL;
2776 
2777 	rcu_read_lock();
2778 
2779 	list_for_each_entry_rcu(hcon, &h->list, list) {
2780 		hchan = __hci_chan_lookup_handle(hcon, handle);
2781 		if (hchan)
2782 			break;
2783 	}
2784 
2785 	rcu_read_unlock();
2786 
2787 	return hchan;
2788 }
2789 
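/* Return the BT_PHY_* bits that may be in use on the connection,
 * derived from the negotiated packet types for BR/EDR links and from
 * the LE PHYs for LE links.
 */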
2790 u32 hci_conn_get_phy(struct hci_conn *conn)
2791 {
2792 	u32 phys = 0;
2793 
2794 	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2795 	 * Table 6.2: Packets defined for synchronous, asynchronous, and
2796 	 * CPB logical transport types.
2797 	 */
2798 	switch (conn->type) {
2799 	case SCO_LINK:
2800 		/* SCO logical transport (1 Mb/s):
2801 		 * HV1, HV2, HV3 and DV.
2802 		 */
2803 		phys |= BT_PHY_BR_1M_1SLOT;
2804 
2805 		break;
2806 
2807 	case ACL_LINK:
2808 		/* ACL logical transport (1 Mb/s) ptt=0:
2809 		 * DH1, DM3, DH3, DM5 and DH5.
2810 		 */
2811 		phys |= BT_PHY_BR_1M_1SLOT;
2812 
2813 		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2814 			phys |= BT_PHY_BR_1M_3SLOT;
2815 
2816 		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2817 			phys |= BT_PHY_BR_1M_5SLOT;
2818 
2819 		/* ACL logical transport (2 Mb/s) ptt=1:
2820 		 * 2-DH1, 2-DH3 and 2-DH5.
2821 		 */
2822 		if (!(conn->pkt_type & HCI_2DH1))
2823 			phys |= BT_PHY_EDR_2M_1SLOT;
2824 
2825 		if (!(conn->pkt_type & HCI_2DH3))
2826 			phys |= BT_PHY_EDR_2M_3SLOT;
2827 
2828 		if (!(conn->pkt_type & HCI_2DH5))
2829 			phys |= BT_PHY_EDR_2M_5SLOT;
2830 
2831 		/* ACL logical transport (3 Mb/s) ptt=1:
2832 		 * 3-DH1, 3-DH3 and 3-DH5.
2833 		 */
2834 		if (!(conn->pkt_type & HCI_3DH1))
2835 			phys |= BT_PHY_EDR_3M_1SLOT;
2836 
2837 		if (!(conn->pkt_type & HCI_3DH3))
2838 			phys |= BT_PHY_EDR_3M_3SLOT;
2839 
2840 		if (!(conn->pkt_type & HCI_3DH5))
2841 			phys |= BT_PHY_EDR_3M_5SLOT;
2842 
2843 		break;
2844 
2845 	case ESCO_LINK:
2846 		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2847 		phys |= BT_PHY_BR_1M_1SLOT;
2848 
2849 		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2850 			phys |= BT_PHY_BR_1M_3SLOT;
2851 
2852 		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2853 		if (!(conn->pkt_type & ESCO_2EV3))
2854 			phys |= BT_PHY_EDR_2M_1SLOT;
2855 
2856 		if (!(conn->pkt_type & ESCO_2EV5))
2857 			phys |= BT_PHY_EDR_2M_3SLOT;
2858 
2859 		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2860 		if (!(conn->pkt_type & ESCO_3EV3))
2861 			phys |= BT_PHY_EDR_3M_1SLOT;
2862 
2863 		if (!(conn->pkt_type & ESCO_3EV5))
2864 			phys |= BT_PHY_EDR_3M_3SLOT;
2865 
2866 		break;
2867 
2868 	case LE_LINK:
2869 		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2870 			phys |= BT_PHY_LE_1M_TX;
2871 
2872 		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2873 			phys |= BT_PHY_LE_1M_RX;
2874 
2875 		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2876 			phys |= BT_PHY_LE_2M_TX;
2877 
2878 		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2879 			phys |= BT_PHY_LE_2M_RX;
2880 
2881 		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2882 			phys |= BT_PHY_LE_CODED_TX;
2883 
2884 		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2885 			phys |= BT_PHY_LE_CODED_RX;
2886 
2887 		break;
2888 	}
2889 
2890 	return phys;
2891 }
2892 
2893 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2894 {
2895 	struct hci_conn *conn = data;
2896 
2897 	if (!hci_conn_valid(hdev, conn))
2898 		return -ECANCELED;
2899 
2900 	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2901 }
2902 
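/* Abort the connection with the given reason, cancelling any queued or
 * pending connect attempt first. Safe to call from cmd_sync_work since
 * abort_conn_sync() is run via hci_cmd_sync_run_once().
 */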
2903 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2904 {
2905 	struct hci_dev *hdev = conn->hdev;
2906 
2907 	/* If abort_reason has already been set it means the connection is
2908 	 * already being aborted so don't attempt to overwrite it.
2909 	 */
2910 	if (conn->abort_reason)
2911 		return 0;
2912 
2913 	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2914 
2915 	conn->abort_reason = reason;
2916 
	/* If the connection is pending, check the command opcode since it
	 * might be blocking on hci_cmd_sync_work while waiting for its
	 * respective event, so we need hci_cmd_sync_cancel() to cancel it.
	 *
	 * hci_connect_le() serializes the connection attempts so only one
	 * connection can be in BT_CONNECT at a time.
2923 	 */
2924 	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2925 		switch (hci_skb_event(hdev->sent_cmd)) {
2926 		case HCI_EV_CONN_COMPLETE:
2927 		case HCI_EV_LE_CONN_COMPLETE:
2928 		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2929 		case HCI_EVT_LE_CIS_ESTABLISHED:
2930 			hci_cmd_sync_cancel(hdev, ECANCELED);
2931 			break;
2932 		}
2933 	/* Cancel connect attempt if still queued/pending */
2934 	} else if (!hci_cancel_connect_sync(hdev, conn)) {
2935 		return 0;
2936 	}
2937 
	/* Run immediately if on cmd_sync_work since this may be called
	 * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which already
	 * queues its callback on cmd_sync_work.
2941 	 */
2942 	return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
2943 }
2944 
2945 void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
2946 			    const struct sockcm_cookie *sockc)
2947 {
2948 	struct sock *sk = skb ? skb->sk : NULL;
2949 	int key;
2950 
	/* This shall be called on a single skb of those generated by user
	 * sendmsg(), and only when the sendmsg() does not return an error to
	 * the user. This is required to keep the tskey incremented here in
	 * sync with any sendmsg() counting done by the user.
	 *
	 * Stream sockets shall set key_offset to the sendmsg() length in
	 * bytes and call this with the last fragment; others shall set it to
	 * 1 and call this with the first fragment.
2958 	 */
2959 
2960 	if (!skb || !sockc || !sk || !key_offset)
2961 		return;
2962 
2963 	sock_tx_timestamp(sk, sockc, &skb_shinfo(skb)->tx_flags);
2964 
2965 	if (sk->sk_type == SOCK_STREAM)
2966 		key = atomic_add_return(key_offset, &sk->sk_tskey);
2967 
2968 	if (sockc->tsflags & SOF_TIMESTAMPING_OPT_ID &&
2969 	    sockc->tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) {
2970 		if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID) {
2971 			skb_shinfo(skb)->tskey = sockc->ts_opt_id;
2972 		} else {
2973 			if (sk->sk_type != SOCK_STREAM)
2974 				key = atomic_inc_return(&sk->sk_tskey);
2975 			skb_shinfo(skb)->tskey = key - 1;
2976 		}
2977 	}
2978 }
2979 
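/* Track a transmitted skb for TX timestamping; called just before the
 * skb is handed to the driver. A clone may be queued so that the
 * COMPLETION timestamp can be emitted once the packet completes.
 */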
2980 void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb)
2981 {
2982 	struct tx_queue *comp = &conn->tx_q;
2983 	bool track = false;
2984 
	/* Emit SND now, i.e. just before sending to the driver */
2986 	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
2987 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SND);
2988 
	/* The COMPLETION tstamp for a tracked skb is emitted later, from the
	 * Number of Completed Packets event. Available only for the
	 * flow-controlled cases.
2991 	 *
2992 	 * TODO: SCO support without flowctl (needs to be done in drivers)
2993 	 */
2994 	switch (conn->type) {
2995 	case CIS_LINK:
2996 	case BIS_LINK:
2997 	case PA_LINK:
2998 	case ACL_LINK:
2999 	case LE_LINK:
3000 		break;
3001 	case SCO_LINK:
3002 	case ESCO_LINK:
3003 		if (!hci_dev_test_flag(conn->hdev, HCI_SCO_FLOWCTL))
3004 			return;
3005 		break;
3006 	default:
3007 		return;
3008 	}
3009 
3010 	if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP))
3011 		track = true;
3012 
3013 	/* If nothing is tracked, just count extra skbs at the queue head */
3014 	if (!track && !comp->tracked) {
3015 		comp->extra++;
3016 		return;
3017 	}
3018 
3019 	if (track) {
3020 		skb = skb_clone_sk(skb);
3021 		if (!skb)
3022 			goto count_only;
3023 
3024 		comp->tracked++;
3025 	} else {
3026 		skb = skb_clone(skb, GFP_KERNEL);
3027 		if (!skb)
3028 			goto count_only;
3029 	}
3030 
3031 	skb_queue_tail(&comp->queue, skb);
3032 	return;
3033 
3034 count_only:
	/* Stop tracking skbs and only count them. This will not emit
	 * timestamps for the packets, but if we get here something more
	 * serious is already wrong.
3037 	 */
3038 	comp->tracked = 0;
3039 	comp->extra += skb_queue_len(&comp->queue) + 1;
3040 	skb_queue_purge(&comp->queue);
3041 }
3042 
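/* Account one completed packet: either consume a counted extra or
 * dequeue the next queued skb, emitting its COMPLETION timestamp if it
 * is still tracked.
 */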
3043 void hci_conn_tx_dequeue(struct hci_conn *conn)
3044 {
3045 	struct tx_queue *comp = &conn->tx_q;
3046 	struct sk_buff *skb;
3047 
	/* If there are tracked skbs, the counted extras go first, before the
	 * real skbs are dequeued, to keep the ordering. When nothing is
	 * tracked the ordering doesn't matter, so dequeue the real skbs first
	 * to get rid of them ASAP.
3051 	 */
3052 	if (comp->extra && (comp->tracked || skb_queue_empty(&comp->queue))) {
3053 		comp->extra--;
3054 		return;
3055 	}
3056 
3057 	skb = skb_dequeue(&comp->queue);
3058 	if (!skb)
3059 		return;
3060 
3061 	if (skb->sk) {
3062 		comp->tracked--;
3063 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk,
3064 				SCM_TSTAMP_COMPLETION);
3065 	}
3066 
3067 	kfree_skb(skb);
3068 }
3069 
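/* Return a pointer to where the encryption key size of the connection
 * is stored: the pin_len of the link key for ACL, the enc_size of the
 * LTK for LE, or NULL if no key is found.
 */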
3070 u8 *hci_conn_key_enc_size(struct hci_conn *conn)
3071 {
3072 	if (conn->type == ACL_LINK) {
3073 		struct link_key *key;
3074 
3075 		key = hci_find_link_key(conn->hdev, &conn->dst);
3076 		if (!key)
3077 			return NULL;
3078 
3079 		return &key->pin_len;
3080 	} else if (conn->type == LE_LINK) {
3081 		struct smp_ltk *ltk;
3082 
3083 		ltk = hci_find_ltk(conn->hdev, &conn->dst, conn->dst_type,
3084 				   conn->role);
3085 		if (!ltk)
3086 			return NULL;
3087 
3088 		return &ltk->enc_size;
3089 	}
3090 
3091 	return NULL;
3092 }
3093 
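/* Report the SO_TIMESTAMPING capabilities of sockets with the given
 * protocol on hci device @index. Only software timestamping is
 * supported; TX_COMPLETION for SCO depends on HCI_SCO_FLOWCTL.
 */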
3094 int hci_ethtool_ts_info(unsigned int index, int sk_proto,
3095 			struct kernel_ethtool_ts_info *info)
3096 {
3097 	struct hci_dev *hdev;
3098 
3099 	hdev = hci_dev_get(index);
3100 	if (!hdev)
3101 		return -ENODEV;
3102 
3103 	info->so_timestamping =
3104 		SOF_TIMESTAMPING_RX_SOFTWARE |
3105 		SOF_TIMESTAMPING_SOFTWARE;
3106 	info->phc_index = -1;
3107 	info->tx_types = BIT(HWTSTAMP_TX_OFF);
3108 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3109 
3110 	switch (sk_proto) {
3111 	case BTPROTO_ISO:
3112 	case BTPROTO_L2CAP:
3113 		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
3114 		info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION;
3115 		break;
3116 	case BTPROTO_SCO:
3117 		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
3118 		if (hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3119 			info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION;
3120 		break;
3121 	}
3122 
3123 	hci_dev_put(hdev);
3124 	return 0;
3125 }
3126