xref: /linux/net/bluetooth/l2cap_core.c (revision 409c188c57cdb5cb1dfcac79e72b5169f0463fe4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
#define LE_FLOWCTL_MAX_CREDITS 65535

/* Runtime tunables toggling ERTM and enhanced credit based mode;
 * presumably set from module/socket option code elsewhere — confirm.
 */
bool disable_ertm;
bool enable_ecred;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of all L2CAP channels, guarded by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for helpers used before their definitions */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
64 
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
/* Socket-level BDADDR_* type of @hcon's source (local) address. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
81 
/* Socket-level BDADDR_* type of @hcon's destination (remote) address. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
86 
87 /* ---- L2CAP channels ---- */
88 
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
90 						   u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 }
100 
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
102 						   u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
/* Find channel with given SCID.
 * Returns locked channel.
 *
 * NOTE(review): the channel is locked but no reference is taken before
 * conn->chan_lock is released; later upstream kernels hold the channel
 * (l2cap_chan_hold_unless_zero) here to avoid a use-after-free against
 * a concurrent l2cap_chan_put — confirm whether that fix is needed.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
128 
/* Find channel with given DCID.
 * Returns locked channel.
 *
 * NOTE(review): as with the SCID variant, no reference is taken on the
 * channel while conn->chan_lock is held — confirm against upstream
 * use-after-free fixes that hold the channel here.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
145 
146 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
147 						    u8 ident)
148 {
149 	struct l2cap_chan *c;
150 
151 	list_for_each_entry(c, &conn->chan_l, list) {
152 		if (c->ident == ident)
153 			return c;
154 	}
155 	return NULL;
156 }
157 
/* Find the channel with pending signalling identifier @ident and
 * return it locked, or NULL.
 *
 * NOTE(review): no reference is taken on the channel while
 * conn->chan_lock is held — confirm against upstream use-after-free
 * fixes that hold the channel here.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
171 
172 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
173 						      u8 src_type)
174 {
175 	struct l2cap_chan *c;
176 
177 	list_for_each_entry(c, &chan_list, global_l) {
178 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
179 			continue;
180 
181 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
182 			continue;
183 
184 		if (c->sport == psm && !bacmp(&c->src, src))
185 			return c;
186 	}
187 	return NULL;
188 }
189 
190 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
191 {
192 	int err;
193 
194 	write_lock(&chan_list_lock);
195 
196 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
197 		err = -EADDRINUSE;
198 		goto done;
199 	}
200 
201 	if (psm) {
202 		chan->psm = psm;
203 		chan->sport = psm;
204 		err = 0;
205 	} else {
206 		u16 p, start, end, incr;
207 
208 		if (chan->src_type == BDADDR_BREDR) {
209 			start = L2CAP_PSM_DYN_START;
210 			end = L2CAP_PSM_AUTO_END;
211 			incr = 2;
212 		} else {
213 			start = L2CAP_PSM_LE_DYN_START;
214 			end = L2CAP_PSM_LE_DYN_END;
215 			incr = 1;
216 		}
217 
218 		err = -EINVAL;
219 		for (p = start; p <= end; p += incr)
220 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
221 							 chan->src_type)) {
222 				chan->psm   = cpu_to_le16(p);
223 				chan->sport = cpu_to_le16(p);
224 				err = 0;
225 				break;
226 			}
227 	}
228 
229 done:
230 	write_unlock(&chan_list_lock);
231 	return err;
232 }
233 EXPORT_SYMBOL_GPL(l2cap_add_psm);
234 
235 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
236 {
237 	write_lock(&chan_list_lock);
238 
239 	/* Override the defaults (which are for conn-oriented) */
240 	chan->omtu = L2CAP_DEFAULT_MTU;
241 	chan->chan_type = L2CAP_CHAN_FIXED;
242 
243 	chan->scid = scid;
244 
245 	write_unlock(&chan_list_lock);
246 
247 	return 0;
248 }
249 
250 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
251 {
252 	u16 cid, dyn_end;
253 
254 	if (conn->hcon->type == LE_LINK)
255 		dyn_end = L2CAP_CID_LE_DYN_END;
256 	else
257 		dyn_end = L2CAP_CID_DYN_END;
258 
259 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
260 		if (!__l2cap_get_chan_by_scid(conn, cid))
261 			return cid;
262 	}
263 
264 	return 0;
265 }
266 
/* Move @chan to @state and notify its owner through the state_change
 * callback with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
275 
/* Move @chan to @state and report @err through the state_change
 * callback.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
282 
/* Report @err on @chan without changing its state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
287 
/* Arm the ERTM retransmission timer — but only when a retransmission
 * timeout is configured and the monitor timer is not already pending
 * (presumably the monitor timer takes over supervision while it runs —
 * confirm against the ERTM state machine).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
296 
/* Arm the ERTM monitor timer (when configured), cancelling any pending
 * retransmission timer first.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
305 
306 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
307 					       u16 seq)
308 {
309 	struct sk_buff *skb;
310 
311 	skb_queue_walk(head, skb) {
312 		if (bt_cb(skb)->l2cap.txseq == seq)
313 			return skb;
314 	}
315 
316 	return NULL;
317 }
318 
319 /* ---- L2CAP sequence number lists ---- */
320 
321 /* For ERTM, ordered lists of sequence numbers must be tracked for
322  * SREJ requests that are received and for frames that are to be
323  * retransmitted. These seq_list functions implement a singly-linked
324  * list in an array, where membership in the list can also be checked
325  * in constant time. Items can also be added to the tail of the list
326  * and removed from the head in constant time, without further memory
327  * allocs or frees.
328  */
329 
/* Allocate the backing array for a sequence list able to hold @size
 * entries and mark the list empty. Returns 0 or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask works because alloc_size is a power of two */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
352 
/* Release the backing array of @seq_list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
357 
/* Return true when @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
364 
/* Remove and return the sequence number at the head of @seq_list.
 * NOTE(review): there is no empty-list check here; callers are
 * presumably expected to verify the list is non-empty first — confirm.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry, clearing this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: reset the list to empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
380 
/* Empty @seq_list, resetting every array slot and head/tail. The
 * array walk is skipped entirely when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
394 
/* Append @seq to the tail of @seq_list; duplicates are ignored. The
 * array doubles as the "next" links of a singly-linked list:
 * list[x & mask] holds the seq that follows x, or L2CAP_SEQ_LIST_TAIL.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
412 
/* Deferred work run when a channel's chan_timer expires: close the
 * channel with an error derived from its state, then drop the
 * reference taken when the timer was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
445 
/* Allocate and initialize a new L2CAP channel, add it to the global
 * channel list and return it in BT_OPEN state with one reference held.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* GFP_ATOMIC — presumably callable from atomic context; confirm */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
479 
/* kref release callback: unlink @chan from the global channel list and
 * free it.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
492 
/* Take a reference on channel @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
499 
/* Drop a reference on channel @c; the last put frees it via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
507 
/* Reset @chan's ERTM/flow-control parameters, security level, flush
 * timeout and flags to the protocol defaults, and re-arm
 * CONF_NOT_COMPLETE (cleared in l2cap_chan_ready()).
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Mirror local values until configuration negotiates otherwise */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
528 
/* Initialize LE credit-based flow control state on @chan, granting the
 * peer @tx_credits send credits and sizing local MPS/rx_credits from
 * the channel MTU and the HCI connection MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
542 
/* Initialize enhanced credit based flow control: same as LE flow
 * control, but the spec-minimum MPS of 64 octets is enforced and
 * rx_credits recomputed when MPS is raised.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
553 
/* Attach @chan to @conn: assign CIDs and MTU according to channel
 * type, set QoS defaults, take a channel reference (and usually an
 * hci_conn reference) and link the channel into conn->chan_l.
 * Callers hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort QoS defaults for the extended flow specification */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
605 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
612 
/* Detach @chan from its connection and tear it down with error @err:
 * stop the channel timer, notify the owner via teardown, unlink from
 * the connection list and drop references, disconnect any AMP logical
 * link, and purge mode-specific queues/timers — unless configuration
 * never completed, in which case those were never set up.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
681 
682 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
683 			      void *data)
684 {
685 	struct l2cap_chan *chan;
686 
687 	list_for_each_entry(chan, &conn->chan_l, list) {
688 		func(chan, data);
689 	}
690 }
691 
692 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
693 		     void *data)
694 {
695 	if (!conn)
696 		return;
697 
698 	mutex_lock(&conn->chan_lock);
699 	__l2cap_chan_list(conn, func, data);
700 	mutex_unlock(&conn->chan_lock);
701 }
702 
703 EXPORT_SYMBOL_GPL(l2cap_chan_list);
704 
/* Deferred work (scheduled via id_addr_update_work): propagate the
 * current hcon destination address and address type to every channel
 * on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
723 
724 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
725 {
726 	struct l2cap_conn *conn = chan->conn;
727 	struct l2cap_le_conn_rsp rsp;
728 	u16 result;
729 
730 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
731 		result = L2CAP_CR_LE_AUTHORIZATION;
732 	else
733 		result = L2CAP_CR_LE_BAD_PSM;
734 
735 	l2cap_state_change(chan, BT_DISCONN);
736 
737 	rsp.dcid    = cpu_to_le16(chan->scid);
738 	rsp.mtu     = cpu_to_le16(chan->imtu);
739 	rsp.mps     = cpu_to_le16(chan->mps);
740 	rsp.credits = cpu_to_le16(chan->rx_credits);
741 	rsp.result  = cpu_to_le16(result);
742 
743 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
744 		       &rsp);
745 }
746 
747 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
748 {
749 	struct l2cap_conn *conn = chan->conn;
750 	struct l2cap_ecred_conn_rsp rsp;
751 	u16 result;
752 
753 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
754 		result = L2CAP_CR_LE_AUTHORIZATION;
755 	else
756 		result = L2CAP_CR_LE_BAD_PSM;
757 
758 	l2cap_state_change(chan, BT_DISCONN);
759 
760 	memset(&rsp, 0, sizeof(rsp));
761 
762 	rsp.result  = cpu_to_le16(result);
763 
764 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
765 		       &rsp);
766 }
767 
768 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
769 {
770 	struct l2cap_conn *conn = chan->conn;
771 	struct l2cap_conn_rsp rsp;
772 	u16 result;
773 
774 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
775 		result = L2CAP_CR_SEC_BLOCK;
776 	else
777 		result = L2CAP_CR_BAD_PSM;
778 
779 	l2cap_state_change(chan, BT_DISCONN);
780 
781 	rsp.scid   = cpu_to_le16(chan->dcid);
782 	rsp.dcid   = cpu_to_le16(chan->scid);
783 	rsp.result = cpu_to_le16(result);
784 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
785 
786 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
787 }
788 
/* Close @chan with error @reason, acting on its current state:
 * established channels send a disconnect request, half-open incoming
 * channels reject the pending connect, and everything else is torn
 * down directly.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Connection-oriented: request disconnect and wait (with a
		 * timer) for the response; otherwise delete immediately.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection pending: reject it per link type */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
839 
/* Map channel type, PSM and security level to the HCI authentication
 * requirement used when securing a BR/EDR link. As a side effect, may
 * upgrade sec_level from LOW to SDP for the SDP/3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
891 
/* Service level security */
/* Enforce the channel's security level on the underlying link: SMP for
 * LE links, HCI authentication for BR/EDR. Note l2cap_get_auth_type()
 * may upgrade chan->sec_level before it is passed on, so the call
 * order below matters.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
906 
907 static u8 l2cap_get_ident(struct l2cap_conn *conn)
908 {
909 	u8 id;
910 
911 	/* Get next available identificator.
912 	 *    1 - 128 are used by kernel.
913 	 *  129 - 199 are reserved.
914 	 *  200 - 254 are used by utilities like l2ping, etc.
915 	 */
916 
917 	mutex_lock(&conn->ident_lock);
918 
919 	if (++conn->tx_ident > 128)
920 		conn->tx_ident = 1;
921 
922 	id = conn->tx_ident;
923 
924 	mutex_unlock(&conn->ident_lock);
925 
926 	return id;
927 }
928 
/* Build an L2CAP signalling command skb and queue it on the
 * connection's HCI channel at maximum priority. Silently does nothing
 * when the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
953 
954 static bool __chan_is_moving(struct l2cap_chan *chan)
955 {
956 	return chan->move_state != L2CAP_MOVE_STABLE &&
957 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
958 }
959 
/* Transmit one PDU for @chan: route it over the AMP (high speed)
 * logical link when one is active and no move is in progress,
 * otherwise over the ACL link with flush semantics chosen per link
 * type and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		/* Dropped if the logical link is not (yet) set up */
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
991 
992 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
993 {
994 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
995 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
996 
997 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
998 		/* S-Frame */
999 		control->sframe = 1;
1000 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1001 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1002 
1003 		control->sar = 0;
1004 		control->txseq = 0;
1005 	} else {
1006 		/* I-Frame */
1007 		control->sframe = 0;
1008 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1009 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1010 
1011 		control->poll = 0;
1012 		control->super = 0;
1013 	}
1014 }
1015 
1016 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1017 {
1018 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1019 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1020 
1021 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1022 		/* S-Frame */
1023 		control->sframe = 1;
1024 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1025 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1026 
1027 		control->sar = 0;
1028 		control->txseq = 0;
1029 	} else {
1030 		/* I-Frame */
1031 		control->sframe = 0;
1032 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1033 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1034 
1035 		control->poll = 0;
1036 		control->super = 0;
1037 	}
1038 }
1039 
1040 static inline void __unpack_control(struct l2cap_chan *chan,
1041 				    struct sk_buff *skb)
1042 {
1043 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1044 		__unpack_extended_control(get_unaligned_le32(skb->data),
1045 					  &bt_cb(skb)->l2cap);
1046 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1047 	} else {
1048 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1049 					  &bt_cb(skb)->l2cap);
1050 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1051 	}
1052 }
1053 
1054 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1055 {
1056 	u32 packed;
1057 
1058 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1059 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1060 
1061 	if (control->sframe) {
1062 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1063 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1064 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1065 	} else {
1066 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1067 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1068 	}
1069 
1070 	return packed;
1071 }
1072 
1073 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1074 {
1075 	u16 packed;
1076 
1077 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1078 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1079 
1080 	if (control->sframe) {
1081 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1082 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1083 		packed |= L2CAP_CTRL_FRAME_TYPE;
1084 	} else {
1085 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1086 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1087 	}
1088 
1089 	return packed;
1090 }
1091 
1092 static inline void __pack_control(struct l2cap_chan *chan,
1093 				  struct l2cap_ctrl *control,
1094 				  struct sk_buff *skb)
1095 {
1096 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1097 		put_unaligned_le32(__pack_extended_control(control),
1098 				   skb->data + L2CAP_HDR_SIZE);
1099 	} else {
1100 		put_unaligned_le16(__pack_enhanced_control(control),
1101 				   skb->data + L2CAP_HDR_SIZE);
1102 	}
1103 }
1104 
1105 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1106 {
1107 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1108 		return L2CAP_EXT_HDR_SIZE;
1109 	else
1110 		return L2CAP_ENH_HDR_SIZE;
1111 }
1112 
/* Allocate and build a complete S-frame PDU for @chan: basic L2CAP
 * header, enhanced or extended control field, and an FCS trailer when
 * CRC16 is configured.
 *
 * @control: the already-packed control field value; only the low 16
 * bits are written unless FLAG_EXT_CTRL is set on the channel.
 *
 * Returns the skb on success or ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length covers control field (+ FCS) */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS is computed over everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1145 
/* Build and transmit an S-frame described by @control, updating the
 * channel's ack/busy bookkeeping as a side effect:
 *  - a pending F-bit is consumed unless this frame carries the P-bit,
 *  - CONN_RNR_SENT tracks whether the last RR/RNR told the peer we
 *    are busy,
 *  - sending anything other than an SREJ acknowledges up to reqseq,
 *    so the ack timer is cleared.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* No traffic while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1186 
1187 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1188 {
1189 	struct l2cap_ctrl control;
1190 
1191 	BT_DBG("chan %p, poll %d", chan, poll);
1192 
1193 	memset(&control, 0, sizeof(control));
1194 	control.sframe = 1;
1195 	control.poll = poll;
1196 
1197 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1198 		control.super = L2CAP_SUPER_RNR;
1199 	else
1200 		control.super = L2CAP_SUPER_RR;
1201 
1202 	control.reqseq = chan->buffer_seq;
1203 	l2cap_send_sframe(chan, &control);
1204 }
1205 
1206 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1207 {
1208 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1209 		return true;
1210 
1211 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1212 }
1213 
/* Decide whether @chan may be created on an AMP controller: both
 * sides must advertise A2MP support, at least one non-BR/EDR
 * controller must be powered up locally, and the channel policy must
 * actually prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Look for any powered-up AMP controller in the device list */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1241 
/* Validate the channel's Extended Flow Specification parameters.
 * Currently a stub that accepts everything.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1247 
/* Send an L2CAP Connection Request for @chan.  Allocates a fresh
 * command ident (stored in chan->ident so the response can be
 * matched) and marks the channel as having a connect pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1262 
/* Send an A2MP Create Channel Request for @chan on the controller
 * identified by @amp_id.  The ident is saved in chan->ident to match
 * the eventual response.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1275 
/* Prepare an ERTM channel for a move to another controller: stop all
 * timers, reset retransmission state and park the tx/rx state
 * machines until the move completes.  No-op for non-ERTM modes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every already-transmitted frame at
	 * the head of tx_q; stop at the first never-sent frame
	 * (retries == 0).
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move has finished */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1310 
1311 static void l2cap_move_done(struct l2cap_chan *chan)
1312 {
1313 	u8 move_role = chan->move_role;
1314 	BT_DBG("chan %p", chan);
1315 
1316 	chan->move_state = L2CAP_MOVE_STABLE;
1317 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1318 
1319 	if (chan->mode != L2CAP_MODE_ERTM)
1320 		return;
1321 
1322 	switch (move_role) {
1323 	case L2CAP_MOVE_ROLE_INITIATOR:
1324 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1325 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1326 		break;
1327 	case L2CAP_MOVE_ROLE_RESPONDER:
1328 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1329 		break;
1330 	}
1331 }
1332 
/* Transition @chan to BT_CONNECTED and notify its owner via
 * ops->ready().  Clears all configuration state and the channel
 * timer.  For credit-based modes the channel starts suspended when
 * the peer granted no initial tx credits.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1358 
/* Send an LE credit based connection request for @chan.  Idempotent:
 * FLAG_LE_CONN_REQ_SENT guards against sending the request twice.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU if the owner did not set one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1384 
/* Scratch state used while building an Enhanced Credit Based
 * Connection Request, which can carry up to 5 source CIDs in a
 * single PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];		/* SCIDs appended after the fixed part */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID deferred channels must match */
	int count;			/* number of entries filled in pdu.scid[] */
};
1394 
/* __l2cap_chan_list() callback: fold other deferred ECRED channels
 * belonging to the same PID/PSM into the connection request being
 * built in @data, so several channels are opened with one PDU.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the initiating channel itself; it is already included */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1426 
/* Send an Enhanced Credit Based Connection Request for @chan,
 * batching in any other deferred channels with the same PID/PSM via
 * l2cap_ecred_defer_connect().  Guarded by FLAG_ECRED_CONN_REQ_SENT
 * so the request is only sent once per channel.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up by the initiating channel */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length depends on how many SCIDs were collected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1459 
1460 static void l2cap_le_start(struct l2cap_chan *chan)
1461 {
1462 	struct l2cap_conn *conn = chan->conn;
1463 
1464 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1465 		return;
1466 
1467 	if (!chan->psm) {
1468 		l2cap_chan_ready(chan);
1469 		return;
1470 	}
1471 
1472 	if (chan->state == BT_CONNECT) {
1473 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1474 			l2cap_ecred_connect(chan);
1475 		else
1476 			l2cap_le_connect(chan);
1477 	}
1478 }
1479 
1480 static void l2cap_start_connection(struct l2cap_chan *chan)
1481 {
1482 	if (__amp_capable(chan)) {
1483 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1484 		a2mp_discover_amp(chan);
1485 	} else if (chan->conn->hcon->type == LE_LINK) {
1486 		l2cap_le_start(chan);
1487 	} else {
1488 		l2cap_send_conn_req(chan);
1489 	}
1490 }
1491 
/* Send the initial Information Request (feature mask) for @conn,
 * at most once per connection, and arm the info timer so the
 * connection can proceed even if the peer never answers.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1509 
1510 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1511 {
1512 	/* The minimum encryption key size needs to be enforced by the
1513 	 * host stack before establishing any L2CAP connections. The
1514 	 * specification in theory allows a minimum of 1, but to align
1515 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1516 	 *
1517 	 * This check might also be called for unencrypted connections
1518 	 * that have no key size requirements. Ensure that the link is
1519 	 * actually encrypted before enforcing a key size.
1520 	 */
1521 	int min_key_size = hcon->hdev->min_enc_key_size;
1522 
1523 	/* On FIPS security level, key size must be 16 bytes */
1524 	if (hcon->sec_level == BT_SECURITY_FIPS)
1525 		min_key_size = 16;
1526 
1527 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1528 		hcon->enc_key_size >= min_key_size);
1529 }
1530 
/* Advance connection establishment for @chan.  LE links have their
 * own path; on BR/EDR the feature-mask exchange must complete first,
 * then security and key size are checked before actually sending the
 * connect request.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Kick off the info exchange; l2cap_conn_start() resumes the
	 * channels once the response (or timeout) arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1557 
1558 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1559 {
1560 	u32 local_feat_mask = l2cap_feat_mask;
1561 	if (!disable_ertm)
1562 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1563 
1564 	switch (mode) {
1565 	case L2CAP_MODE_ERTM:
1566 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1567 	case L2CAP_MODE_STREAMING:
1568 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1569 	default:
1570 		return 0x00;
1571 	}
1572 }
1573 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN
 * with @err as the channel error.  ERTM timers are stopped first;
 * A2MP channels have no disconnect PDU and only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1600 
1601 /* ---- L2CAP connections ---- */
/* Resume establishment of every channel on @conn once the
 * feature-mask exchange has finished (called from the info response
 * handler and the info timeout).  Outgoing channels (BT_CONNECT) are
 * started or closed depending on security/mode checks; incoming
 * channels (BT_CONNECT2) get their pending Connection Response and,
 * on success, the first Configuration Request.
 *
 * Runs with conn->chan_lock held across the walk and each channel
 * locked individually.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless/fixed channels need no setup handshake */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Mandatory mode the peer doesn't support: give up */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only successful, not-yet-configured channels move
			 * on to the configuration phase.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1681 
/* LE-specific post-connect work: trigger security for outgoing
 * pairing and, as peripheral, request a connection parameter update
 * when the current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1714 
/* Called when the underlying HCI link is up: start the info exchange
 * on ACL links, then walk all channels and kick each one's
 * establishment according to link type and channel state.  Finally
 * schedules processing of any RX frames that arrived early.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels do not follow the normal setup path */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1755 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that insisted on reliability get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1772 
/* Info-request timeout: give up waiting for the peer's feature mask,
 * mark the exchange done and resume channel establishment anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1783 
1784 /*
1785  * l2cap_user
1786  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1787  * callback is called during registration. The ->remove callback is called
1788  * during unregistration.
1789  * An l2cap_user object can either be explicitly unregistered or when the
1790  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1791  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1792  * External modules must own a reference to the l2cap_conn object if they intend
1793  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1794  * any time if they don't.
1795  */
1796 
/* Register an external l2cap_user on @conn.  Calls user->probe()
 * under the hci_dev lock; on success the user is linked into
 * conn->users.  Returns 0 on success, -EINVAL if the user is already
 * registered, -ENODEV if the connection is being torn down, or the
 * error returned by ->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1834 
1835 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1836 {
1837 	struct hci_dev *hdev = conn->hcon->hdev;
1838 
1839 	hci_dev_lock(hdev);
1840 
1841 	if (list_empty(&user->list))
1842 		goto out_unlock;
1843 
1844 	list_del_init(&user->list);
1845 	user->remove(conn, user);
1846 
1847 out_unlock:
1848 	hci_dev_unlock(hdev);
1849 }
1850 EXPORT_SYMBOL(l2cap_unregister_user);
1851 
1852 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1853 {
1854 	struct l2cap_user *user;
1855 
1856 	while (!list_empty(&conn->users)) {
1857 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1858 		list_del_init(&user->list);
1859 		user->remove(conn, user);
1860 	}
1861 }
1862 
/* Tear down the L2CAP layer of @hcon: cancel pending work, detach
 * all users, close every channel with @err, release the HCI channel
 * and drop the connection reference.  Safe to call with no
 * l2cap_data attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel survives del + close */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1918 
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1926 
/* Take a reference on @conn and return it, for call-chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1933 
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1939 
1940 /* ---- Socket interface ---- */
1941 
1942 /* Find socket with psm and source / destination bdaddr.
1943  * Returns closest match.
1944  */
/* Find a global channel matching @psm, @src/@dst and the address
 * family implied by @link_type.  An exact src+dst match wins;
 * otherwise the closest wildcard (BDADDR_ANY) match is returned.
 * The returned channel carries a reference the caller must drop
 * with l2cap_chan_put(); returns NULL if nothing matches.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Address type must match the requested link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1993 
1994 static void l2cap_monitor_timeout(struct work_struct *work)
1995 {
1996 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1997 					       monitor_timer.work);
1998 
1999 	BT_DBG("chan %p", chan);
2000 
2001 	l2cap_chan_lock(chan);
2002 
2003 	if (!chan->conn) {
2004 		l2cap_chan_unlock(chan);
2005 		l2cap_chan_put(chan);
2006 		return;
2007 	}
2008 
2009 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2010 
2011 	l2cap_chan_unlock(chan);
2012 	l2cap_chan_put(chan);
2013 }
2014 
2015 static void l2cap_retrans_timeout(struct work_struct *work)
2016 {
2017 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2018 					       retrans_timer.work);
2019 
2020 	BT_DBG("chan %p", chan);
2021 
2022 	l2cap_chan_lock(chan);
2023 
2024 	if (!chan->conn) {
2025 		l2cap_chan_unlock(chan);
2026 		l2cap_chan_put(chan);
2027 		return;
2028 	}
2029 
2030 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2031 	l2cap_chan_unlock(chan);
2032 	l2cap_chan_put(chan);
2033 }
2034 
/* Transmit a batch of I-frames in streaming mode: append @skbs to
 * the tx queue and send everything queued, stamping each frame with
 * the next tx sequence number (and FCS when configured).  Streaming
 * mode has no acknowledgements, so frames are not retained.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* No traffic while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2073 
/* Transmit as many queued I-frames as the remote tx window allows in
 * ERTM mode.  Each frame is stamped with reqseq/txseq, FCS is
 * appended when configured, and a clone is sent so the original stays
 * on tx_q for possible retransmission.
 *
 * Returns the number of frames sent, 0 when sending is currently not
 * possible (remote busy, channel moving, wrong tx state), or
 * -ENOTCONN when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2143 
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame gets a fresh reqseq (and possibly the F-bit), its
 * control field and FCS are rewritten in a private copy/clone, and
 * the channel is disconnected when a frame exceeds max_tx retries.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* No traffic while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement carried by this frame */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2224 
/* Queue a single I-frame (identified by control->reqseq) for
 * retransmission and kick the ERTM resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2233 
/* Queue for retransmission every unacked I-frame, starting at the
 * frame carrying control->reqseq (or at the head of tx_q if that
 * sequence number is not found) up to, but not including, the next
 * frame awaiting its first transmission (tx_send_head).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll request must be answered with the F-bit set */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start over: any earlier retransmit requests are superseded */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the peer already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Collect everything sent but not yet acknowledged */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2267 
/* Acknowledge I-frames received so far.  When locally busy (and still
 * in the plain RECV state) an RNR is sent immediately.  Otherwise the
 * ack is piggybacked on outgoing I-frames when possible; an explicit
 * RR is only emitted once the unacked count reaches 3/4 of the ack
 * window, and smaller acks are deferred to the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Ack still pending: let the timer flush it later */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2317 
/* Copy user data from @msg into @skb.  The first @count bytes land in
 * @skb's linear area; any remaining bytes (up to @len total) are copied
 * into MTU-sized continuation skbs chained on @skb's frag_list.
 *
 * Returns the number of bytes copied or a negative error.  On failure
 * the caller still owns @skb (including any fragments already chained)
 * and must free it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account the fragment's bytes in the parent skb totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2361 
/* Build a connectionless (G-frame) PDU: basic L2CAP header, 2-byte
 * PSM, then @len bytes of user data from @msg.  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is limited by the HCI MTU; the rest goes into
	 * frag_list fragments inside l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2393 
/* Build a basic-mode (B-frame) PDU: L2CAP header followed by @len
 * bytes of user data from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2423 
/* Build an ERTM/streaming I-frame PDU.  Layout: L2CAP header, a
 * zeroed enhanced or extended control field (filled in at transmit
 * time), an optional SDU length field (@sdulen != 0 for the first
 * segment of a segmented SDU), @len bytes of payload, and headroom for
 * an FCS when CRC16 is in use.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2477 
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs and append
 * them to @seg_queue.  SAR markers are set on each PDU (UNSEGMENTED,
 * or START/CONTINUE/END), with the SDU length carried only in the
 * START frame.  Returns 0 on success or a negative error, in which
 * case @seg_queue has been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first (START) PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2544 
/* Build a single LE/enhanced credit-based flow control K-frame:
 * L2CAP header, optional SDU length field (@sdulen != 0 only for the
 * first segment of an SDU), then @len bytes of payload from @msg.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2587 
/* Segment an outgoing SDU into LE flow-control K-frames appended to
 * @seg_queue.  The first frame carries the total SDU length and hence
 * has L2CAP_SDULEN_SIZE fewer payload bytes than the rest.  Returns 0
 * or a negative error; on error @seg_queue has been purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first frame no SDU length field is sent, so
		 * subsequent frames can carry a full MPS of payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2623 
2624 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2625 {
2626 	int sent = 0;
2627 
2628 	BT_DBG("chan %p", chan);
2629 
2630 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2631 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2632 		chan->tx_credits--;
2633 		sent++;
2634 	}
2635 
2636 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2637 	       skb_queue_len(&chan->tx_q));
2638 }
2639 
/* Entry point for sending an SDU on @chan, dispatching on channel
 * type/mode (connectionless, LE/extended flow control, basic, ERTM,
 * streaming).  Returns the number of bytes accepted (== len) or a
 * negative error.  The channel lock may be dropped while skbs are
 * allocated, so the connected state is rechecked after allocation
 * and segmentation.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Segmentation may have dropped the channel lock, so
		 * recheck the connected state before queueing anything.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the upper layer to stop feeding us */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2767 
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq (exclusive) that is not already buffered
 * in srej_q, recording each request in srej_list.  expected_tx_seq is
 * then advanced past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already held out-of-order need no SREJ */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2790 
2791 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2792 {
2793 	struct l2cap_ctrl control;
2794 
2795 	BT_DBG("chan %p", chan);
2796 
2797 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2798 		return;
2799 
2800 	memset(&control, 0, sizeof(control));
2801 	control.sframe = 1;
2802 	control.super = L2CAP_SUPER_SREJ;
2803 	control.reqseq = chan->srej_list.tail;
2804 	l2cap_send_sframe(chan, &control);
2805 }
2806 
/* Re-send SREJ S-frames for all outstanding missing frames except
 * @txseq (which has just arrived and is removed from the list).  Each
 * still-missing sequence number is popped, re-requested and appended
 * back, with initial_head bounding the walk to a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2832 
/* Process an incoming acknowledgement (ReqSeq): free every frame in
 * tx_q acknowledged by @reqseq, update the unacked counter and
 * expected_ack_seq, and stop the retransmission timer once nothing
 * remains outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2864 
2865 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2866 {
2867 	BT_DBG("chan %p", chan);
2868 
2869 	chan->expected_tx_seq = chan->buffer_seq;
2870 	l2cap_seq_list_clear(&chan->srej_list);
2871 	skb_queue_purge(&chan->srej_q);
2872 	chan->rx_state = L2CAP_RX_STATE_RECV;
2873 }
2874 
/* ERTM TX state machine handler for the XMIT state: new data is sent
 * immediately, local busy transitions are handled, and poll events
 * (explicit poll or retransmission timeout) move the channel into the
 * WAIT_F state pending the peer's F-bit response.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR to tell the peer we are busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Poll the peer to resynchronize after RNR */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2946 
2947 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2948 				  struct l2cap_ctrl *control,
2949 				  struct sk_buff_head *skbs, u8 event)
2950 {
2951 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2952 	       event);
2953 
2954 	switch (event) {
2955 	case L2CAP_EV_DATA_REQUEST:
2956 		if (chan->tx_send_head == NULL)
2957 			chan->tx_send_head = skb_peek(skbs);
2958 		/* Queue data, but don't send. */
2959 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2960 		break;
2961 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2962 		BT_DBG("Enter LOCAL_BUSY");
2963 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2964 
2965 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2966 			/* The SREJ_SENT state must be aborted if we are to
2967 			 * enter the LOCAL_BUSY state.
2968 			 */
2969 			l2cap_abort_rx_srej_sent(chan);
2970 		}
2971 
2972 		l2cap_send_ack(chan);
2973 
2974 		break;
2975 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2976 		BT_DBG("Exit LOCAL_BUSY");
2977 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2978 
2979 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2980 			struct l2cap_ctrl local_control;
2981 			memset(&local_control, 0, sizeof(local_control));
2982 			local_control.sframe = 1;
2983 			local_control.super = L2CAP_SUPER_RR;
2984 			local_control.poll = 1;
2985 			local_control.reqseq = chan->buffer_seq;
2986 			l2cap_send_sframe(chan, &local_control);
2987 
2988 			chan->retry_count = 1;
2989 			__set_monitor_timer(chan);
2990 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2991 		}
2992 		break;
2993 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2994 		l2cap_process_reqseq(chan, control->reqseq);
2995 		fallthrough;
2996 
2997 	case L2CAP_EV_RECV_FBIT:
2998 		if (control && control->final) {
2999 			__clear_monitor_timer(chan);
3000 			if (chan->unacked_frames > 0)
3001 				__set_retrans_timer(chan);
3002 			chan->retry_count = 0;
3003 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3004 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3005 		}
3006 		break;
3007 	case L2CAP_EV_EXPLICIT_POLL:
3008 		/* Ignore */
3009 		break;
3010 	case L2CAP_EV_MONITOR_TO:
3011 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3012 			l2cap_send_rr_or_rnr(chan, 1);
3013 			__set_monitor_timer(chan);
3014 			chan->retry_count++;
3015 		} else {
3016 			l2cap_send_disconn_req(chan, ECONNABORTED);
3017 		}
3018 		break;
3019 	default:
3020 		break;
3021 	}
3022 }
3023 
3024 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3025 		     struct sk_buff_head *skbs, u8 event)
3026 {
3027 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3028 	       chan, control, skbs, event, chan->tx_state);
3029 
3030 	switch (chan->tx_state) {
3031 	case L2CAP_TX_STATE_XMIT:
3032 		l2cap_tx_state_xmit(chan, control, skbs, event);
3033 		break;
3034 	case L2CAP_TX_STATE_WAIT_F:
3035 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3036 		break;
3037 	default:
3038 		/* Ignore event */
3039 		break;
3040 	}
3041 }
3042 
/* Feed an incoming frame's ReqSeq (and F-bit) to the TX state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3049 
/* Feed only an incoming frame's F-bit to the TX state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3056 
/* Copy frame to all raw sockets on that connection.  Each raw channel
 * (except the one the frame originated from) gets its own clone of
 * @skb; clones that the channel refuses are freed here.  Allocation
 * failures for individual channels are skipped silently.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* chan_lock protects the connection's channel list */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3084 
3085 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID chosen for LE or
 * BR/EDR signalling), command header (@code, @ident, @dlen), then
 * @dlen bytes of @data.  Payload exceeding the connection MTU is split
 * into continuation skbs chained on frag_list.  Returns the skb or
 * NULL on allocation failure / undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least fit the L2CAP and command headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including fragments already attached */
	kfree_skb(skb);
	return NULL;
}
3151 
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * 1/2/4-byte values are decoded into *val; for any other length *val
 * holds a pointer to the raw option data.  Returns the total encoded
 * size of the option (header + value).
 *
 * NOTE(review): there is no bounds checking here — callers must verify
 * that opt->len stays within the received buffer before trusting the
 * decoded value.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Opaque/variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3185 
/* Append one configuration option (type/len/value) at *ptr, advancing
 * *ptr past it.  1/2/4-byte values are stored little-endian; any other
 * length is memcpy'd from the pointer passed in @val.  If fewer than
 * @size bytes remain, the option is silently dropped.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the output buffer: skip the option */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* @val carries a pointer to the raw option data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3218 
/* Append an Extended Flow Specification option built from the
 * channel's local QoS parameters.  ERTM channels use the negotiated
 * local service type/id; streaming channels send a fixed best-effort
 * spec.  Other modes get no EFS option.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3249 
/* Deferred-ack timer: if any received frames are still unacked when
 * the timer fires, send an RR/RNR to acknowledge them.  The final
 * l2cap_chan_put() drops the reference held on the channel while the
 * timer was pending.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3269 
/* Reset the channel's sequence counters, queues and AMP move state.
 * For ERTM channels, additionally initialize the RX/TX state machines,
 * the retransmission/monitor/ack timers and the SREJ/retransmit
 * sequence lists.  Returns 0 on success or a negative error (in which
 * case any partially allocated seq list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3314 
3315 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3316 {
3317 	switch (mode) {
3318 	case L2CAP_MODE_STREAMING:
3319 	case L2CAP_MODE_ERTM:
3320 		if (l2cap_mode_supported(mode, remote_feat_mask))
3321 			return mode;
3322 		fallthrough;
3323 	default:
3324 		return L2CAP_MODE_BASIC;
3325 	}
3326 }
3327 
3328 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3329 {
3330 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3331 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3332 }
3333 
3334 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3335 {
3336 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3337 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3338 }
3339 
/* Fill in the RFC option's retransmission and monitor timeouts.  On
 * an AMP link they are derived from the controller's best-effort flush
 * timeout (clamped to the 16-bit field); otherwise the spec defaults
 * are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3377 
3378 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3379 {
3380 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3381 	    __l2cap_ews_supported(chan->conn)) {
3382 		/* use extended control field */
3383 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3384 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3385 	} else {
3386 		chan->tx_win = min_t(u16, chan->tx_win,
3387 				     L2CAP_DEFAULT_TX_WINDOW);
3388 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3389 	}
3390 	chan->ack_win = chan->tx_win;
3391 }
3392 
3393 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3394 {
3395 	struct hci_conn *conn = chan->conn->hcon;
3396 
3397 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3398 
3399 	/* The 2-DH1 packet has between 2 and 56 information bytes
3400 	 * (including the 2-byte payload header)
3401 	 */
3402 	if (!(conn->pkt_type & HCI_2DH1))
3403 		chan->imtu = 54;
3404 
3405 	/* The 3-DH1 packet has between 2 and 85 information bytes
3406 	 * (including the 2-byte payload header)
3407 	 */
3408 	if (!(conn->pkt_type & HCI_3DH1))
3409 		chan->imtu = 83;
3410 
3411 	/* The 2-DH3 packet has between 2 and 369 information bytes
3412 	 * (including the 2-byte payload header)
3413 	 */
3414 	if (!(conn->pkt_type & HCI_2DH3))
3415 		chan->imtu = 367;
3416 
3417 	/* The 3-DH3 packet has between 2 and 554 information bytes
3418 	 * (including the 2-byte payload header)
3419 	 */
3420 	if (!(conn->pkt_type & HCI_3DH3))
3421 		chan->imtu = 552;
3422 
3423 	/* The 2-DH5 packet has between 2 and 681 information bytes
3424 	 * (including the 2-byte payload header)
3425 	 */
3426 	if (!(conn->pkt_type & HCI_2DH5))
3427 		chan->imtu = 679;
3428 
3429 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3430 	 * (including the 2-byte payload header)
3431 	 */
3432 	if (!(conn->pkt_type & HCI_3DH5))
3433 		chan->imtu = 1021;
3434 }
3435 
/* Build a Configure Request payload for @chan into @data (at most
 * @data_size bytes).  On the first request the channel mode is
 * (re)selected against the remote feature mask; then MTU, RFC, EFS,
 * EWS and FCS options are appended as appropriate for the final mode.
 * Returns the number of bytes written into @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode as-is. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Downgrade to a mode the remote supports (may end up
		 * as Basic mode).
		 */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default;
	 * an unset (zero) MTU is auto-sized from the ACL packet types.
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only advertise an explicit Basic-mode RFC option if
		 * the remote understands RFC at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full PDU (with extended header,
		 * SDU length and FCS) still fits in the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option field is limited to the standard
		 * window; larger windows go in the EWS option below.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		/* Offer to drop the FCS when both sides allow it. */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Same PDU-size clamp as in the ERTM case above. */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3561 
/* Parse the accumulated Configure Request stored in chan->conf_req and
 * build the Configure Response into @data (at most @data_size bytes).
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the remote's options are fundamentally incompatible and the channel
 * should be disconnected.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the remote's options.  Options with an
	 * unexpected length are silently ignored; unknown non-hint
	 * options are echoed back with result "unknown option".
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted upon. */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* EWS requires A2MP support on our side. */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode renegotiation is only allowed early in the exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices refuse a mode they did not request. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give the remote only one chance to retry the mode. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Reject a service type that conflicts with
			 * ours (unless either side is "no traffic").
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits the
			 * link MTU with full framing overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3786 
/* Parse a Configure Response (@rsp, @len bytes) and build the follow-up
 * Configure Request into @data (at most @size bytes), adopting the
 * values the remote proposed where acceptable.  @result carries the
 * remote's result code in and may be downgraded to UNACCEPT on a bad
 * MTU.  Returns the number of request bytes written, or -ECONNREFUSED
 * when the response is incompatible with our configuration.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the response options; most accepted options are echoed
	 * back into the new request.  Bad-length options are ignored.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State-2 devices never accept a mode change. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Conflicting service types cannot be resolved. */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A Basic-mode channel cannot be upgraded by the remote. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	/* On success/pending, adopt the negotiated mode parameters. */
	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3904 
3905 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3906 				u16 result, u16 flags)
3907 {
3908 	struct l2cap_conf_rsp *rsp = data;
3909 	void *ptr = rsp->data;
3910 
3911 	BT_DBG("chan %p", chan);
3912 
3913 	rsp->scid   = cpu_to_le16(chan->dcid);
3914 	rsp->result = cpu_to_le16(result);
3915 	rsp->flags  = cpu_to_le16(flags);
3916 
3917 	return ptr - data;
3918 }
3919 
3920 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3921 {
3922 	struct l2cap_le_conn_rsp rsp;
3923 	struct l2cap_conn *conn = chan->conn;
3924 
3925 	BT_DBG("chan %p", chan);
3926 
3927 	rsp.dcid    = cpu_to_le16(chan->scid);
3928 	rsp.mtu     = cpu_to_le16(chan->imtu);
3929 	rsp.mps     = cpu_to_le16(chan->mps);
3930 	rsp.credits = cpu_to_le16(chan->rx_credits);
3931 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3932 
3933 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3934 		       &rsp);
3935 }
3936 
/* Send the deferred Enhanced Credit Based Connection Response for the
 * request identified by chan->ident.  A single response covers every
 * channel created by that request, so all channels on the connection
 * that still carry the same ident are collected into one PDU.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		/* NOTE(review): dcid[] is sized for 5 entries; this
		 * assumes at most 5 channels share one ident (matching
		 * the CID capacity of an ECRED connect request) —
		 * confirm against the request handler.
		 */
		__le16 dcid[5];
	} __packed pdu;
	struct l2cap_conn *conn = chan->conn;
	u16 ident = chan->ident;
	int i = 0;

	/* Already answered (or never pending). */
	if (!ident)
		return;

	BT_DBG("chan %p ident %d", chan, ident);

	/* MTU/MPS/credits are taken from the triggering channel. */
	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	pdu.rsp.mps     = cpu_to_le16(chan->mps);
	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	mutex_lock(&conn->chan_lock);

	/* @chan is reused as the list cursor from here on. */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident != ident)
			continue;

		/* Reset ident so only one response is sent */
		chan->ident = 0;

		/* Include all channels pending with the same ident */
		pdu.dcid[i++] = cpu_to_le16(chan->scid);
	}

	mutex_unlock(&conn->chan_lock);

	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
}
3975 
3976 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3977 {
3978 	struct l2cap_conn_rsp rsp;
3979 	struct l2cap_conn *conn = chan->conn;
3980 	u8 buf[128];
3981 	u8 rsp_code;
3982 
3983 	rsp.scid   = cpu_to_le16(chan->dcid);
3984 	rsp.dcid   = cpu_to_le16(chan->scid);
3985 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3986 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3987 
3988 	if (chan->hs_hcon)
3989 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3990 	else
3991 		rsp_code = L2CAP_CONN_RSP;
3992 
3993 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3994 
3995 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3996 
3997 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3998 		return;
3999 
4000 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4001 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4002 	chan->num_conf_req++;
4003 }
4004 
/* Extract the RFC (and extended window) parameters from a Configure
 * Response @rsp of @len bytes and apply them to @chan.  Only relevant
 * for ERTM and streaming channels; other modes return immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Scan the options; bad-length options are ignored. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS when extended control
		 * is in use, otherwise from the RFC txwin field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4060 
4061 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4062 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4063 				    u8 *data)
4064 {
4065 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4066 
4067 	if (cmd_len < sizeof(*rej))
4068 		return -EPROTO;
4069 
4070 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4071 		return 0;
4072 
4073 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4074 	    cmd->ident == conn->info_ident) {
4075 		cancel_delayed_work(&conn->info_timer);
4076 
4077 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4078 		conn->info_ident = 0;
4079 
4080 		l2cap_conn_start(conn);
4081 	}
4082 
4083 	return 0;
4084 }
4085 
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request, per @rsp_code/@amp_id).  Looks up a listening channel for
 * the PSM, performs security and CID validity checks, creates a new
 * channel and always sends a response with the outcome.  Returns the
 * new channel, or NULL when no channel was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock before the channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	/* The remote's source CID is our destination CID. */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Defer the accept decision to userspace when
			 * requested, otherwise answer now.
			 */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still pending. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	/* Every request gets an answer, even on failure. */
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature mask exchange if we have not done so yet. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, begin configuration right away. */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4228 
4229 static int l2cap_connect_req(struct l2cap_conn *conn,
4230 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4231 {
4232 	struct hci_dev *hdev = conn->hcon->hdev;
4233 	struct hci_conn *hcon = conn->hcon;
4234 
4235 	if (cmd_len < sizeof(struct l2cap_conn_req))
4236 		return -EPROTO;
4237 
4238 	hci_dev_lock(hdev);
4239 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4240 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4241 		mgmt_device_connected(hdev, hcon, NULL, 0);
4242 	hci_dev_unlock(hdev);
4243 
4244 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4245 	return 0;
4246 }
4247 
/* Handle a Connection Response (or Create Channel Response).  Finds
 * the matching channel by source CID (or by command ident when no CID
 * was assigned yet) and moves it to the next state: start
 * configuration on success, keep waiting on pending, tear it down on
 * any other result.  Returns 0 on success or -EBADSLT when no
 * matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* Prefer lookup by our source CID; fall back to the command
	 * ident for responses sent before a CID was allocated.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		/* Remember the CID the remote allocated for us. */
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result refuses the connection. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4320 
4321 static inline void set_default_fcs(struct l2cap_chan *chan)
4322 {
4323 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4324 	 * sides request it.
4325 	 */
4326 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4327 		chan->fcs = L2CAP_FCS_NONE;
4328 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4329 		chan->fcs = L2CAP_FCS_CRC16;
4330 }
4331 
4332 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4333 				    u8 ident, u16 flags)
4334 {
4335 	struct l2cap_conn *conn = chan->conn;
4336 
4337 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4338 	       flags);
4339 
4340 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4341 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4342 
4343 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4344 		       l2cap_build_conf_rsp(chan, data,
4345 					    L2CAP_CONF_SUCCESS, flags), data);
4346 }
4347 
4348 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4349 				   u16 scid, u16 dcid)
4350 {
4351 	struct l2cap_cmd_rej_cid rej;
4352 
4353 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4354 	rej.scid = __cpu_to_le16(scid);
4355 	rej.dcid = __cpu_to_le16(dcid);
4356 
4357 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4358 }
4359 
/* Handle an incoming Configure Request.  Request fragments (the
 * continuation flag) are accumulated in chan->conf_req; once the last
 * fragment arrives the whole request is parsed, a response is sent,
 * and — when both directions are configured — the channel is brought
 * up.  Returns 0, or a negative error from ERTM initialization.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Config is only valid while connecting/configuring. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Incompatible options: tear the channel down. */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet — send it. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4469 
/* Handle an incoming Configure Response on a BR/EDR channel.
 *
 * Drives the local side of the configuration state machine: SUCCESS
 * and PENDING keep negotiating, UNKNOWN/UNACCEPT trigger a bounded
 * number of re-negotiation attempts, anything else kills the channel.
 *
 * Returns 0 or -EPROTO for a response too short to contain a header.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Lookup returns the channel locked (unlocked at "done" below) */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR can answer immediately; an AMP channel
			 * must first bring the logical link up.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, up to the spec limit */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Fatal result (or retry budget exhausted above) */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4583 
/* Handle an incoming Disconnect Request: acknowledge it and tear the
 * matching channel down.
 *
 * An unknown CID pair is answered with a Command Reject.  The channel
 * is held and locked across teardown so it cannot be freed while
 * set_shutdown()/close() run.  Returns 0 or -EPROTO on a size
 * mismatch.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Keep the channel alive and locked through teardown */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Response carries the CIDs from our perspective (swapped) */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4630 
/* Handle an incoming Disconnect Response: finish tearing down a
 * channel we previously asked to disconnect.
 *
 * Responses for unknown CIDs, or for channels not in BT_DISCONN, are
 * silently ignored.  Returns 0 or -EPROTO on a size mismatch.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Keep the channel alive and locked through teardown */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Only act on a response we are actually waiting for */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4676 
4677 static inline int l2cap_information_req(struct l2cap_conn *conn,
4678 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4679 					u8 *data)
4680 {
4681 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4682 	u16 type;
4683 
4684 	if (cmd_len != sizeof(*req))
4685 		return -EPROTO;
4686 
4687 	type = __le16_to_cpu(req->type);
4688 
4689 	BT_DBG("type 0x%4.4x", type);
4690 
4691 	if (type == L2CAP_IT_FEAT_MASK) {
4692 		u8 buf[8];
4693 		u32 feat_mask = l2cap_feat_mask;
4694 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4695 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4696 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4697 		if (!disable_ertm)
4698 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4699 				| L2CAP_FEAT_FCS;
4700 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4701 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4702 				| L2CAP_FEAT_EXT_WINDOW;
4703 
4704 		put_unaligned_le32(feat_mask, rsp->data);
4705 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4706 			       buf);
4707 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4708 		u8 buf[12];
4709 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4710 
4711 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4712 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4713 		rsp->data[0] = conn->local_fixed_chan;
4714 		memset(rsp->data + 1, 0, 7);
4715 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4716 			       buf);
4717 	} else {
4718 		struct l2cap_info_rsp rsp;
4719 		rsp.type   = cpu_to_le16(type);
4720 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4721 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4722 			       &rsp);
4723 	}
4724 
4725 	return 0;
4726 }
4727 
/* Handle an incoming Information Response for a query we issued.
 *
 * After the feature mask is learned, a follow-up fixed-channels query
 * may be sent; once all info is gathered (or the query failed) the
 * connection setup is resumed via l2cap_conn_start().
 *
 * Returns 0 or -EPROTO on a response too short for a header.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Query failed: give up on info and proceed anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query its bitmap */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4790 
/* Handle an incoming Create Channel Request (AMP extension).
 *
 * amp_id == AMP_ID_BREDR is equivalent to a plain Connect Request;
 * otherwise the named AMP controller is validated and, on success,
 * the created channel is associated with its logical link.  Invalid
 * controllers are answered with L2CAP_CR_BAD_AMP.
 *
 * Returns 0, -EPROTO on a size mismatch, or -EINVAL if A2MP is not
 * enabled locally.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			/* No AMP link to the peer: reject with our CIDs */
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP data path carries its own integrity check */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4867 
4868 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4869 {
4870 	struct l2cap_move_chan_req req;
4871 	u8 ident;
4872 
4873 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4874 
4875 	ident = l2cap_get_ident(chan->conn);
4876 	chan->ident = ident;
4877 
4878 	req.icid = cpu_to_le16(chan->scid);
4879 	req.dest_amp_id = dest_amp_id;
4880 
4881 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4882 		       &req);
4883 
4884 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4885 }
4886 
4887 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4888 {
4889 	struct l2cap_move_chan_rsp rsp;
4890 
4891 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4892 
4893 	rsp.icid = cpu_to_le16(chan->dcid);
4894 	rsp.result = cpu_to_le16(result);
4895 
4896 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4897 		       sizeof(rsp), &rsp);
4898 }
4899 
4900 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4901 {
4902 	struct l2cap_move_chan_cfm cfm;
4903 
4904 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4905 
4906 	chan->ident = l2cap_get_ident(chan->conn);
4907 
4908 	cfm.icid = cpu_to_le16(chan->scid);
4909 	cfm.result = cpu_to_le16(result);
4910 
4911 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4912 		       sizeof(cfm), &cfm);
4913 
4914 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4915 }
4916 
4917 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4918 {
4919 	struct l2cap_move_chan_cfm cfm;
4920 
4921 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4922 
4923 	cfm.icid = cpu_to_le16(icid);
4924 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4925 
4926 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4927 		       sizeof(cfm), &cfm);
4928 }
4929 
4930 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4931 					 u16 icid)
4932 {
4933 	struct l2cap_move_chan_cfm_rsp rsp;
4934 
4935 	BT_DBG("icid 0x%4.4x", icid);
4936 
4937 	rsp.icid = cpu_to_le16(icid);
4938 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4939 }
4940 
4941 static void __release_logical_link(struct l2cap_chan *chan)
4942 {
4943 	chan->hs_hchan = NULL;
4944 	chan->hs_hcon = NULL;
4945 
4946 	/* Placeholder - release the logical link */
4947 }
4948 
/* Handle a failed AMP logical link setup for @chan.
 *
 * If the channel never reached BT_CONNECTED the link was for channel
 * creation, so the channel is disconnected.  Otherwise the link was
 * for a channel move: the move is aborted according to our role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4979 
/* Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, send the deferred EFS configure response (using the
 * ident saved in chan->ident), and bring the channel up if the
 * inbound configuration has already completed.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5002 
/* Advance the channel-move state machine after the AMP logical link
 * came up, according to whether we still wait for the peer's response
 * (initiator) or must answer it (responder).  An unexpected move
 * state releases the link and returns the channel to STABLE.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer while the rx path is busy; otherwise confirm or
		 * respond depending on which side initiated the move.
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5036 
/* Logical link (AMP) setup completed for @chan with @status.
 *
 * On failure the setup is aborted and the link reference dropped.
 * On success, a not-yet-connected channel finishes creation, while a
 * connected channel continues its move.  Call with chan locked.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
5057 
5058 void l2cap_move_start(struct l2cap_chan *chan)
5059 {
5060 	BT_DBG("chan %p", chan);
5061 
5062 	if (chan->local_amp_id == AMP_ID_BREDR) {
5063 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5064 			return;
5065 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5066 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5067 		/* Placeholder - start physical link setup */
5068 	} else {
5069 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5070 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5071 		chan->move_id = 0;
5072 		l2cap_move_setup(chan);
5073 		l2cap_send_move_chan_req(chan, 0);
5074 	}
5075 }
5076 
/* Continue AMP channel creation after physical link setup finished
 * with @result.
 *
 * For an outgoing channel (BT_CONNECT) either a Create Channel
 * Request is sent on success or the setup falls back to a plain
 * BR/EDR Connect Request.  For an incoming channel the deferred
 * Create Channel Response is sent, followed by our Configure Request
 * on success.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP data path carries its own integrity check */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5128 
5129 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5130 				   u8 remote_amp_id)
5131 {
5132 	l2cap_move_setup(chan);
5133 	chan->move_id = local_amp_id;
5134 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5135 
5136 	l2cap_send_move_chan_req(chan, remote_amp_id);
5137 }
5138 
/* As move responder, answer the peer once physical link setup is done.
 *
 * NOTE: obtaining the hci_chan for the logical link is still a
 * placeholder, so hchan is always NULL here and the function currently
 * always replies L2CAP_MR_NOT_ALLOWED; the branches above are for the
 * eventual implementation.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5163 
5164 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5165 {
5166 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5167 		u8 rsp_result;
5168 		if (result == -EINVAL)
5169 			rsp_result = L2CAP_MR_BAD_ID;
5170 		else
5171 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5172 
5173 		l2cap_send_move_chan_rsp(chan, rsp_result);
5174 	}
5175 
5176 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5177 	chan->move_state = L2CAP_MOVE_STABLE;
5178 
5179 	/* Restart data transmission */
5180 	l2cap_ertm_send(chan);
5181 }
5182 
/* Physical (AMP) link setup finished with @result for @chan.
 *
 * Dispatches to channel creation for a not-yet-connected channel, or
 * to the appropriate move continuation/cancellation path based on our
 * move role.  Channels already going down are ignored.  Invoke with
 * locked chan.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5214 
/* Handle an incoming Move Channel Request (AMP extension).
 *
 * Validates the channel (dynamic CID, ERTM/streaming mode, policy),
 * the destination controller, and detects move collisions before
 * taking the responder role.  Every path answers with a Move Channel
 * Response.
 *
 * Returns 0, -EPROTO on a size mismatch, or -EINVAL if A2MP is not
 * enabled locally.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Lookup returns the channel locked (unlocked below); with no
	 * channel the response is built by hand since there is no
	 * chan->ident to reuse.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may move, and only when
	 * policy does not pin them to BR/EDR.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist and be up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5311 
/* Continue an initiated channel move after a SUCCESS or PEND Move
 * Channel Response identified by @icid.
 *
 * Advances the move state machine; if no channel matches the ICID an
 * UNCONFIRMED confirm is sent as a best guess.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* Re-arm the guard timer only while the peer reports pending */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		/* NOTE: hchan is never assigned above, so until the
		 * placeholder is implemented this branch always takes
		 * the UNCONFIRMED path and the code below is dead.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5401 
/* Handle a failed Move Channel Response (anything but SUCCESS/PEND).
 *
 * A COLLISION result demotes us to responder; other results cancel
 * the move.  In all cases an UNCONFIRMED confirm is sent, falling
 * back to a bare-ICID confirm if the channel cannot be found by the
 * request ident.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5430 
5431 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5432 				  struct l2cap_cmd_hdr *cmd,
5433 				  u16 cmd_len, void *data)
5434 {
5435 	struct l2cap_move_chan_rsp *rsp = data;
5436 	u16 icid, result;
5437 
5438 	if (cmd_len != sizeof(*rsp))
5439 		return -EPROTO;
5440 
5441 	icid = le16_to_cpu(rsp->icid);
5442 	result = le16_to_cpu(rsp->result);
5443 
5444 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5445 
5446 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5447 		l2cap_move_continue(conn, icid, result);
5448 	else
5449 		l2cap_move_fail(conn, cmd->ident, icid, result);
5450 
5451 	return 0;
5452 }
5453 
/* Handle an incoming Move Channel Confirm as the move responder.
 *
 * A CONFIRMED result commits the move (releasing the logical link
 * when moving back to BR/EDR), anything else rolls it back.  A
 * confirm response is always sent, even for an unknown ICID.
 *
 * Returns 0 or -EPROTO on a size mismatch.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move rejected: keep the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5495 
/* Handle an incoming Move Channel Confirm Response, the final PDU of an
 * AMP channel move: commit the pending controller switch and release
 * the logical link when the channel ended up back on BR/EDR.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	/* Fixed-size PDU */
	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked (unlocked below); an unknown icid in
	 * a response PDU is silently ignored.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	/* The confirm response was awaited under a channel timer */
	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5530 
5531 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5532 					      struct l2cap_cmd_hdr *cmd,
5533 					      u16 cmd_len, u8 *data)
5534 {
5535 	struct hci_conn *hcon = conn->hcon;
5536 	struct l2cap_conn_param_update_req *req;
5537 	struct l2cap_conn_param_update_rsp rsp;
5538 	u16 min, max, latency, to_multiplier;
5539 	int err;
5540 
5541 	if (hcon->role != HCI_ROLE_MASTER)
5542 		return -EINVAL;
5543 
5544 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5545 		return -EPROTO;
5546 
5547 	req = (struct l2cap_conn_param_update_req *) data;
5548 	min		= __le16_to_cpu(req->min);
5549 	max		= __le16_to_cpu(req->max);
5550 	latency		= __le16_to_cpu(req->latency);
5551 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5552 
5553 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5554 	       min, max, latency, to_multiplier);
5555 
5556 	memset(&rsp, 0, sizeof(rsp));
5557 
5558 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5559 	if (err)
5560 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5561 	else
5562 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5563 
5564 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5565 		       sizeof(rsp), &rsp);
5566 
5567 	if (!err) {
5568 		u8 store_hint;
5569 
5570 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5571 						to_multiplier);
5572 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5573 				    store_hint, min, max, latency,
5574 				    to_multiplier);
5575 
5576 	}
5577 
5578 	return 0;
5579 }
5580 
/* Handle an LE Credit Based Connection Response.
 * On success the channel matched by the request ident is completed with
 * the remote parameters.  On an authentication/encryption failure the
 * security level is raised and SMP is kicked so the request can be sent
 * again; any other result deletes the channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must carry sane values: MTU/MPS of at
	 * least 23 and a DCID inside the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* The pending channel is matched by the ident of our request */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID already used by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the security level one step and retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		/* Any other result code refuses the connection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5667 
/* Dispatch a single BR/EDR signaling command to its handler.
 * A nonzero return makes the caller (l2cap_sig_channel) send an
 * L2CAP_COMMAND_REJ for the offending command.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo simply mirrors the payload back to the sender */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5747 
/* Handle an LE Credit Based Connection Request.
 * Validates the request against a listening channel for the PSM, the
 * link's security level and the proposed SCID, then creates the new
 * channel and responds with our local parameters.  When the listener
 * has FLAG_DEFER_SETUP, the response is sent later by userspace.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	/* Fixed-size PDU */
	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum acceptable LE MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident so a deferred response can match */
	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: userspace sends the response later */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5873 
/* Handle an LE Flow Control Credit packet: add the granted credits to
 * the channel's TX budget and resume transmission.  A peer granting
 * more than the allowed maximum gets the channel disconnected.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	/* Fixed-size PDU */
	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked (unlocked below) */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Outstanding credits must never exceed LE_FLOWCTL_MAX_CREDITS */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5919 
5920 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5921 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5922 				       u8 *data)
5923 {
5924 	struct l2cap_ecred_conn_req *req = (void *) data;
5925 	struct {
5926 		struct l2cap_ecred_conn_rsp rsp;
5927 		__le16 dcid[L2CAP_ECRED_MAX_CID];
5928 	} __packed pdu;
5929 	struct l2cap_chan *chan, *pchan;
5930 	u16 mtu, mps;
5931 	__le16 psm;
5932 	u8 result, len = 0;
5933 	int i, num_scid;
5934 	bool defer = false;
5935 
5936 	if (!enable_ecred)
5937 		return -EINVAL;
5938 
5939 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5940 		result = L2CAP_CR_LE_INVALID_PARAMS;
5941 		goto response;
5942 	}
5943 
5944 	cmd_len -= sizeof(*req);
5945 	num_scid = cmd_len / sizeof(u16);
5946 
5947 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
5948 		result = L2CAP_CR_LE_INVALID_PARAMS;
5949 		goto response;
5950 	}
5951 
5952 	mtu  = __le16_to_cpu(req->mtu);
5953 	mps  = __le16_to_cpu(req->mps);
5954 
5955 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5956 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5957 		goto response;
5958 	}
5959 
5960 	psm  = req->psm;
5961 
5962 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5963 
5964 	memset(&pdu, 0, sizeof(pdu));
5965 
5966 	/* Check if we have socket listening on psm */
5967 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5968 					 &conn->hcon->dst, LE_LINK);
5969 	if (!pchan) {
5970 		result = L2CAP_CR_LE_BAD_PSM;
5971 		goto response;
5972 	}
5973 
5974 	mutex_lock(&conn->chan_lock);
5975 	l2cap_chan_lock(pchan);
5976 
5977 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5978 				     SMP_ALLOW_STK)) {
5979 		result = L2CAP_CR_LE_AUTHENTICATION;
5980 		goto unlock;
5981 	}
5982 
5983 	result = L2CAP_CR_LE_SUCCESS;
5984 
5985 	for (i = 0; i < num_scid; i++) {
5986 		u16 scid = __le16_to_cpu(req->scid[i]);
5987 
5988 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
5989 
5990 		pdu.dcid[i] = 0x0000;
5991 		len += sizeof(*pdu.dcid);
5992 
5993 		/* Check for valid dynamic CID range */
5994 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5995 			result = L2CAP_CR_LE_INVALID_SCID;
5996 			continue;
5997 		}
5998 
5999 		/* Check if we already have channel with that dcid */
6000 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6001 			result = L2CAP_CR_LE_SCID_IN_USE;
6002 			continue;
6003 		}
6004 
6005 		chan = pchan->ops->new_connection(pchan);
6006 		if (!chan) {
6007 			result = L2CAP_CR_LE_NO_MEM;
6008 			continue;
6009 		}
6010 
6011 		bacpy(&chan->src, &conn->hcon->src);
6012 		bacpy(&chan->dst, &conn->hcon->dst);
6013 		chan->src_type = bdaddr_src_type(conn->hcon);
6014 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6015 		chan->psm  = psm;
6016 		chan->dcid = scid;
6017 		chan->omtu = mtu;
6018 		chan->remote_mps = mps;
6019 
6020 		__l2cap_chan_add(conn, chan);
6021 
6022 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6023 
6024 		/* Init response */
6025 		if (!pdu.rsp.credits) {
6026 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6027 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6028 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6029 		}
6030 
6031 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6032 
6033 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6034 
6035 		chan->ident = cmd->ident;
6036 
6037 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6038 			l2cap_state_change(chan, BT_CONNECT2);
6039 			defer = true;
6040 			chan->ops->defer(chan);
6041 		} else {
6042 			l2cap_chan_ready(chan);
6043 		}
6044 	}
6045 
6046 unlock:
6047 	l2cap_chan_unlock(pchan);
6048 	mutex_unlock(&conn->chan_lock);
6049 	l2cap_chan_put(pchan);
6050 
6051 response:
6052 	pdu.rsp.result = cpu_to_le16(result);
6053 
6054 	if (defer)
6055 		return 0;
6056 
6057 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6058 		       sizeof(pdu.rsp) + len, &pdu);
6059 
6060 	return 0;
6061 }
6062 
/* Handle an Enhanced Credit Based Connection Response.
 * Walks every channel that was part of the request (matched by the
 * command ident) and, per channel, either completes it with the remote
 * parameters, retries with a higher security level, or deletes it,
 * depending on the result code and its assigned DCID.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now tracks the remaining bytes of the DCID list */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only pending EXT_FLOWCTL channels from this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Also delete the pre-existing channel owning dcid */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise the security level one step and retry */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6176 
6177 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6178 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6179 					 u8 *data)
6180 {
6181 	struct l2cap_ecred_reconf_req *req = (void *) data;
6182 	struct l2cap_ecred_reconf_rsp rsp;
6183 	u16 mtu, mps, result;
6184 	struct l2cap_chan *chan;
6185 	int i, num_scid;
6186 
6187 	if (!enable_ecred)
6188 		return -EINVAL;
6189 
6190 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6191 		result = L2CAP_CR_LE_INVALID_PARAMS;
6192 		goto respond;
6193 	}
6194 
6195 	mtu = __le16_to_cpu(req->mtu);
6196 	mps = __le16_to_cpu(req->mps);
6197 
6198 	BT_DBG("mtu %u mps %u", mtu, mps);
6199 
6200 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6201 		result = L2CAP_RECONF_INVALID_MTU;
6202 		goto respond;
6203 	}
6204 
6205 	if (mps < L2CAP_ECRED_MIN_MPS) {
6206 		result = L2CAP_RECONF_INVALID_MPS;
6207 		goto respond;
6208 	}
6209 
6210 	cmd_len -= sizeof(*req);
6211 	num_scid = cmd_len / sizeof(u16);
6212 	result = L2CAP_RECONF_SUCCESS;
6213 
6214 	for (i = 0; i < num_scid; i++) {
6215 		u16 scid;
6216 
6217 		scid = __le16_to_cpu(req->scid[i]);
6218 		if (!scid)
6219 			return -EPROTO;
6220 
6221 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6222 		if (!chan)
6223 			continue;
6224 
6225 		/* If the MTU value is decreased for any of the included
6226 		 * channels, then the receiver shall disconnect all
6227 		 * included channels.
6228 		 */
6229 		if (chan->omtu > mtu) {
6230 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6231 			       chan->omtu, mtu);
6232 			result = L2CAP_RECONF_INVALID_MTU;
6233 		}
6234 
6235 		chan->omtu = mtu;
6236 		chan->remote_mps = mps;
6237 	}
6238 
6239 respond:
6240 	rsp.result = cpu_to_le16(result);
6241 
6242 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6243 		       &rsp);
6244 
6245 	return 0;
6246 }
6247 
6248 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6249 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6250 					 u8 *data)
6251 {
6252 	struct l2cap_chan *chan, *tmp;
6253 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6254 	u16 result;
6255 
6256 	if (cmd_len < sizeof(*rsp))
6257 		return -EPROTO;
6258 
6259 	result = __le16_to_cpu(rsp->result);
6260 
6261 	BT_DBG("result 0x%4.4x", rsp->result);
6262 
6263 	if (!result)
6264 		return 0;
6265 
6266 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6267 		if (chan->ident != cmd->ident)
6268 			continue;
6269 
6270 		l2cap_chan_del(chan, ECONNRESET);
6271 	}
6272 
6273 	return 0;
6274 }
6275 
6276 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6277 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6278 				       u8 *data)
6279 {
6280 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6281 	struct l2cap_chan *chan;
6282 
6283 	if (cmd_len < sizeof(*rej))
6284 		return -EPROTO;
6285 
6286 	mutex_lock(&conn->chan_lock);
6287 
6288 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6289 	if (!chan)
6290 		goto done;
6291 
6292 	l2cap_chan_lock(chan);
6293 	l2cap_chan_del(chan, ECONNREFUSED);
6294 	l2cap_chan_unlock(chan);
6295 
6296 done:
6297 	mutex_unlock(&conn->chan_lock);
6298 	return 0;
6299 }
6300 
/* Dispatch a single LE signaling command to its handler.
 * A nonzero return makes the caller (l2cap_le_sig_channel) send an
 * L2CAP_COMMAND_REJ for this command.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
6363 
/* Receive path for the LE signaling channel.
 * LE allows only one command per C-frame, so the advertised length must
 * match the remaining payload exactly; malformed frames are silently
 * dropped.  The skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Ident 0 is reserved; a length mismatch means a corrupt frame */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading - err here is
		 * a command handling failure, not a link type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6404 
/* Receive path for the BR/EDR signaling channel.
 * Unlike LE, a single C-frame may concatenate several commands; each is
 * dispatched in turn and a Command Reject is sent for any that fail.
 * The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* '>' rather than '!=': more commands may follow this one */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text is misleading - err
			 * here is a command handling failure, not a link
			 * type mismatch.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6450 
6451 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6452 {
6453 	u16 our_fcs, rcv_fcs;
6454 	int hdr_size;
6455 
6456 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6457 		hdr_size = L2CAP_EXT_HDR_SIZE;
6458 	else
6459 		hdr_size = L2CAP_ENH_HDR_SIZE;
6460 
6461 	if (chan->fcs == L2CAP_FCS_CRC16) {
6462 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6463 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6464 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6465 
6466 		if (our_fcs != rcv_fcs)
6467 			return -EBADMSG;
6468 	}
6469 	return 0;
6470 }
6471 
/* Acknowledge the peer with the F-bit set: send an RNR while locally
 * busy, flush pending I-frames, and finally send a plain RR if the
 * F-bit could not be carried by any frame sent above.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote was marked busy: clear that and restart retransmission
	 * if frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6505 
/* Append new_frag to skb's frag_list, using *last_frag as the cached
 * tail pointer, and fold new_frag's sizes into skb's accounting.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6524 
/* Reassemble an SDU from I-frame payloads according to the SAR field.
 * Ownership: skb is consumed on success (buffered in chan->sdu or
 * delivered via chan->ops->recv()); on error both skb and any partially
 * assembled SDU are freed.  Returns 0 on success, -EINVAL for a SAR
 * sequence violation, -EMSGSIZE for an oversized SDU, or the error from
 * chan->ops->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame while reassembling is a violation */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU must not interrupt one already in progress */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* The first segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must be smaller than the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the partial SDU */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end segment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb is NULL here if ownership was already transferred;
		 * kfree_skb(NULL) is a no-op.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6609 
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Resegmentation after an MPS change is not implemented yet;
	 * report success unconditionally.
	 */
	return 0;
}
6615 
6616 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6617 {
6618 	u8 event;
6619 
6620 	if (chan->mode != L2CAP_MODE_ERTM)
6621 		return;
6622 
6623 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6624 	l2cap_tx(chan, NULL, NULL, event);
6625 }
6626 
/* Drain the SREJ queue in order: pass consecutive buffered I-frames to
 * reassembly starting at buffer_seq until a gap (or local busy) stops
 * us; once the queue is empty, return to the RECV state and ack.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-sequence frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* Everything buffered was consumed: back to normal receive */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6660 
/* Handle a received SREJ S-frame (selective retransmission request):
 * validate reqseq and the retry limit, retransmit the requested
 * I-frame, and keep the poll/final bookkeeping so a retransmission is
 * not duplicated after a poll exchange.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer may not SREJ a sequence number we have not sent yet */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* A poll demands the F-bit in our reply */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ was already
			 * answered during the poll exchange.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6718 
/* Handle an incoming REJ S-frame (go-back-N retransmit request).
 *
 * Validates reqseq and retransmits all unacked frames starting at
 * reqseq.  CONN_REJ_ACT prevents the frames from being retransmitted
 * a second time when the matching F-bit response arrives.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next, still-unsent sequence number is a
	 * protocol error.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit does not answer a REJ
		 * we already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6755 
/* Classify a received I-frame's txseq relative to the expected
 * sequence number, the tx window and any outstanding SREJ state.
 *
 * Returns one of the L2CAP_TXSEQ_* values that the rx state machines
 * use to decide whether to process, queue, ignore, or disconnect.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra classifications only apply while SREJ recovery is active */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (mod window) was already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6841 
/* ERTM rx state machine: handler for the RECV state.
 *
 * Handles I-frames and S-frames while reception is in sequence.  An
 * I-frame with an unexpected txseq starts SREJ recovery and moves the
 * channel to the SREJ_SENT state.  Any skb that was not queued or
 * consumed (tracked by skb_in_use) is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			/* Ownership of skb passes to l2cap_reassemble_sdu();
			 * skb_in_use prevents the free below even on error.
			 */
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy: restart the retransmission
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6975 
/* ERTM rx state machine: handler for the SREJ_SENT state.
 *
 * While SREJs are outstanding, received I-frames are queued on srej_q
 * rather than reassembled directly; l2cap_rx_queued_iframes() drains
 * the queue (and leaves this state) once the gaps are filled.  Any
 * skb not queued (tracked by skb_in_use) is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; queue it and try
			 * to drain any now-sequential frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with the F-bit on the tail SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7118 
/* Complete a channel move: return to the RECV state, adopt the MTU of
 * the new transport (block MTU when a high-speed link is attached,
 * the ACL MTU otherwise) and re-segment pending transmit data.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
7132 
/* ERTM rx state machine: handler for the WAIT_P state.
 *
 * Only a frame with the P-bit set is acceptable here.  Processes the
 * acknowledged frames, rewinds the tx queue to the receiver's reqseq,
 * finishes the move, and answers with an F-bit frame before handing
 * any remaining event to the RECV handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid in this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7170 
/* ERTM rx state machine: handler for the WAIT_F state.
 *
 * Only a frame with the F-bit set is acceptable here.  Rewinds the tx
 * queue to the receiver's reqseq, adopts the MTU of the (possibly
 * new) transport, re-segments, and then lets the RECV handler process
 * the frame itself.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7208 
7209 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7210 {
7211 	/* Make sure reqseq is for a packet that has been sent but not acked */
7212 	u16 unacked;
7213 
7214 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7215 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7216 }
7217 
7218 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7219 		    struct sk_buff *skb, u8 event)
7220 {
7221 	int err = 0;
7222 
7223 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7224 	       control, skb, event, chan->rx_state);
7225 
7226 	if (__valid_reqseq(chan, control->reqseq)) {
7227 		switch (chan->rx_state) {
7228 		case L2CAP_RX_STATE_RECV:
7229 			err = l2cap_rx_state_recv(chan, control, skb, event);
7230 			break;
7231 		case L2CAP_RX_STATE_SREJ_SENT:
7232 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7233 						       event);
7234 			break;
7235 		case L2CAP_RX_STATE_WAIT_P:
7236 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7237 			break;
7238 		case L2CAP_RX_STATE_WAIT_F:
7239 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7240 			break;
7241 		default:
7242 			/* shut it down */
7243 			break;
7244 		}
7245 	} else {
7246 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7247 		       control->reqseq, chan->next_tx_seq,
7248 		       chan->expected_ack_seq);
7249 		l2cap_send_disconn_req(chan, ECONNRESET);
7250 	}
7251 
7252 	return err;
7253 }
7254 
/* Streaming mode reception.
 *
 * Only a frame with the expected txseq is reassembled; anything else
 * discards both any partial SDU and the frame itself, since there is
 * no retransmission path here.  The sequence state always advances
 * past the received txseq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop any partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
7290 
/* Entry point for ERTM/streaming mode PDUs on a connected channel.
 *
 * Unpacks the control field, verifies FCS and payload length, then
 * dispatches I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames to
 * l2cap_rx() with the matching event.  Corrupted frames are silently
 * dropped so that ERTM recovery can request a retransmission.
 * Always returns 0; the skb is consumed on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field and FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the S-frame super field to the rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7383 
/* Return flow-control credits to the remote side of an LE channel.
 *
 * The target is enough credits for the peer to fill imtu
 * (imtu / mps + 1 PDUs); an LE Credits packet is sent only when
 * rx_credits has dropped below that target, carrying the difference.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = (chan->imtu / chan->mps) + 1;

	if (chan->rx_credits >= return_credits)
		return;

	/* Only grant the credits the peer is missing */
	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
7408 
7409 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7410 {
7411 	int err;
7412 
7413 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7414 
7415 	/* Wait recv to confirm reception before updating the credits */
7416 	err = chan->ops->recv(chan, skb);
7417 
7418 	/* Update credits whenever an SDU is received */
7419 	l2cap_chan_le_send_credits(chan);
7420 
7421 	return err;
7422 }
7423 
/* Receive a PDU on an LE / enhanced-credit flow control channel.
 *
 * Consumes one credit per PDU and reassembles SDUs that span several
 * PDUs into chan->sdu before handing them to l2cap_ecred_recv().
 *
 * Returns a negative error only when the PDU cannot be accepted at
 * all (no credits, or PDU larger than imtu); in those cases the skb
 * is left for the caller to free.  All other error paths free the
 * skb internally and return 0 to avoid a double free in the caller.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* SDU fits in a single PDU: deliver it right away */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is absorbed into chan->sdu; NULL it so the cleanup below
	 * cannot free it a second time.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7527 
/* Route an incoming data PDU to the channel identified by cid.
 *
 * The channel is looked up by scid (l2cap_get_chan_by_scid is
 * expected to return it locked -- it is unlocked at done:).  Unknown
 * CIDs are dropped, except A2MP where a channel is created on demand
 * and locked explicitly.  The PDU is then dispatched by channel mode;
 * the skb is consumed on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Negative return means the skb was not consumed */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
7602 
/* Deliver a connectionless-channel PDU to the global channel bound to
 * psm.  Only valid on ACL links.  The remote address and PSM are
 * stored in the skb control block so the socket layer can report them
 * in msg_name.  The channel reference taken by the lookup is dropped
 * on every path, and the skb is consumed on every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means the skb was accepted */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7639 
/* Main entry point for a complete L2CAP frame from the HCI layer.
 *
 * Frames arriving before the HCI connection is fully established are
 * queued on pending_rx and replayed later by process_pending_rx().
 * Otherwise the basic L2CAP header is stripped and validated, and
 * the frame is dispatched by CID.  The skb is consumed on every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7694 
7695 static void process_pending_rx(struct work_struct *work)
7696 {
7697 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7698 					       pending_rx_work);
7699 	struct sk_buff *skb;
7700 
7701 	BT_DBG("");
7702 
7703 	while ((skb = skb_dequeue(&conn->pending_rx)))
7704 		l2cap_recv_frame(conn, skb);
7705 }
7706 
/* Get or create the l2cap_conn attached to an HCI connection.
 *
 * Returns the existing conn if one is already attached.  Otherwise
 * allocates a new one, creates an HCI channel, takes a reference on
 * hcon, and initializes the MTU, fixed-channel mask, locks, channel
 * list and work items.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the LE MTU when the controller provides one;
	 * everything else falls back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7773 
7774 static bool is_valid_psm(u16 psm, u8 dst_type)
7775 {
7776 	if (!psm)
7777 		return false;
7778 
7779 	if (bdaddr_type_is_le(dst_type))
7780 		return (psm <= 0x00ff);
7781 
7782 	/* PSM must be odd and lsb of upper byte must be 0 */
7783 	return ((psm & 0x0101) == 0x0001);
7784 }
7785 
/* Iterator context used by l2cap_chan_by_pid() to count deferred
 * channels that match the PID/PSM of the channel being connected.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* the channel being connected; excluded from the count */
	struct pid *pid;		/* peer pid of @chan, for matching */
	int count;			/* number of matching channels found */
};
7791 
7792 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7793 {
7794 	struct l2cap_chan_data *d = data;
7795 	struct pid *pid;
7796 
7797 	if (chan == d->chan)
7798 		return;
7799 
7800 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7801 		return;
7802 
7803 	pid = chan->ops->get_peer_pid(chan);
7804 
7805 	/* Only count deferred channels with the same PID/PSM */
7806 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7807 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7808 		return;
7809 
7810 	d->count++;
7811 }
7812 
/* Initiate an outgoing connection on @chan to @dst / @dst_type.
 *
 * Connection oriented channels connect to @psm, fixed channels to
 * @cid; the underlying ACL or LE link is created if it does not exist
 * yet.  Returns 0 once the connect procedure is under way (including
 * when the channel was already connecting) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels are exempt from the PSM/CID validity check */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection oriented channels require a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... and fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled via the module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly in the peripheral
		 * role; otherwise use the connect-by-scanning path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check that not too many channels are being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* The requested destination CID must not be in use already */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7999 
8000 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8001 {
8002 	struct l2cap_conn *conn = chan->conn;
8003 	struct {
8004 		struct l2cap_ecred_reconf_req req;
8005 		__le16 scid;
8006 	} pdu;
8007 
8008 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8009 	pdu.req.mps = cpu_to_le16(chan->mps);
8010 	pdu.scid    = cpu_to_le16(chan->scid);
8011 
8012 	chan->ident = l2cap_get_ident(conn);
8013 
8014 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8015 		       sizeof(pdu), &pdu);
8016 }
8017 
8018 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8019 {
8020 	if (chan->imtu > mtu)
8021 		return -EINVAL;
8022 
8023 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8024 
8025 	chan->imtu = mtu;
8026 
8027 	l2cap_ecred_reconfigure(chan);
8028 
8029 	return 0;
8030 }
8031 
8032 /* ---- L2CAP interface with lower layer (HCI) ---- */
8033 
8034 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8035 {
8036 	int exact = 0, lm1 = 0, lm2 = 0;
8037 	struct l2cap_chan *c;
8038 
8039 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8040 
8041 	/* Find listening sockets and check their link_mode */
8042 	read_lock(&chan_list_lock);
8043 	list_for_each_entry(c, &chan_list, global_l) {
8044 		if (c->state != BT_LISTEN)
8045 			continue;
8046 
8047 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8048 			lm1 |= HCI_LM_ACCEPT;
8049 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8050 				lm1 |= HCI_LM_MASTER;
8051 			exact++;
8052 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8053 			lm2 |= HCI_LM_ACCEPT;
8054 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8055 				lm2 |= HCI_LM_MASTER;
8056 		}
8057 	}
8058 	read_unlock(&chan_list_lock);
8059 
8060 	return exact ? lm1 : lm2;
8061 }
8062 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A reference is held on the returned channel; the caller must release
 * it with l2cap_chan_put().  Returns NULL when no further match exists.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume right after @c, or start from the head of the list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match the link's local address or wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Hold the channel so it survives dropping the list lock */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8098 
/* HCI callback: establishment of an ACL/LE link completed.
 *
 * On failure the L2CAP state for the link is torn down.  On success
 * the connection is created (if needed) and every listening fixed
 * channel is offered the chance to instantiate a channel on the new
 * link, before the connection is marked ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference taken by the
		 * previous lookup.
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8159 
8160 int l2cap_disconn_ind(struct hci_conn *hcon)
8161 {
8162 	struct l2cap_conn *conn = hcon->l2cap_data;
8163 
8164 	BT_DBG("hcon %p", hcon);
8165 
8166 	if (!conn)
8167 		return HCI_ERROR_REMOTE_USER_TERM;
8168 	return conn->disc_reason;
8169 }
8170 
8171 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8172 {
8173 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8174 		return;
8175 
8176 	BT_DBG("hcon %p reason %d", hcon, reason);
8177 
8178 	l2cap_conn_del(hcon, bt_to_errno(reason));
8179 }
8180 
8181 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8182 {
8183 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8184 		return;
8185 
8186 	if (encrypt == 0x00) {
8187 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8188 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8189 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8190 			   chan->sec_level == BT_SECURITY_FIPS)
8191 			l2cap_chan_close(chan, ECONNREFUSED);
8192 	} else {
8193 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8194 			__clear_chan_timer(chan);
8195 	}
8196 }
8197 
/* HCI callback: the authentication/encryption state of the link changed.
 *
 * Walks every channel on the connection and, depending on @status and
 * the new @encrypt state, resumes suspended channels, continues pending
 * connection setups, or schedules the channels for disconnection.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Skip the A2MP channel */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Successful encryption raises the channel to the link's
		 * security level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Leave channels alone while a connect request is still
		 * outstanding.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Only proceed when the encryption key is long
			 * enough; otherwise schedule a disconnect.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and disconnect */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right away on success */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8289 
8290 /* Append fragment into frame respecting the maximum len of rx_skb */
8291 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8292 			   u16 len)
8293 {
8294 	if (!conn->rx_skb) {
8295 		/* Allocate skb for the complete frame (with header) */
8296 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8297 		if (!conn->rx_skb)
8298 			return -ENOMEM;
8299 		/* Init rx_len */
8300 		conn->rx_len = len;
8301 	}
8302 
8303 	/* Copy as much as the rx_skb can hold */
8304 	len = min_t(u16, len, skb->len);
8305 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8306 	skb_pull(skb, len);
8307 	conn->rx_len -= len;
8308 
8309 	return len;
8310 }
8311 
/* Complete the 2-byte L2CAP basic header length field of a frame whose
 * start fragment was too short to contain it, then resize the
 * reassembly buffer with the exact size when the pre-allocated rx_skb
 * turns out to be too small.
 *
 * Returns the number of bytes consumed from @skb or a negative errno
 * if (re)allocation failed.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to receive all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length, copying the
	 * already-received header bytes over from the old buffer.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8346 
8347 static void l2cap_recv_reset(struct l2cap_conn *conn)
8348 {
8349 	kfree_skb(conn->rx_skb);
8350 	conn->rx_skb = NULL;
8351 	conn->rx_len = 0;
8352 }
8353 
8354 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8355 {
8356 	struct l2cap_conn *conn = hcon->l2cap_data;
8357 	int len;
8358 
8359 	/* For AMP controller do not create l2cap conn */
8360 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8361 		goto drop;
8362 
8363 	if (!conn)
8364 		conn = l2cap_conn_add(hcon);
8365 
8366 	if (!conn)
8367 		goto drop;
8368 
8369 	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8370 
8371 	switch (flags) {
8372 	case ACL_START:
8373 	case ACL_START_NO_FLUSH:
8374 	case ACL_COMPLETE:
8375 		if (conn->rx_skb) {
8376 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8377 			l2cap_recv_reset(conn);
8378 			l2cap_conn_unreliable(conn, ECOMM);
8379 		}
8380 
8381 		/* Start fragment may not contain the L2CAP length so just
8382 		 * copy the initial byte when that happens and use conn->mtu as
8383 		 * expected length.
8384 		 */
8385 		if (skb->len < L2CAP_LEN_SIZE) {
8386 			if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
8387 				goto drop;
8388 			return;
8389 		}
8390 
8391 		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8392 
8393 		if (len == skb->len) {
8394 			/* Complete frame received */
8395 			l2cap_recv_frame(conn, skb);
8396 			return;
8397 		}
8398 
8399 		BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8400 
8401 		if (skb->len > len) {
8402 			BT_ERR("Frame is too long (len %u, expected len %d)",
8403 			       skb->len, len);
8404 			l2cap_conn_unreliable(conn, ECOMM);
8405 			goto drop;
8406 		}
8407 
8408 		/* Append fragment into frame (with header) */
8409 		if (l2cap_recv_frag(conn, skb, len) < 0)
8410 			goto drop;
8411 
8412 		break;
8413 
8414 	case ACL_CONT:
8415 		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8416 
8417 		if (!conn->rx_skb) {
8418 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8419 			l2cap_conn_unreliable(conn, ECOMM);
8420 			goto drop;
8421 		}
8422 
8423 		/* Complete the L2CAP length if it has not been read */
8424 		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8425 			if (l2cap_recv_len(conn, skb) < 0) {
8426 				l2cap_conn_unreliable(conn, ECOMM);
8427 				goto drop;
8428 			}
8429 
8430 			/* Header still could not be read just continue */
8431 			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8432 				return;
8433 		}
8434 
8435 		if (skb->len > conn->rx_len) {
8436 			BT_ERR("Fragment is too long (len %u, expected %u)",
8437 			       skb->len, conn->rx_len);
8438 			l2cap_recv_reset(conn);
8439 			l2cap_conn_unreliable(conn, ECOMM);
8440 			goto drop;
8441 		}
8442 
8443 		/* Append fragment into frame (with header) */
8444 		l2cap_recv_frag(conn, skb, skb->len);
8445 
8446 		if (!conn->rx_len) {
8447 			/* Complete frame received. l2cap_recv_frame
8448 			 * takes ownership of the skb so set the global
8449 			 * rx_skb pointer to NULL first.
8450 			 */
8451 			struct sk_buff *rx_skb = conn->rx_skb;
8452 			conn->rx_skb = NULL;
8453 			l2cap_recv_frame(conn, rx_skb);
8454 		}
8455 		break;
8456 	}
8457 
8458 drop:
8459 	kfree_skb(skb);
8460 }
8461 
/* Link-event callbacks registered with the HCI core in l2cap_init() */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8468 
8469 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8470 {
8471 	struct l2cap_chan *c;
8472 
8473 	read_lock(&chan_list_lock);
8474 
8475 	list_for_each_entry(c, &chan_list, global_l) {
8476 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8477 			   &c->src, c->src_type, &c->dst, c->dst_type,
8478 			   c->state, __le16_to_cpu(c->psm),
8479 			   c->scid, c->dcid, c->imtu, c->omtu,
8480 			   c->sec_level, c->mode);
8481 	}
8482 
8483 	read_unlock(&chan_list_lock);
8484 
8485 	return 0;
8486 }
8487 
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Handle of the debugfs "l2cap" file, removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
8491 
8492 int __init l2cap_init(void)
8493 {
8494 	int err;
8495 
8496 	err = l2cap_init_sockets();
8497 	if (err < 0)
8498 		return err;
8499 
8500 	hci_register_cb(&l2cap_cb);
8501 
8502 	if (IS_ERR_OR_NULL(bt_debugfs))
8503 		return 0;
8504 
8505 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8506 					    NULL, &l2cap_debugfs_fops);
8507 
8508 	return 0;
8509 }
8510 
/* Tear down the L2CAP layer in the reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8517 
/* Module parameters (mode 0644: readable by all, writable by root) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8523