xref: /linux/net/bluetooth/l2cap_core.c (revision 56063823b9f0e2acdca4d621face5c6a7a1f4c99)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
/* Upper bound on credits that can be granted in LE credit based flow
 * control (credit field is 16 bit).
 */
#define LE_FLOWCTL_MAX_CREDITS 65535

/* Module-level switches: ERTM disable and enhanced credit based mode */
bool disable_ertm;
bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);

/* Locally supported L2CAP feature mask (L2CAP_FEAT_*) */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of all channels, guarded by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers defined later in the file */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

/* Forward declarations for the ERTM state machine and its timers */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
/* Address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* Address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 						   u16 cid)
92 {
93 	struct l2cap_chan *c;
94 
95 	list_for_each_entry(c, &conn->chan_l, list) {
96 		if (c->dcid == cid)
97 			return c;
98 	}
99 	return NULL;
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						   u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	list_for_each_entry(c, &conn->chan_l, list) {
108 		if (c->scid == cid)
109 			return c;
110 	}
111 	return NULL;
112 }
113 
114 /* Find channel with given SCID.
115  * Returns a reference locked channel.
116  */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c) {
124 		/* Only lock if chan reference is not 0 */
125 		c = l2cap_chan_hold_unless_zero(c);
126 		if (c)
127 			l2cap_chan_lock(c);
128 	}
129 
130 	return c;
131 }
132 
133 /* Find channel with given DCID.
134  * Returns a reference locked channel.
135  */
136 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 						 u16 cid)
138 {
139 	struct l2cap_chan *c;
140 
141 	c = __l2cap_get_chan_by_dcid(conn, cid);
142 	if (c) {
143 		/* Only lock if chan reference is not 0 */
144 		c = l2cap_chan_hold_unless_zero(c);
145 		if (c)
146 			l2cap_chan_lock(c);
147 	}
148 
149 	return c;
150 }
151 
152 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 						    u8 ident)
154 {
155 	struct l2cap_chan *c;
156 
157 	list_for_each_entry(c, &conn->chan_l, list) {
158 		if (c->ident == ident)
159 			return c;
160 	}
161 	return NULL;
162 }
163 
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
165 						      u8 src_type)
166 {
167 	struct l2cap_chan *c;
168 
169 	list_for_each_entry(c, &chan_list, global_l) {
170 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
171 			continue;
172 
173 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
174 			continue;
175 
176 		if (c->sport == psm && !bacmp(&c->src, src))
177 			return c;
178 	}
179 	return NULL;
180 }
181 
/* Bind @chan to a source PSM on address @src.
 *
 * A non-zero @psm is used verbatim and fails with -EADDRINUSE when
 * another channel on the same transport already owns it.  With @psm ==
 * 0 a free dynamic PSM is allocated: stepping by 2 through the BR/EDR
 * auto range, or by 1 through the LE dynamic range.  Returns 0 on
 * success, -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
227 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
228 {
229 	write_lock(&chan_list_lock);
230 
231 	/* Override the defaults (which are for conn-oriented) */
232 	chan->omtu = L2CAP_DEFAULT_MTU;
233 	chan->chan_type = L2CAP_CHAN_FIXED;
234 
235 	chan->scid = scid;
236 
237 	write_unlock(&chan_list_lock);
238 
239 	return 0;
240 }
241 
242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Move @chan into @state and notify its owner through the state_change
 * callback with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
/* Like l2cap_state_change() but also reports @err to the owner */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274 
/* Report @err to the channel owner without changing the channel state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
/* Arm the ERTM retransmission timer.  Skipped while the monitor timer
 * is pending, and when no retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288 
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first.  Skipped when no monitor timeout is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297 
/* Linear scan of @head for the skb whose ERTM tx sequence number equals
 * @seq; returns NULL when no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
/* Allocate and reset a sequence list able to hold at least @size
 * entries.  Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344 
/* Release the backing array allocated by l2cap_seq_list_init() */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* O(1) membership test; @seq is reduced modulo the table size, which is
 * sized for the negotiated transmit window (see l2cap_seq_list_init).
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty; callers must check
 * before popping or head/tail state would be corrupted.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: reset to the empty state */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
374 {
375 	u16 i;
376 
377 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
378 		return;
379 
380 	for (i = 0; i <= seq_list->mask; i++)
381 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382 
383 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
385 }
386 
/* Append @seq to the tail of the list in constant time.  A sequence
 * number already on the list is silently ignored, so each value can
 * appear at most once.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* First entry becomes the head, otherwise link after old tail */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Work item run when the channel timer (armed via __set_chan_timer)
 * expires: close the channel with an errno derived from its state and
 * drop the reference taken when the timer was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* Established/configuring channels (and secured outgoing
	 * connects) report ECONNREFUSED; everything else times out.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);	/* drop the timer's reference */

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialise a new channel in BT_OPEN state, with one
 * reference held by the caller.  The channel is linked on the global
 * chan_list and its delayed-work timers are wired up.  Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc_obj(*chan, GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last reference is dropped via l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an unconditional reference on @c */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
EXPORT_SYMBOL_GPL(l2cap_chan_hold);
501 
502 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
503 {
504 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
505 
506 	if (!kref_get_unless_zero(&c->kref))
507 		return NULL;
508 
509 	return c;
510 }
511 
/* Drop a reference on @c; the channel is freed via l2cap_chan_destroy()
 * once the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
519 
520 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
521 {
522 	chan->fcs  = L2CAP_FCS_CRC16;
523 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
524 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
525 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
526 	chan->remote_max_tx = chan->max_tx;
527 	chan->remote_tx_win = chan->tx_win;
528 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
529 	chan->sec_level = BT_SECURITY_LOW;
530 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
531 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
532 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
533 
534 	chan->conf_state = 0;
535 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
536 
537 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
538 }
539 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540 
541 static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
542 {
543 	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
544 
545 	if (chan->mps == 0)
546 		return 0;
547 
548 	/* If we don't know the available space in the receiver buffer, give
549 	 * enough credits for a full packet.
550 	 */
551 	if (chan->rx_avail == -1)
552 		return (chan->imtu / chan->mps) + 1;
553 
554 	/* If we know how much space is available in the receive buffer, give
555 	 * out as many credits as would fill the buffer.
556 	 */
557 	if (chan->rx_avail <= sdu_len)
558 		return 0;
559 
560 	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
561 }
562 
/* Reset LE credit based flow-control state for a new connection: clear
 * SDU reassembly state, record the peer-granted @tx_credits, derive the
 * MPS from the connection MTU (capped at imtu) and compute the initial
 * receive credits to grant.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
575 
/* Like l2cap_le_flowctl_init() but enforces the ECRED minimum MPS,
 * recomputing the receive credits when the MPS had to be raised.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
586 
/* Attach @chan to @conn, assigning CIDs and default MTU according to
 * the channel type.  Caller holds conn->lock (see l2cap_chan_add()).
 * Takes a channel reference for the connection list and, unless this is
 * a fixed channel without FLAG_HOLD_HCI_CONN, a reference on the
 * underlying hci_conn.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow spec (best effort) */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
639 
/* Attach @chan to @conn under the connection lock */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
646 
/* Detach @chan from its connection and tear it down with error @err.
 *
 * Unlinks the channel from conn->chan_l, drops the list's channel
 * reference and, where one was taken in __l2cap_chan_add(), the
 * hci_conn reference.  Mode specific queues and timers are only purged
 * once configuration had completed (CONF_NOT_COMPLETE cleared).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Nothing more to clean up before configuration completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
704 
/* Invoke @func(chan, @data) on every channel of @conn whose ident
 * matches @id.  Safe-iteration is used, so @func may remove the
 * channel from the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
715 
/* Invoke @func(chan, @data) on every channel of @conn.  Caller holds
 * conn->lock (see l2cap_chan_list()).
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
725 
/* Locked wrapper around __l2cap_chan_list(); a NULL @conn is a no-op */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
738 
/* Worker behind conn->id_addr_timer: copy the hci_conn's current
 * destination address and type into every channel of the connection
 * (the remote address on the hci_conn may have changed, e.g. after
 * identity resolution -- see the scheduling site).
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
757 
758 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
759 {
760 	struct l2cap_conn *conn = chan->conn;
761 	struct l2cap_le_conn_rsp rsp;
762 	u16 result;
763 
764 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
765 		result = L2CAP_CR_LE_AUTHORIZATION;
766 	else
767 		result = L2CAP_CR_LE_BAD_PSM;
768 
769 	l2cap_state_change(chan, BT_DISCONN);
770 
771 	rsp.dcid    = cpu_to_le16(chan->scid);
772 	rsp.mtu     = cpu_to_le16(chan->imtu);
773 	rsp.mps     = cpu_to_le16(chan->mps);
774 	rsp.credits = cpu_to_le16(chan->rx_credits);
775 	rsp.result  = cpu_to_le16(result);
776 
777 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
778 		       &rsp);
779 }
780 
/* Reject a pending enhanced credit based (ECRED) connection: move the
 * channel to BT_DISCONN and send the deferred connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
787 
788 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
789 {
790 	struct l2cap_conn *conn = chan->conn;
791 	struct l2cap_conn_rsp rsp;
792 	u16 result;
793 
794 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
795 		result = L2CAP_CR_SEC_BLOCK;
796 	else
797 		result = L2CAP_CR_BAD_PSM;
798 
799 	l2cap_state_change(chan, BT_DISCONN);
800 
801 	rsp.scid   = cpu_to_le16(chan->dcid);
802 	rsp.dcid   = cpu_to_le16(chan->scid);
803 	rsp.result = cpu_to_le16(result);
804 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 
806 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
807 }
808 
/* Close @chan for @reason, driving the shutdown appropriate for its
 * current state: listening channels are torn down directly, established
 * connection-oriented channels send a disconnect request, and channels
 * still in setup (BT_CONNECT2) reject the pending connection first.
 * Called with the channel locked (see l2cap_chan_timeout()).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Wait (with timeout) for the disconnect rsp */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject defers the rsp;
					 * channel is not deleted here.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
859 
/* Map the channel's type, PSM and security level to the HCI
 * authentication requirement (HCI_AT_*) used when securing the link.
 * Side effect: SDP (and 3DSP connectionless) channels at
 * BT_SECURITY_LOW are raised to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP connection-oriented channels use the generic
		 * mapping below.
		 */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
911 
/* Service level security: raise the link security of the channel's
 * connection to chan->sec_level.  LE links go through SMP, BR/EDR
 * links through hci_conn_security() with the derived auth type.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
926 
/* Allocate a signalling command identifier for @conn.
 * Identifiers cycle through the allowed range via conn->tx_ida,
 * resuming after the last used value.  Returns 0 only when every ident
 * is in use, which would indicate an ident leak.
 */
static int l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 max;
	int ident;

	/* LE link does not support tools like l2ping so use the full range */
	if (conn->hcon->type == LE_LINK)
		max = 255;
	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */
	else
		max = 128;

	/* Allocate ident using min as last used + 1 (cyclic) */
	ident = ida_alloc_range(&conn->tx_ida, READ_ONCE(conn->tx_ident) + 1,
				max, GFP_ATOMIC);
	/* Force min 1 to start over */
	if (ident <= 0) {
		ident = ida_alloc_range(&conn->tx_ida, 1, max, GFP_ATOMIC);
		if (ident <= 0) {
			/* If all idents are in use, log an error, this is
			 * extremely unlikely to happen and would indicate a bug
			 * in the code that idents are not being freed properly.
			 */
			BT_ERR("Unable to allocate ident: %d", ident);
			return 0;
		}
	}

	WRITE_ONCE(conn->tx_ident, ident);

	return ident;
}
963 
/* Send @skb on the connection's ACL channel, dropping it instead when
 * the underlying hci_conn has already gone away.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
973 
/* Build and send a signalling command on @conn.  Commands are sent at
 * maximum priority and force-active; a failed build is silently
 * dropped.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
998 
/* Hand a data skb for @chan to the HCI layer, selecting flushable or
 * non-flushable ACL start flags from the link type and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1021 
1022 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1023 {
1024 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1025 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1026 
1027 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1028 		/* S-Frame */
1029 		control->sframe = 1;
1030 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1031 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1032 
1033 		control->sar = 0;
1034 		control->txseq = 0;
1035 	} else {
1036 		/* I-Frame */
1037 		control->sframe = 0;
1038 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1039 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1040 
1041 		control->poll = 0;
1042 		control->super = 0;
1043 	}
1044 }
1045 
1046 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1047 {
1048 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1049 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1050 
1051 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1052 		/* S-Frame */
1053 		control->sframe = 1;
1054 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1055 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1056 
1057 		control->sar = 0;
1058 		control->txseq = 0;
1059 	} else {
1060 		/* I-Frame */
1061 		control->sframe = 0;
1062 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1063 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1064 
1065 		control->poll = 0;
1066 		control->super = 0;
1067 	}
1068 }
1069 
/* Strip the control field (extended or enhanced layout, depending on
 * FLAG_EXT_CTRL) from the front of @skb and decode it into the skb's
 * l2cap control block.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1083 
1084 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1085 {
1086 	u32 packed;
1087 
1088 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1089 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1090 
1091 	if (control->sframe) {
1092 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1093 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1094 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1095 	} else {
1096 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1097 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1098 	}
1099 
1100 	return packed;
1101 }
1102 
1103 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1104 {
1105 	u16 packed;
1106 
1107 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1108 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1109 
1110 	if (control->sframe) {
1111 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1112 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1113 		packed |= L2CAP_CTRL_FRAME_TYPE;
1114 	} else {
1115 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1116 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1117 	}
1118 
1119 	return packed;
1120 }
1121 
1122 static inline void __pack_control(struct l2cap_chan *chan,
1123 				  struct l2cap_ctrl *control,
1124 				  struct sk_buff *skb)
1125 {
1126 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1127 		put_unaligned_le32(__pack_extended_control(control),
1128 				   skb->data + L2CAP_HDR_SIZE);
1129 	} else {
1130 		put_unaligned_le16(__pack_enhanced_control(control),
1131 				   skb->data + L2CAP_HDR_SIZE);
1132 	}
1133 }
1134 
1135 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1136 {
1137 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1138 		return L2CAP_EXT_HDR_SIZE;
1139 	else
1140 		return L2CAP_ENH_HDR_SIZE;
1141 }
1142 
/* Allocate and build a complete S-frame PDU: basic L2CAP header, the
 * pre-packed @control field (16 or 32 bit depending on the channel) and
 * an optional FCS.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length covers control field (+FCS) only */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS is computed over everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1175 
/* Build and transmit one S-frame described by @control, updating the
 * ERTM bookkeeping that goes with it: a pending F-bit is folded into
 * the frame, local-busy (RNR) state is tracked, and the acknowledged
 * sequence number is recorded (except for SREJ, which acks nothing).
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit rides on the next S-frame that is not a poll */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges everything up to reqseq */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1213 
1214 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1215 {
1216 	struct l2cap_ctrl control;
1217 
1218 	BT_DBG("chan %p, poll %d", chan, poll);
1219 
1220 	memset(&control, 0, sizeof(control));
1221 	control.sframe = 1;
1222 	control.poll = poll;
1223 
1224 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1225 		control.super = L2CAP_SUPER_RNR;
1226 	else
1227 		control.super = L2CAP_SUPER_RR;
1228 
1229 	control.reqseq = chan->buffer_seq;
1230 	l2cap_send_sframe(chan, &control);
1231 }
1232 
1233 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1234 {
1235 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1236 		return true;
1237 
1238 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1239 }
1240 
/* Send an L2CAP Connection Request for @chan.  Allocates a fresh ident
 * for matching the response and marks the channel as connect-pending so
 * duplicate requests are suppressed until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1255 
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready().
 * Credit-based channels with no TX credits are suspended first so the
 * owner does not try to send before credits arrive.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1281 
/* Send an LE credit-based connection request for @chan.  Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request is sent at most once per channel.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the connection MTU when none was configured */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1307 
/* Context used while building a single ECRED connection request that
 * batches several deferred channels into one PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* at most 5 source CIDs fit in one request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that triggered the request */
	struct pid *pid;		/* peer PID used to group deferred channels */
	int count;			/* number of scid[] entries filled in */
};
1317 
1318 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1319 {
1320 	struct l2cap_ecred_conn_data *conn = data;
1321 	struct pid *pid;
1322 
1323 	if (chan == conn->chan)
1324 		return;
1325 
1326 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1327 		return;
1328 
1329 	pid = chan->ops->get_peer_pid(chan);
1330 
1331 	/* Only add deferred channels with the same PID/PSM */
1332 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1333 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1334 		return;
1335 
1336 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1337 		return;
1338 
1339 	l2cap_ecred_init(chan, 0);
1340 
1341 	/* Set the same ident so we can match on the rsp */
1342 	chan->ident = conn->chan->ident;
1343 
1344 	/* Include all channels deferred */
1345 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1346 
1347 	conn->count++;
1348 }
1349 
/* Send an enhanced credit-based (ECRED) connection request for @chan.
 * Other channels on the same connection that were deferred with the
 * same PID/PSM are batched into the same request via
 * l2cap_ecred_defer_connect().  Guarded by FLAG_ECRED_CONN_REQ_SENT so
 * the request is sent at most once per channel.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* Command length covers the header plus one CID per channel */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1382 
/* Begin LE channel setup once the channel's required security level is
 * satisfied.  Channels without a PSM (fixed channels) become ready
 * immediately; connection-oriented channels in BT_CONNECT issue the
 * appropriate (ECRED or LE) connect request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1402 
1403 static void l2cap_start_connection(struct l2cap_chan *chan)
1404 {
1405 	if (chan->conn->hcon->type == LE_LINK) {
1406 		l2cap_le_start(chan);
1407 	} else {
1408 		l2cap_send_conn_req(chan);
1409 	}
1410 }
1411 
/* Ask the remote for its L2CAP feature mask.  Sent at most once per
 * connection (tracked by L2CAP_INFO_FEAT_MASK_REQ_SENT); a delayed work
 * item bounds how long we wait for the response.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1429 
1430 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1431 				     struct l2cap_chan *chan)
1432 {
1433 	/* The minimum encryption key size needs to be enforced by the
1434 	 * host stack before establishing any L2CAP connections. The
1435 	 * specification in theory allows a minimum of 1, but to align
1436 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1437 	 *
1438 	 * This check might also be called for unencrypted connections
1439 	 * that have no key size requirements. Ensure that the link is
1440 	 * actually encrypted before enforcing a key size.
1441 	 */
1442 	int min_key_size = hcon->hdev->min_enc_key_size;
1443 
1444 	/* On FIPS security level, key size must be 16 bytes */
1445 	if (chan->sec_level == BT_SECURITY_FIPS)
1446 		min_key_size = 16;
1447 
1448 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1449 		hcon->enc_key_size >= min_key_size);
1450 }
1451 
/* Drive channel setup for @chan.  LE links go straight to LE setup.
 * BR/EDR links first complete the information request procedure and the
 * channel's security requirements, then either start the connection or
 * arm the disconnect timer if the encryption key is too short.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Wait for the feature mask response before proceeding */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1478 
1479 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1480 {
1481 	u32 local_feat_mask = l2cap_feat_mask;
1482 	if (!disable_ertm)
1483 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1484 
1485 	switch (mode) {
1486 	case L2CAP_MODE_ERTM:
1487 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1488 	case L2CAP_MODE_STREAMING:
1489 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1490 	default:
1491 		return 0x00;
1492 	}
1493 }
1494 
/* Send a Disconnection Request for @chan, stop the ERTM timers that are
 * only meaningful while connected, and move the channel to BT_DISCONN
 * with @err recorded as the channel error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1516 
/* ---- L2CAP connections ---- */

/* Advance the setup state machine of every channel on @conn.  Each
 * channel is handled under its own lock: fixed channels are marked
 * ready, channels in BT_CONNECT are (security permitting) connected or
 * closed, and channels in BT_CONNECT2 get their pending incoming
 * connection answered.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels whose requested mode neither side
			 * supports and that may not fall back (state 2 device).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only follow a successful response with our first
			 * configuration request, and only send it once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1593 
/* Post-connect handling for LE links: kick off pairing for outgoing
 * connections and, when we are the peripheral, request a connection
 * parameter update if the current interval is outside the configured
 * range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1626 
/* Invoked once the underlying HCI link is usable: starts setup for
 * every channel already attached to @conn and queues the pending RX
 * work so frames received during connection establishment get
 * processed.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels only become ready once the feature
			 * mask exchange has completed.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1662 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that insisted on reliability are errored */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}
}
1675 
/* Information request timer expired: stop waiting for the remote's
 * feature mask, mark the procedure done and continue channel setup with
 * whatever information we have.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1688 
1689 /*
1690  * l2cap_user
1691  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1692  * callback is called during registration. The ->remove callback is called
1693  * during unregistration.
1694  * An l2cap_user object can either be explicitly unregistered or when the
1695  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1696  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1697  * External modules must own a reference to the l2cap_conn object if they intend
1698  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1699  * any time if they don't.
1700  */
1701 
/* Register @user on @conn.  Returns 0 on success, -EINVAL if the user
 * is already registered somewhere, -ENODEV if the connection has been
 * deleted, or the error from the user's probe callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects under conn->lock, and we use the same lock here
	 * to protect access to conn->users and conn->hchan.
	 */

	mutex_lock(&conn->lock);

	/* A non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	mutex_unlock(&conn->lock);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1736 EXPORT_SYMBOL(l2cap_register_user);
1737 
1738 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1739 {
1740 	mutex_lock(&conn->lock);
1741 
1742 	if (list_empty(&user->list))
1743 		goto out_unlock;
1744 
1745 	list_del_init(&user->list);
1746 	user->remove(conn, user);
1747 
1748 out_unlock:
1749 	mutex_unlock(&conn->lock);
1750 }
1751 EXPORT_SYMBOL(l2cap_unregister_user);
1752 
/* Detach and notify every registered l2cap_user.  The list head is
 * re-read each iteration, so this stays correct even if a remove
 * callback modifies conn->users.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1763 
/* Tear down the L2CAP state attached to @hcon: cancel pending work and
 * timers, notify users, close and delete every channel with @err,
 * detach from the HCI channel and drop the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	ida_destroy(&conn->tx_ida);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives deletion until unlock */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1819 
/* kref release callback: drop the hci_conn reference and free @conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1827 
/* Take a reference on @conn; paired with l2cap_conn_put() */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1834 
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1840 
1841 /* ---- Socket interface ---- */
1842 
/* Find a channel on @psm matching source/destination bdaddr and link
 * type.  An exact address match wins; otherwise the closest wildcard
 * (BDADDR_ANY) match is returned.  The returned channel has a reference
 * held (l2cap_chan_hold_unless_zero); NULL when nothing matches.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1896 
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX
 * state machine.  The trailing l2cap_chan_put() balances the hold taken
 * when this delayed work was armed (presumably in __set_monitor_timer —
 * not visible here).
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* The channel may have been detached from its connection already */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1917 
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine.  The trailing l2cap_chan_put() balances the hold
 * taken when this delayed work was armed (presumably in
 * __set_retrans_timer — not visible here).
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* The channel may have been detached from its connection already */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1937 
/* Transmit queued frames in Streaming Mode.  Each frame is stamped with
 * the next TX sequence number, gets its control field (and FCS when
 * enabled) written, and is sent immediately; nothing is retained for
 * retransmission.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode carries no acknowledgements */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1973 
/* Transmit new I-frames in ERTM, limited by the remote's TX window and
 * the TX state machine being in XMIT.  Each frame carries the current
 * ack (reqseq) and a pending F-bit if one is due; a clone is sent while
 * the original stays queued for retransmission.  Returns the number of
 * frames sent, or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote signalled RNR: hold all new transmissions */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2040 
/* Retransmit every frame queued on chan->retrans_list.  Enforces the
 * max_tx retry limit (sending a disconnect when exceeded), refreshes
 * reqseq and the F-bit in each frame's control field, recomputes the
 * FCS, and sends a fresh clone (or a writable copy for already-cloned
 * skbs).
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote signalled RNR: hold retransmissions */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the retransmitted frame */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2118 
/* Queue the single sequence number in control->reqseq for
 * retransmission and send it immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2127 
/* Rebuild the retransmission list with every unacked frame starting at
 * control->reqseq and resend them, unless the remote is busy.  A poll
 * (P-bit) in @control arms the F-bit for the retransmitted frames.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to resend (reqseq), then queue
		 * everything up to — but not including — tx_send_head.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2161 
/* Acknowledge received I-frames.  When locally busy, send an RNR.
 * Otherwise first try to piggyback the ack on outgoing I-frames; if
 * unacked frames remain and reach roughly 3/4 of the ack window, send
 * an explicit RR, else (re)arm the ack timer to batch the ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2211 
/* Copy @len bytes of user data from @msg into @skb: @count bytes go
 * into the linear area, the rest into newly allocated continuation
 * skbs chained on frag_list (each capped at the HCI MTU).  Returns
 * bytes copied or a negative error.  On error, fragments already
 * linked stay attached to @skb so the caller's kfree_skb() frees the
 * whole chain.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a failed copy below still leaves
		 * the fragment reachable from @skb for cleanup.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account the fragment's bytes in the head skb totals. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2255 
/* Build a connectionless PDU: L2CAP header, 2-byte PSM, then the user
 * payload from @msg.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part plus header must fit in one HCI MTU; the rest is
	 * copied into continuation fragments by l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2287 
2288 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2289 					      struct msghdr *msg, size_t len)
2290 {
2291 	struct l2cap_conn *conn = chan->conn;
2292 	struct sk_buff *skb;
2293 	int err, count;
2294 	struct l2cap_hdr *lh;
2295 
2296 	BT_DBG("chan %p len %zu", chan, len);
2297 
2298 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2299 
2300 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2301 				   msg->msg_flags & MSG_DONTWAIT);
2302 	if (IS_ERR(skb))
2303 		return skb;
2304 
2305 	/* Create L2CAP header */
2306 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2307 	lh->cid = cpu_to_le16(chan->dcid);
2308 	lh->len = cpu_to_le16(len);
2309 
2310 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2311 	if (unlikely(err < 0)) {
2312 		kfree_skb(skb);
2313 		return ERR_PTR(err);
2314 	}
2315 	return skb;
2316 }
2317 
/* Build one ERTM/streaming I-frame: L2CAP header, zeroed control field
 * (filled at transmit time), optional SDU-length field when @sdulen is
 * non-zero (first segment of a segmented SDU), then payload.  Space
 * for the FCS is reserved via hlen; the FCS itself is appended later.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control field size differs between enhanced and extended mode. */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2371 
/* Segment an SDU from @msg into I-frame PDUs appended to @seg_queue.
 * A multi-PDU SDU starts with SAR=START (carrying the total SDU
 * length) and continues with CONTINUE/END; a single-PDU SDU is marked
 * UNSEGMENTED.  Returns 0 or a negative error; on error the queue is
 * purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			/* Drop any PDUs already built for this SDU. */
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the SDU length. */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2437 
/* Build one LE flow-control (credit-based) PDU: L2CAP header, an
 * optional SDU-length field when @sdulen is non-zero (first PDU of the
 * SDU), then payload from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2480 
2481 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2482 				struct sk_buff_head *seg_queue,
2483 				struct msghdr *msg, size_t len)
2484 {
2485 	struct sk_buff *skb;
2486 	size_t pdu_len;
2487 	u16 sdu_len;
2488 
2489 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2490 
2491 	sdu_len = len;
2492 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2493 
2494 	while (len > 0) {
2495 		if (len <= pdu_len)
2496 			pdu_len = len;
2497 
2498 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2499 		if (IS_ERR(skb)) {
2500 			__skb_queue_purge(seg_queue);
2501 			return PTR_ERR(skb);
2502 		}
2503 
2504 		__skb_queue_tail(seg_queue, skb);
2505 
2506 		len -= pdu_len;
2507 
2508 		if (sdu_len) {
2509 			sdu_len = 0;
2510 			pdu_len += L2CAP_SDULEN_SIZE;
2511 		}
2512 	}
2513 
2514 	return 0;
2515 }
2516 
2517 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2518 {
2519 	int sent = 0;
2520 
2521 	BT_DBG("chan %p", chan);
2522 
2523 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2524 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2525 		chan->tx_credits--;
2526 		sent++;
2527 	}
2528 
2529 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2530 	       skb_queue_len(&chan->tx_q));
2531 }
2532 
2533 static void l2cap_tx_timestamp(struct sk_buff *skb,
2534 			       const struct sockcm_cookie *sockc,
2535 			       size_t len)
2536 {
2537 	struct sock *sk = skb ? skb->sk : NULL;
2538 
2539 	if (sk && sk->sk_type == SOCK_STREAM)
2540 		hci_setup_tx_timestamp(skb, len, sockc);
2541 	else
2542 		hci_setup_tx_timestamp(skb, 1, sockc);
2543 }
2544 
2545 static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
2546 				   const struct sockcm_cookie *sockc,
2547 				   size_t len)
2548 {
2549 	struct sk_buff *skb = skb_peek(queue);
2550 	struct sock *sk = skb ? skb->sk : NULL;
2551 
2552 	if (sk && sk->sk_type == SOCK_STREAM)
2553 		l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len);
2554 	else
2555 		l2cap_tx_timestamp(skb, sockc, len);
2556 }
2557 
/* Send @len bytes from @msg on @chan, dispatching on channel type and
 * mode (connectionless, LE/extended flow control, basic, ERTM or
 * streaming).  Returns the number of bytes accepted or a negative
 * error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been closed while segmenting (which can
		 * block on memory allocation); drop the segments if so.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop accepting data until replenished. */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2671 
/* Send an SREJ for every sequence number between expected_tx_seq and
 * @txseq that has not already been received out of order, record each
 * in srej_list, then advance expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already buffered in srej_q need no request. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2694 
2695 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2696 {
2697 	struct l2cap_ctrl control;
2698 
2699 	BT_DBG("chan %p", chan);
2700 
2701 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2702 		return;
2703 
2704 	memset(&control, 0, sizeof(control));
2705 	control.sframe = 1;
2706 	control.super = L2CAP_SUPER_SREJ;
2707 	control.reqseq = chan->srej_list.tail;
2708 	l2cap_send_sframe(chan, &control);
2709 }
2710 
/* Re-send SREJ frames for every outstanding missing sequence number
 * except @txseq (which has just arrived).  Entries are popped and
 * re-appended, rotating the list exactly once.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		/* Popping @txseq removes it without re-appending; an empty
		 * list yields L2CAP_SEQ_LIST_CLEAR and also ends the pass.
		 */
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2736 
/* Process an incoming ReqSeq acknowledgment: free every tx-queued
 * frame with a sequence number before @reqseq, update expected_ack_seq
 * and stop the retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* A frame may already be gone from tx_q (e.g. previously
		 * acked via SREJ), so a miss here is not an error.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* No more frames awaiting ack: the retransmit timer is moot. */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2768 
2769 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2770 {
2771 	BT_DBG("chan %p", chan);
2772 
2773 	chan->expected_tx_seq = chan->buffer_seq;
2774 	l2cap_seq_list_clear(&chan->srej_list);
2775 	skb_queue_purge(&chan->srej_q);
2776 	chan->rx_state = L2CAP_RX_STATE_RECV;
2777 }
2778 
/* ERTM transmit state machine, XMIT state: normal operation, data is
 * transmitted as it is queued.  Handles data requests, local busy
 * transitions, incoming acks, and poll/retransmission timeouts.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Track the first never-sent frame, then queue and send. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* With CONN_LOCAL_BUSY set this sends an RNR. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy; poll with RR(P=1)
			 * and wait for the final response in WAIT_F.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and move to WAIT_F for its F-bit reply. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll before retransmitting. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2850 
2851 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2852 				  struct l2cap_ctrl *control,
2853 				  struct sk_buff_head *skbs, u8 event)
2854 {
2855 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2856 	       event);
2857 
2858 	switch (event) {
2859 	case L2CAP_EV_DATA_REQUEST:
2860 		if (chan->tx_send_head == NULL)
2861 			chan->tx_send_head = skb_peek(skbs);
2862 		/* Queue data, but don't send. */
2863 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2864 		break;
2865 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2866 		BT_DBG("Enter LOCAL_BUSY");
2867 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2868 
2869 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2870 			/* The SREJ_SENT state must be aborted if we are to
2871 			 * enter the LOCAL_BUSY state.
2872 			 */
2873 			l2cap_abort_rx_srej_sent(chan);
2874 		}
2875 
2876 		l2cap_send_ack(chan);
2877 
2878 		break;
2879 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2880 		BT_DBG("Exit LOCAL_BUSY");
2881 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2882 
2883 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2884 			struct l2cap_ctrl local_control;
2885 			memset(&local_control, 0, sizeof(local_control));
2886 			local_control.sframe = 1;
2887 			local_control.super = L2CAP_SUPER_RR;
2888 			local_control.poll = 1;
2889 			local_control.reqseq = chan->buffer_seq;
2890 			l2cap_send_sframe(chan, &local_control);
2891 
2892 			chan->retry_count = 1;
2893 			__set_monitor_timer(chan);
2894 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2895 		}
2896 		break;
2897 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2898 		l2cap_process_reqseq(chan, control->reqseq);
2899 		fallthrough;
2900 
2901 	case L2CAP_EV_RECV_FBIT:
2902 		if (control && control->final) {
2903 			__clear_monitor_timer(chan);
2904 			if (chan->unacked_frames > 0)
2905 				__set_retrans_timer(chan);
2906 			chan->retry_count = 0;
2907 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2908 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2909 		}
2910 		break;
2911 	case L2CAP_EV_EXPLICIT_POLL:
2912 		/* Ignore */
2913 		break;
2914 	case L2CAP_EV_MONITOR_TO:
2915 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2916 			l2cap_send_rr_or_rnr(chan, 1);
2917 			__set_monitor_timer(chan);
2918 			chan->retry_count++;
2919 		} else {
2920 			l2cap_send_disconn_req(chan, ECONNABORTED);
2921 		}
2922 		break;
2923 	default:
2924 		break;
2925 	}
2926 }
2927 
2928 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2929 		     struct sk_buff_head *skbs, u8 event)
2930 {
2931 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2932 	       chan, control, skbs, event, chan->tx_state);
2933 
2934 	switch (chan->tx_state) {
2935 	case L2CAP_TX_STATE_XMIT:
2936 		l2cap_tx_state_xmit(chan, control, skbs, event);
2937 		break;
2938 	case L2CAP_TX_STATE_WAIT_F:
2939 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2940 		break;
2941 	default:
2942 		/* Ignore event */
2943 		break;
2944 	}
2945 }
2946 
/* Feed a received frame's ReqSeq and F-bit into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2953 
/* Feed only a received frame's F-bit into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2960 
2961 /* Copy frame to all raw sockets on that connection */
2962 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2963 {
2964 	struct sk_buff *nskb;
2965 	struct l2cap_chan *chan;
2966 
2967 	BT_DBG("conn %p", conn);
2968 
2969 	list_for_each_entry(chan, &conn->chan_l, list) {
2970 		if (chan->chan_type != L2CAP_CHAN_RAW)
2971 			continue;
2972 
2973 		/* Don't send frame to the channel it came from */
2974 		if (bt_cb(skb)->l2cap.chan == chan)
2975 			continue;
2976 
2977 		nskb = skb_clone(skb, GFP_KERNEL);
2978 		if (!nskb)
2979 			continue;
2980 		if (chan->ops->recv(chan, nskb))
2981 			kfree_skb(nskb);
2982 	}
2983 }
2984 
2985 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header (signalling CID chosen
 * by link type), command header, then @dlen bytes of @data.  Payload
 * beyond the HCI MTU is split into frag_list continuation skbs.
 * Returns the skb or NULL on allocation failure or undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* The first fragment must hold at least both headers. */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE and BR/EDR use different fixed signalling channels. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy whatever payload fits after the headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and any fragments already chained. */
	kfree_skb(skb);
	return NULL;
}
3051 
/* Parse one configuration option at *ptr: report its type and length,
 * load 1/2/4-byte values into *val (larger/other sizes yield a pointer
 * to the raw bytes instead), and advance *ptr past the option.
 * Returns the total number of bytes consumed.
 *
 * NOTE(review): no bounds checking here — assumes the caller verified
 * that opt->len bytes are present in the buffer; confirm at call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3085 
/* Append one configuration option at *ptr and advance *ptr past it.
 * For len 1/2/4, @val is the integer value; otherwise @val is a
 * pointer to @len bytes to copy.  If fewer than @size bytes remain the
 * option is silently dropped (the response is truncated, not
 * overflowed).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the output buffer: skip the option. */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val carries a data pointer. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3118 
/* Append an Extended Flow Specification option describing this
 * channel's service type and SDU parameters.  Only ERTM and streaming
 * modes carry an EFS; other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses fixed best-effort parameters. */
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3149 
/* Deferred-ack work item: when the ack timer fires with received
 * frames still unacknowledged, send an explicit RR/RNR.  The final
 * l2cap_chan_put() drops the channel reference held for this work
 * (presumably taken when the timer was armed — see __set_ack_timer).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3169 
/* Reset per-channel sequence state and queues for a (re)configured
 * channel.  For ERTM mode, additionally initialize the RX/TX state
 * machines and allocate the SREJ and retransmit sequence lists.
 * Returns 0 or a negative error (with partial allocations rolled
 * back).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Non-ERTM modes need only the cleared counters and tx queue. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* Roll back the first list if the second allocation fails. */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3205 
3206 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3207 {
3208 	switch (mode) {
3209 	case L2CAP_MODE_STREAMING:
3210 	case L2CAP_MODE_ERTM:
3211 		if (l2cap_mode_supported(mode, remote_feat_mask))
3212 			return mode;
3213 		fallthrough;
3214 	default:
3215 		return L2CAP_MODE_BASIC;
3216 	}
3217 }
3218 
3219 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3220 {
3221 	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3222 }
3223 
3224 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3225 {
3226 	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3227 }
3228 
3229 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3230 				      struct l2cap_conf_rfc *rfc)
3231 {
3232 	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3233 	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3234 }
3235 
3236 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3237 {
3238 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3239 	    __l2cap_ews_supported(chan->conn)) {
3240 		/* use extended control field */
3241 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3242 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3243 	} else {
3244 		chan->tx_win = min_t(u16, chan->tx_win,
3245 				     L2CAP_DEFAULT_TX_WINDOW);
3246 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3247 	}
3248 	chan->ack_win = chan->tx_win;
3249 }
3250 
3251 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3252 {
3253 	struct hci_conn *conn = chan->conn->hcon;
3254 
3255 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3256 
3257 	/* The 2-DH1 packet has between 2 and 56 information bytes
3258 	 * (including the 2-byte payload header)
3259 	 */
3260 	if (!(conn->pkt_type & HCI_2DH1))
3261 		chan->imtu = 54;
3262 
3263 	/* The 3-DH1 packet has between 2 and 85 information bytes
3264 	 * (including the 2-byte payload header)
3265 	 */
3266 	if (!(conn->pkt_type & HCI_3DH1))
3267 		chan->imtu = 83;
3268 
3269 	/* The 2-DH3 packet has between 2 and 369 information bytes
3270 	 * (including the 2-byte payload header)
3271 	 */
3272 	if (!(conn->pkt_type & HCI_2DH3))
3273 		chan->imtu = 367;
3274 
3275 	/* The 3-DH3 packet has between 2 and 554 information bytes
3276 	 * (including the 2-byte payload header)
3277 	 */
3278 	if (!(conn->pkt_type & HCI_3DH3))
3279 		chan->imtu = 552;
3280 
3281 	/* The 2-DH5 packet has between 2 and 681 information bytes
3282 	 * (including the 2-byte payload header)
3283 	 */
3284 	if (!(conn->pkt_type & HCI_2DH5))
3285 		chan->imtu = 679;
3286 
3287 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3288 	 * (including the 2-byte payload header)
3289 	 */
3290 	if (!(conn->pkt_type & HCI_3DH5))
3291 		chan->imtu = 1021;
3292 }
3293 
/* Build the local L2CAP_CONFIGURATION_REQ payload for a BR/EDR channel.
 *
 * @chan:      channel being configured
 * @data:      output buffer, starting with struct l2cap_conf_req
 * @data_size: size of @data; each l2cap_add_conf_opt() call is bounded
 *             by endptr so options never overflow the buffer
 *
 * Returns the number of bytes written into @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)selected on the very first request/response
	 * exchange; afterwards jump straight to emitting the options.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE pins the mode: keep it as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Pick the best mode both sides support */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 requests the largest MTU the ACL packet
		 * types allow (see l2cap_mtu_auto())
		 */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC option when the
		 * remote supports ERTM or streaming; otherwise basic mode
		 * is implied.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a maximal frame (extended header +
		 * SDU length + FCS) still fits in the ACL MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* With extended control fields the full tx_win goes in a
		 * separate extended-window-size option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3419 
/* Parse a complete L2CAP_CONFIGURATION_REQ accumulated in chan->conf_req
 * and build the matching L2CAP_CONFIGURATION_RSP in @data.
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the requested configuration cannot be accepted at all (the caller
 * then disconnects the channel).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: walk every option the remote sent.  Options with a
	 * wrong length are silently skipped; an unknown non-hint option
	 * turns the result into L2CAP_CONF_UNKNOWN.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not accepted here */
			return -ECONNREFUSED;

		default:
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode may only be (re)negotiated during the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Device-pinned mode must match what the remote asked for */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed renegotiation round */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must agree unless either side is
			 * "no traffic"; a single mismatch is echoed back,
			 * repeated mismatch refuses the connection.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits our ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3662 
/* Parse an L2CAP_CONFIGURATION_RSP from the remote and build, in @data,
 * a follow-up configuration request carrying the adjusted values.
 *
 * *result may be updated (e.g. to L2CAP_CONF_UNACCEPT for a too-small
 * MTU).  Returns the number of request bytes written, or -ECONNREFUSED
 * when the remote's adjustments are unacceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Honour the adjusted MTU but never go below the
			 * spec minimum; a too-small value is rejected.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A device-pinned mode cannot be renegotiated */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service type must match ours unless either side
			 * is "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be switched by the response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3780 
3781 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3782 				u16 result, u16 flags)
3783 {
3784 	struct l2cap_conf_rsp *rsp = data;
3785 	void *ptr = rsp->data;
3786 
3787 	BT_DBG("chan %p", chan);
3788 
3789 	rsp->scid   = cpu_to_le16(chan->dcid);
3790 	rsp->result = cpu_to_le16(result);
3791 	rsp->flags  = cpu_to_le16(flags);
3792 
3793 	return ptr - data;
3794 }
3795 
3796 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3797 {
3798 	struct l2cap_le_conn_rsp rsp;
3799 	struct l2cap_conn *conn = chan->conn;
3800 
3801 	BT_DBG("chan %p", chan);
3802 
3803 	rsp.dcid    = cpu_to_le16(chan->scid);
3804 	rsp.mtu     = cpu_to_le16(chan->imtu);
3805 	rsp.mps     = cpu_to_le16(chan->mps);
3806 	rsp.credits = cpu_to_le16(chan->rx_credits);
3807 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3808 
3809 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3810 		       &rsp);
3811 }
3812 
/* Channel-list iterator callback: tally the acceptance state of every
 * channel sharing a deferred ECRED ident.  On return *result is > 0 if
 * any channel is still pending accept, < 0 (-ECONNREFUSED) if any was
 * refused, and unchanged if all are connected.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once a refusal was seen; skip outgoing channels */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3833 
/* Scratch state used while assembling a deferred L2CAP_ECRED_CONN_RSP.
 *
 * @pdu:   response header followed by room for up to
 *         L2CAP_ECRED_MAX_CID destination CIDs (one per accepted channel)
 * @count: number of CIDs filled into @pdu so far
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;
};
3841 
/* Channel-list iterator callback: for each deferred channel sharing the
 * same ECRED ident, append its CID to the response being built in @data
 * (on aggregate success) or tear the channel down (on failure).
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* rsp->pdu.rsp is the hdr member of a flexible-array-carrying
	 * struct l2cap_ecred_conn_rsp; recover it to reach dcid[].
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3864 
/* Send the deferred L2CAP_ECRED_CONN_RSP once every channel created by
 * the same request (shared ident) has been accepted or refused.  Does
 * nothing while any sibling channel is still pending acceptance.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident 0 means no response is owed for this channel */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: at least one channel still in BT_CONNECT2 */
	if (result > 0)
		return;

	/* result < 0: some channel was refused */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3900 
/* Send the deferred BR/EDR L2CAP_CONN_RSP (success) for a channel that
 * userspace has accepted, then kick off the first configuration request
 * unless one was already sent.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller gets to send the configuration request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
3925 
/* Extract the RFC and extended-window-size options from a successful
 * configuration response and apply them to the channel (ERTM timeouts,
 * MPS and ack window).
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM and streaming mode carry these options */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The EWS option takes precedence when extended control
		 * fields are in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3981 
3982 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3983 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3984 				    u8 *data)
3985 {
3986 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3987 
3988 	if (cmd_len < sizeof(*rej))
3989 		return -EPROTO;
3990 
3991 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3992 		return 0;
3993 
3994 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3995 	    cmd->ident == conn->info_ident) {
3996 		cancel_delayed_work(&conn->info_timer);
3997 
3998 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3999 		conn->info_ident = 0;
4000 
4001 		l2cap_conn_start(conn);
4002 	}
4003 
4004 	return 0;
4005 }
4006 
/* Handle an incoming L2CAP_CONNECTION_REQ on a BR/EDR link: look up a
 * listening channel for the PSM, run security checks, create the new
 * channel and send an L2CAP_CONNECTION_RSP (@rsp_code) back.  May also
 * trigger the information request and the first configuration request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace will accept/reject later */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer PEND and request it */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* pchan == NULL means nothing was locked above */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4137 
4138 static int l2cap_connect_req(struct l2cap_conn *conn,
4139 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4140 {
4141 	if (cmd_len < sizeof(struct l2cap_conn_req))
4142 		return -EPROTO;
4143 
4144 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
4145 	return 0;
4146 }
4147 
/* Handle an L2CAP_CONN_RSP for an outgoing connection: on success move
 * the channel to BT_CONFIG and send the first configuration request; on
 * PEND just record it; on any error tear the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must allocate a dynamic CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* Find the channel by our CID or, if none was echoed back, by the
	 * ident of the original request.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid that is already in use */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first configuration request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4225 
4226 static inline void set_default_fcs(struct l2cap_chan *chan)
4227 {
4228 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4229 	 * sides request it.
4230 	 */
4231 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4232 		chan->fcs = L2CAP_FCS_NONE;
4233 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4234 		chan->fcs = L2CAP_FCS_CRC16;
4235 }
4236 
/* Send a successful L2CAP_CONF_RSP after the EFS compatibility check:
 * clears the local "pending" state and marks output configuration done.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4252 
4253 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4254 				   u16 scid, u16 dcid)
4255 {
4256 	struct l2cap_cmd_rej_cid rej;
4257 
4258 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4259 	rej.scid = __cpu_to_le16(scid);
4260 	rej.dcid = __cpu_to_le16(dcid);
4261 
4262 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4263 }
4264 
/* Handle an incoming L2CAP_CONFIGURATION_REQ: accumulate the option data
 * (it may arrive split across continuation packets), then parse it,
 * answer, and finish channel setup once both directions are configured.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: complete channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4373 
/* Handle an incoming L2CAP Configure Response (BR/EDR).
 *
 * Drives the local side of the configuration state machine: on success the
 * remote's accepted options are recorded; on PENDING we may answer with our
 * own EFS response; on UNKNOWN/UNACCEPT we retry with adjusted options a
 * bounded number of times; anything else tears the channel down.
 *
 * Returns 0 in most cases, -EPROTO on a truncated PDU, or a negative error
 * from l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	/* Length of the option payload following the fixed response header */
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked and with a reference held (released at
	 * "done"); an unknown SCID is silently ignored.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Remote accepted: latch the negotiated RFC options */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we also answered PENDING earlier, send the real (EFS)
		 * response now.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, but only a bounded number of
		 * times; beyond the limit fall through and give up.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* The rejected options must fit a fresh request PDU */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable: flag the error and disconnect the channel */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments are coming; wait for the final one */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish ERTM setup (if any) and mark
	 * the channel ready, or disconnect on init failure.
	 */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4480 
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response and tear down the matching channel.
 *
 * Returns 0 (including for unknown CIDs, which are answered with a command
 * reject) or -EPROTO on a malformed PDU.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid names our local scid.  On success the channel is
	 * returned locked and with a reference held.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CID pair back from our point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Mark the owning socket shut down before removing the channel */
	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4519 
4520 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4521 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4522 				       u8 *data)
4523 {
4524 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4525 	u16 dcid, scid;
4526 	struct l2cap_chan *chan;
4527 
4528 	if (cmd_len != sizeof(*rsp))
4529 		return -EPROTO;
4530 
4531 	scid = __le16_to_cpu(rsp->scid);
4532 	dcid = __le16_to_cpu(rsp->dcid);
4533 
4534 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4535 
4536 	chan = l2cap_get_chan_by_scid(conn, scid);
4537 	if (!chan) {
4538 		return 0;
4539 	}
4540 
4541 	if (chan->state != BT_DISCONN) {
4542 		l2cap_chan_unlock(chan);
4543 		l2cap_chan_put(chan);
4544 		return 0;
4545 	}
4546 
4547 	l2cap_chan_del(chan, 0);
4548 
4549 	chan->ops->close(chan);
4550 
4551 	l2cap_chan_unlock(chan);
4552 	l2cap_chan_put(chan);
4553 
4554 	return 0;
4555 }
4556 
4557 static inline int l2cap_information_req(struct l2cap_conn *conn,
4558 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4559 					u8 *data)
4560 {
4561 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4562 	u16 type;
4563 
4564 	if (cmd_len != sizeof(*req))
4565 		return -EPROTO;
4566 
4567 	type = __le16_to_cpu(req->type);
4568 
4569 	BT_DBG("type 0x%4.4x", type);
4570 
4571 	if (type == L2CAP_IT_FEAT_MASK) {
4572 		u8 buf[8];
4573 		u32 feat_mask = l2cap_feat_mask;
4574 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4575 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4576 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4577 		if (!disable_ertm)
4578 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4579 				| L2CAP_FEAT_FCS;
4580 
4581 		put_unaligned_le32(feat_mask, rsp->data);
4582 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4583 			       buf);
4584 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4585 		u8 buf[12];
4586 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4587 
4588 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4589 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4590 		rsp->data[0] = conn->local_fixed_chan;
4591 		memset(rsp->data + 1, 0, 7);
4592 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4593 			       buf);
4594 	} else {
4595 		struct l2cap_info_rsp rsp;
4596 		rsp.type   = cpu_to_le16(type);
4597 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4598 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4599 			       &rsp);
4600 	}
4601 
4602 	return 0;
4603 }
4604 
/* Handle an incoming L2CAP Information Response during connection setup.
 *
 * Consumes the answer to our own Information Request (feature mask first,
 * then optionally the fixed channel map) and, once the exchange is complete
 * or has failed, marks discovery done and kicks pending channels via
 * l2cap_conn_start().
 *
 * Returns 0 (stale/unexpected responses are ignored) or -EPROTO on a
 * truncated PDU.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Peer refused the query: give up on discovery and proceed with
	 * whatever channels are waiting.
	 */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		if (cmd_len >= sizeof(*rsp) + sizeof(u32))
			conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If fixed channels are supported, follow up with a second
		 * request for the fixed channel map; otherwise discovery is
		 * finished here.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Record the peer's fixed channel map and finish discovery */
		if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0]))
			conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4669 
4670 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4671 					      struct l2cap_cmd_hdr *cmd,
4672 					      u16 cmd_len, u8 *data)
4673 {
4674 	struct hci_conn *hcon = conn->hcon;
4675 	struct l2cap_conn_param_update_req *req;
4676 	struct l2cap_conn_param_update_rsp rsp;
4677 	u16 min, max, latency, to_multiplier;
4678 	int err;
4679 
4680 	if (hcon->role != HCI_ROLE_MASTER)
4681 		return -EINVAL;
4682 
4683 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4684 		return -EPROTO;
4685 
4686 	req = (struct l2cap_conn_param_update_req *) data;
4687 	min		= __le16_to_cpu(req->min);
4688 	max		= __le16_to_cpu(req->max);
4689 	latency		= __le16_to_cpu(req->latency);
4690 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
4691 
4692 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4693 	       min, max, latency, to_multiplier);
4694 
4695 	memset(&rsp, 0, sizeof(rsp));
4696 
4697 	err = hci_check_conn_params(min, max, latency, to_multiplier);
4698 	if (err)
4699 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4700 	else
4701 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4702 
4703 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4704 		       sizeof(rsp), &rsp);
4705 
4706 	if (!err) {
4707 		u8 store_hint;
4708 
4709 		store_hint = hci_le_conn_update(hcon, min, max, latency,
4710 						to_multiplier);
4711 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
4712 				    store_hint, min, max, latency,
4713 				    to_multiplier);
4714 
4715 	}
4716 
4717 	return 0;
4718 }
4719 
/* Handle an LE Credit Based Connection Response to our earlier request.
 *
 * On success the channel is completed with the peer's dcid/MTU/MPS/credits;
 * on an authentication/encryption failure the security level is raised and
 * SMP is asked to retry; any other result tears the channel down.
 *
 * Returns 0, -EPROTO on a malformed PDU, or -EBADSLT when no channel
 * matches the ident (or the peer's dcid is already taken).
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must carry sane MTU/MPS (23 is the LE
	 * minimum) and a dcid inside the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* Match the response to the pending request by command ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a dcid that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the requested security one level above the link's
		 * current level and let SMP renegotiate.
		 */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4799 
4800 static void l2cap_put_ident(struct l2cap_conn *conn, u8 code, u8 id)
4801 {
4802 	switch (code) {
4803 	case L2CAP_COMMAND_REJ:
4804 	case L2CAP_CONN_RSP:
4805 	case L2CAP_CONF_RSP:
4806 	case L2CAP_DISCONN_RSP:
4807 	case L2CAP_ECHO_RSP:
4808 	case L2CAP_INFO_RSP:
4809 	case L2CAP_CONN_PARAM_UPDATE_RSP:
4810 	case L2CAP_ECRED_CONN_RSP:
4811 	case L2CAP_ECRED_RECONF_RSP:
4812 		/* First do a lookup since the remote may send bogus ids that
4813 		 * would make ida_free to generate warnings.
4814 		 */
4815 		if (ida_find_first_range(&conn->tx_ida, id, id) >= 0)
4816 			ida_free(&conn->tx_ida, id);
4817 	}
4818 }
4819 
4820 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4821 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4822 				      u8 *data)
4823 {
4824 	int err = 0;
4825 
4826 	l2cap_put_ident(conn, cmd->code, cmd->ident);
4827 
4828 	switch (cmd->code) {
4829 	case L2CAP_COMMAND_REJ:
4830 		l2cap_command_rej(conn, cmd, cmd_len, data);
4831 		break;
4832 
4833 	case L2CAP_CONN_REQ:
4834 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
4835 		break;
4836 
4837 	case L2CAP_CONN_RSP:
4838 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
4839 		break;
4840 
4841 	case L2CAP_CONF_REQ:
4842 		err = l2cap_config_req(conn, cmd, cmd_len, data);
4843 		break;
4844 
4845 	case L2CAP_CONF_RSP:
4846 		l2cap_config_rsp(conn, cmd, cmd_len, data);
4847 		break;
4848 
4849 	case L2CAP_DISCONN_REQ:
4850 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
4851 		break;
4852 
4853 	case L2CAP_DISCONN_RSP:
4854 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
4855 		break;
4856 
4857 	case L2CAP_ECHO_REQ:
4858 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4859 		break;
4860 
4861 	case L2CAP_ECHO_RSP:
4862 		break;
4863 
4864 	case L2CAP_INFO_REQ:
4865 		err = l2cap_information_req(conn, cmd, cmd_len, data);
4866 		break;
4867 
4868 	case L2CAP_INFO_RSP:
4869 		l2cap_information_rsp(conn, cmd, cmd_len, data);
4870 		break;
4871 
4872 	default:
4873 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4874 		err = -EINVAL;
4875 		break;
4876 	}
4877 
4878 	return err;
4879 }
4880 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the requested PSM, runs security and
 * parameter validation, spawns the new channel and answers with an LE
 * Connection Response - unless setup is deferred to userspace, in which
 * case the response is sent later.
 *
 * Returns 0 (errors are reported to the peer in the response's result
 * field) or -EPROTO on a malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* dcid/credits stay zero for every error response */
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if Key Size is sufficient for the security level */
	if (!l2cap_check_enc_key_size(conn->hcon, pchan)) {
		result = L2CAP_CR_LE_BAD_KEY_SIZE;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	/* Spawn a child channel off the listening one */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	/* Peer-advertised values bound what we may send */
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our locally assigned CID and initial RX credits for the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent once userspace accepts */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5025 
5026 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5027 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5028 				   u8 *data)
5029 {
5030 	struct l2cap_le_credits *pkt;
5031 	struct l2cap_chan *chan;
5032 	u16 cid, credits, max_credits;
5033 
5034 	if (cmd_len != sizeof(*pkt))
5035 		return -EPROTO;
5036 
5037 	pkt = (struct l2cap_le_credits *) data;
5038 	cid	= __le16_to_cpu(pkt->cid);
5039 	credits	= __le16_to_cpu(pkt->credits);
5040 
5041 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5042 
5043 	chan = l2cap_get_chan_by_dcid(conn, cid);
5044 	if (!chan)
5045 		return -EBADSLT;
5046 
5047 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5048 	if (credits > max_credits) {
5049 		BT_ERR("LE credits overflow");
5050 		l2cap_send_disconn_req(chan, ECONNRESET);
5051 
5052 		/* Return 0 so that we don't trigger an unnecessary
5053 		 * command reject packet.
5054 		 */
5055 		goto unlock;
5056 	}
5057 
5058 	chan->tx_credits += credits;
5059 
5060 	/* Resume sending */
5061 	l2cap_le_flowctl_send(chan);
5062 
5063 	if (chan->tx_credits)
5064 		chan->ops->resume(chan);
5065 
5066 unlock:
5067 	l2cap_chan_unlock(chan);
5068 	l2cap_chan_put(chan);
5069 
5070 	return 0;
5071 }
5072 
/* Handle an incoming Enhanced Credit Based Connection Request, which may
 * open up to L2CAP_ECRED_MAX_CID channels at once.
 *
 * Each requested SCID is validated and answered individually in the
 * response's dcid array (0x0000 marks a refused slot); the shared result
 * field reflects the last failure seen, if any.  When any channel defers
 * setup to userspace the response is withheld and sent later.
 *
 * Returns 0 (per-channel errors are reported in the response) or -EINVAL
 * when ECRED support is disabled.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, rsp_len = 0;
	int i, num_scid = 0;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	memset(pdu, 0, sizeof(*pdu));

	/* PDU must hold the fixed header plus a whole number of SCIDs */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	/* Check if there are no pending channels with the same ident */
	__l2cap_chan_list_id(conn, cmd->ident, l2cap_ecred_list_defer,
			     &num_scid);
	if (num_scid) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	/* Always respond with the same number of scids as in the request */
	rsp_len = cmd_len;

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	/* Check if the listening channel has set an output MTU then the
	 * requested MTU shall be less than or equal to that value.
	 *
	 * NOTE(review): the condition below rejects mtu < pchan->omtu,
	 * i.e. it requires the peer's MTU to be at least pchan->omtu,
	 * which reads as the opposite of the sentence above - confirm
	 * which of the two is intended.
	 */
	if (pchan->omtu && mtu < pchan->omtu) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* 0x0000 in the response marks a refused slot */
		pdu->dcid[i] = 0x0000;

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		/* Deferred setup: respond only after userspace accepts */
		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + rsp_len, pdu);

	return 0;
}
5242 
/* Handle an Enhanced Credit Based Connection Response to our request.
 *
 * Walks every local channel still pending on this command ident and
 * completes it with the matching entry of the response's dcid array,
 * applies security retries, or tears it down depending on the shared
 * result code.
 *
 * Returns 0 or -EPROTO on a truncated PDU.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* From here on cmd_len tracks the remaining dcid payload */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this very request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise the requested security one level above the
			 * link's current level and let SMP renegotiate.
			 */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Accepted: complete the channel with the peer's
			 * parameters.
			 */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5352 
5353 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5354 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5355 					 u8 *data)
5356 {
5357 	struct l2cap_ecred_reconf_req *req = (void *) data;
5358 	struct l2cap_ecred_reconf_rsp rsp;
5359 	u16 mtu, mps, result;
5360 	struct l2cap_chan *chan[L2CAP_ECRED_MAX_CID] = {};
5361 	int i, num_scid;
5362 
5363 	if (!enable_ecred)
5364 		return -EINVAL;
5365 
5366 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5367 		result = L2CAP_RECONF_INVALID_CID;
5368 		goto respond;
5369 	}
5370 
5371 	mtu = __le16_to_cpu(req->mtu);
5372 	mps = __le16_to_cpu(req->mps);
5373 
5374 	BT_DBG("mtu %u mps %u", mtu, mps);
5375 
5376 	if (mtu < L2CAP_ECRED_MIN_MTU) {
5377 		result = L2CAP_RECONF_INVALID_PARAMS;
5378 		goto respond;
5379 	}
5380 
5381 	if (mps < L2CAP_ECRED_MIN_MPS) {
5382 		result = L2CAP_RECONF_INVALID_PARAMS;
5383 		goto respond;
5384 	}
5385 
5386 	cmd_len -= sizeof(*req);
5387 	num_scid = cmd_len / sizeof(u16);
5388 
5389 	if (num_scid > L2CAP_ECRED_MAX_CID) {
5390 		result = L2CAP_RECONF_INVALID_PARAMS;
5391 		goto respond;
5392 	}
5393 
5394 	result = L2CAP_RECONF_SUCCESS;
5395 
5396 	/* Check if each SCID, MTU and MPS are valid */
5397 	for (i = 0; i < num_scid; i++) {
5398 		u16 scid;
5399 
5400 		scid = __le16_to_cpu(req->scid[i]);
5401 		if (!scid) {
5402 			result = L2CAP_RECONF_INVALID_CID;
5403 			goto respond;
5404 		}
5405 
5406 		chan[i] = __l2cap_get_chan_by_dcid(conn, scid);
5407 		if (!chan[i]) {
5408 			result = L2CAP_RECONF_INVALID_CID;
5409 			goto respond;
5410 		}
5411 
5412 		/* The MTU field shall be greater than or equal to the greatest
5413 		 * current MTU size of these channels.
5414 		 */
5415 		if (chan[i]->omtu > mtu) {
5416 			BT_ERR("chan %p decreased MTU %u -> %u", chan[i],
5417 			       chan[i]->omtu, mtu);
5418 			result = L2CAP_RECONF_INVALID_MTU;
5419 			goto respond;
5420 		}
5421 
5422 		/* If more than one channel is being configured, the MPS field
5423 		 * shall be greater than or equal to the current MPS size of
5424 		 * each of these channels. If only one channel is being
5425 		 * configured, the MPS field may be less than the current MPS
5426 		 * of that channel.
5427 		 */
5428 		if (chan[i]->remote_mps >= mps && i) {
5429 			BT_ERR("chan %p decreased MPS %u -> %u", chan[i],
5430 			       chan[i]->remote_mps, mps);
5431 			result = L2CAP_RECONF_INVALID_MPS;
5432 			goto respond;
5433 		}
5434 	}
5435 
5436 	/* Commit the new MTU and MPS values after checking they are valid */
5437 	for (i = 0; i < num_scid; i++) {
5438 		chan[i]->omtu = mtu;
5439 		chan[i]->remote_mps = mps;
5440 	}
5441 
5442 respond:
5443 	rsp.result = cpu_to_le16(result);
5444 
5445 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
5446 		       &rsp);
5447 
5448 	return 0;
5449 }
5450 
5451 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5452 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5453 					 u8 *data)
5454 {
5455 	struct l2cap_chan *chan, *tmp;
5456 	struct l2cap_ecred_reconf_rsp *rsp = (void *)data;
5457 	u16 result;
5458 
5459 	if (cmd_len < sizeof(*rsp))
5460 		return -EPROTO;
5461 
5462 	result = __le16_to_cpu(rsp->result);
5463 
5464 	BT_DBG("result 0x%4.4x", result);
5465 
5466 	if (!result)
5467 		return 0;
5468 
5469 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5470 		if (chan->ident != cmd->ident)
5471 			continue;
5472 
5473 		l2cap_chan_del(chan, ECONNRESET);
5474 	}
5475 
5476 	return 0;
5477 }
5478 
5479 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5480 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5481 				       u8 *data)
5482 {
5483 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5484 	struct l2cap_chan *chan;
5485 
5486 	if (cmd_len < sizeof(*rej))
5487 		return -EPROTO;
5488 
5489 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5490 	if (!chan)
5491 		goto done;
5492 
5493 	chan = l2cap_chan_hold_unless_zero(chan);
5494 	if (!chan)
5495 		goto done;
5496 
5497 	l2cap_chan_lock(chan);
5498 	l2cap_chan_del(chan, ECONNREFUSED);
5499 	l2cap_chan_unlock(chan);
5500 	l2cap_chan_put(chan);
5501 
5502 done:
5503 	return 0;
5504 }
5505 
/* Dispatch a single LE signaling command to its handler.
 *
 * Returns 0 on success, a negative error from the handler, or -EINVAL
 * for an unknown opcode so the caller can send a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	/* Release the ident tied to this code so it can be reused */
	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		/* Peer rejected one of our requests; return value ignored */
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do for a parameter update response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5570 
/* Process an skb received on the LE signaling channel.
 *
 * Exactly one command per PDU is expected on LE (unlike BR/EDR where
 * several commands may be concatenated).  A handler failure is answered
 * with a Command Reject.  The skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	/* cmd still points at the header bytes after the pull; only the
	 * skb's data pointer moves past it.
	 */
	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The declared length must cover the remaining payload exactly,
	 * and ident 0 is reserved.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): historical message text; err here means
		 * the command handler failed, not only a link mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5611 
5612 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5613 {
5614 	struct l2cap_cmd_rej_unk rej;
5615 
5616 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5617 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5618 }
5619 
/* Process an skb received on the BR/EDR signaling channel.
 *
 * A single PDU may carry several concatenated commands; each is parsed
 * and dispatched in turn.  Corrupted or failing commands are answered
 * with a Command Reject, and parsing continues with the next command
 * where possible.  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Give raw sockets a copy before interpreting anything */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Declared length exceeding the remaining payload, or a
		 * reserved ident of 0, marks a corrupted command: reject
		 * it and skip what we can of its payload.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			/* NOTE(review): historical message text; err means
			 * the command handler failed.
			 */
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		/* Advance past this command's payload */
		skb_pull(skb, len);
	}

	/* Trailing bytes too short for another command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5667 
5668 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5669 {
5670 	u16 our_fcs, rcv_fcs;
5671 	int hdr_size;
5672 
5673 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5674 		hdr_size = L2CAP_EXT_HDR_SIZE;
5675 	else
5676 		hdr_size = L2CAP_ENH_HDR_SIZE;
5677 
5678 	if (chan->fcs == L2CAP_FCS_CRC16) {
5679 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5680 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5681 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5682 
5683 		if (our_fcs != rcv_fcs)
5684 			return -EBADMSG;
5685 	}
5686 	return 0;
5687 }
5688 
/* Answer a poll (P=1) with an F-bit response.
 *
 * The F bit is carried by an RNR when we are locally busy, otherwise by
 * a pending I-frame if any can be sent, falling back to an RR.  The
 * CONN_SEND_FBIT flag tracks whether the F bit still needs to go out.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* Locally busy: the F bit rides on an RNR */
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just un-busied with frames still outstanding: restart
	 * the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5722 
5723 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5724 			    struct sk_buff **last_frag)
5725 {
5726 	/* skb->len reflects data in skb as well as all fragments
5727 	 * skb->data_len reflects only data in fragments
5728 	 */
5729 	if (!skb_has_frag_list(skb))
5730 		skb_shinfo(skb)->frag_list = new_frag;
5731 
5732 	new_frag->next = NULL;
5733 
5734 	(*last_frag)->next = new_frag;
5735 	*last_frag = new_frag;
5736 
5737 	skb->len += new_frag->len;
5738 	skb->data_len += new_frag->len;
5739 	skb->truesize += new_frag->truesize;
5740 }
5741 
/* Reassemble segmented SDUs according to the frame's SAR bits.
 *
 * Takes ownership of skb: it is either delivered upward via
 * chan->ops->recv, parked in chan->sdu for later fragments, or freed on
 * error.  Returns 0 on success; on error any partial SDU is discarded.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU while one is mid-reassembly is a protocol
		 * error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first segment carries the total SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First segment must be smaller than the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		/* Park the skb; ownership moves to chan->sdu */
		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match the announced SDU length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb is NULL here if ownership was already handed off */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5826 
/* Re-segment queued outgoing data after an MTU change.
 *
 * Not implemented yet; unconditionally reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5832 
5833 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5834 {
5835 	u8 event;
5836 
5837 	if (chan->mode != L2CAP_MODE_ERTM)
5838 		return;
5839 
5840 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5841 	l2cap_tx(chan, NULL, NULL, event);
5842 }
5843 
/* Drain in-sequence frames that were buffered while SREJ recovery was
 * in progress.
 *
 * Returns the first reassembly error, or 0.  When the SREJ queue
 * empties, the channel returns to the normal RECV state and an ack is
 * sent.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the next expected frame isn't queued yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* Recovery finished: resume normal reception */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5877 
/* Handle a received SREJ S-frame: retransmit the single requested
 * frame.
 *
 * Disconnects on an invalid reqseq or when the frame has hit the retry
 * limit.  The P/F bits steer CONN_SREJ_ACT bookkeeping so a frame is
 * not retransmitted twice for the same request.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for a frame we have not sent yet is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll demands an F-bit in our response */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit answers the
			 * SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5935 
/* Handle a received REJ S-frame: retransmit everything from reqseq on.
 *
 * Disconnects on an invalid reqseq or when the first rejected frame has
 * hit the retry limit.  CONN_REJ_ACT prevents retransmitting twice for
 * the same REJ when the F-bit answer arrives later.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* REJ for a frame we have not sent yet is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit doesn't answer a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5972 
/* Classify an incoming I-frame's txseq relative to the receive window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications (expected,
 * duplicate, unexpected, invalid, plus SREJ-specific variants while in
 * SREJ_SENT recovery).  All comparisons are done as modular offsets
 * from last_acked_seq.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we are
		 * waiting for next.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested via SREJ but arriving out of request order */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6058 
/* ERTM RX state machine: normal RECV state.
 *
 * Processes I-frames (delivering, queueing for SREJ recovery, or
 * discarding per txseq classification) and the RR/RNR/REJ/SREJ
 * S-frames.  Frees skb unless ownership was transferred (skb_in_use).
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote can't take more frames: stop retransmitting */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6210 
/* ERTM RX state machine: SREJ_SENT state (selective-reject recovery).
 *
 * All in-window I-frames are buffered in srej_q; expected SREJ
 * retransmissions additionally trigger draining of queued frames.
 * Frees skb unless ownership was transferred (skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for arrived */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Deliver any now-sequential buffered frames */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Poll answered with the tail of the SREJ list */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6353 
6354 static int l2cap_finish_move(struct l2cap_chan *chan)
6355 {
6356 	BT_DBG("chan %p", chan);
6357 
6358 	chan->rx_state = L2CAP_RX_STATE_RECV;
6359 	chan->conn->mtu = chan->conn->hcon->mtu;
6360 
6361 	return l2cap_resegment(chan);
6362 }
6363 
/* ERTM RX state machine: WAIT_P state (waiting for a poll after a
 * channel move).
 *
 * Only frames with the P bit set are accepted; the TX side is rewound
 * to the peer's reqseq before answering the poll.  I-frames are not
 * valid here and yield -EPROTO.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Now back in RECV state; let it process the S-frame event */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6401 
/* ERTM RX state machine: WAIT_F state (waiting for a final bit after a
 * channel move).
 *
 * Only frames with the F bit set are accepted (-EPROTO otherwise); the
 * TX side is rewound to the peer's reqseq, the new link MTU adopted,
 * and the frame is then handled by the normal RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6435 
6436 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6437 {
6438 	/* Make sure reqseq is for a packet that has been sent but not acked */
6439 	u16 unacked;
6440 
6441 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6442 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6443 }
6444 
6445 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6446 		    struct sk_buff *skb, u8 event)
6447 {
6448 	int err = 0;
6449 
6450 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6451 	       control, skb, event, chan->rx_state);
6452 
6453 	if (__valid_reqseq(chan, control->reqseq)) {
6454 		switch (chan->rx_state) {
6455 		case L2CAP_RX_STATE_RECV:
6456 			err = l2cap_rx_state_recv(chan, control, skb, event);
6457 			break;
6458 		case L2CAP_RX_STATE_SREJ_SENT:
6459 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6460 						       event);
6461 			break;
6462 		case L2CAP_RX_STATE_WAIT_P:
6463 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6464 			break;
6465 		case L2CAP_RX_STATE_WAIT_F:
6466 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6467 			break;
6468 		default:
6469 			/* shut it down */
6470 			break;
6471 		}
6472 	} else {
6473 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6474 		       control->reqseq, chan->next_tx_seq,
6475 		       chan->expected_ack_seq);
6476 		l2cap_send_disconn_req(chan, ECONNRESET);
6477 	}
6478 
6479 	return err;
6480 }
6481 
/* Streaming-mode receive: deliver expected frames, drop everything
 * else.
 *
 * There is no retransmission in streaming mode, so any out-of-sequence
 * frame simply discards the partial SDU and the frame itself, then the
 * receive counters resynchronize to the received txseq.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Errors are ignored: streaming mode cannot recover */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Drop any in-progress SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to this frame's sequence number */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6533 
/* Validate and dispatch an ERTM/streaming data frame.
 *
 * Checks the FCS, payload size against MPS, and the P/F bit rules, then
 * routes I-frames into l2cap_rx()/l2cap_stream_rx() and S-frames into
 * l2cap_rx() with the matching event.  Invalid frames are dropped
 * (and, where the spec requires, the channel is disconnected).
 * Always returns 0; the skb is consumed on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU length field and FCS from the payload size */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Optional per-channel filter hook may consume/reject the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (2 bits) to the RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6626 
6627 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6628 {
6629 	struct l2cap_conn *conn = chan->conn;
6630 	struct l2cap_le_credits pkt;
6631 	u16 return_credits = l2cap_le_rx_credits(chan);
6632 
6633 	if (chan->rx_credits >= return_credits)
6634 		return;
6635 
6636 	return_credits -= chan->rx_credits;
6637 
6638 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6639 
6640 	chan->rx_credits += return_credits;
6641 
6642 	pkt.cid     = cpu_to_le16(chan->scid);
6643 	pkt.credits = cpu_to_le16(return_credits);
6644 
6645 	chan->ident = l2cap_get_ident(conn);
6646 
6647 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6648 }
6649 
6650 void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
6651 {
6652 	if (chan->rx_avail == rx_avail)
6653 		return;
6654 
6655 	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
6656 
6657 	chan->rx_avail = rx_avail;
6658 
6659 	if (chan->state == BT_CONNECTED)
6660 		l2cap_chan_le_send_credits(chan);
6661 }
6662 
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner.
 *
 * chan->ops->recv() is called first so that credits are only returned
 * after reception has been confirmed.  A recv() failure is fatal
 * (disconnect request, no credits returned) unless the channel
 * advertises unlimited rx space (rx_avail == -1), in which case
 * credits are still replenished.  Returns the recv() result.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	if (err < 0 && chan->rx_avail != -1) {
		BT_ERR("Queueing received LE L2CAP data failed");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
6683 
6684 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6685 {
6686 	int err;
6687 
6688 	if (!chan->rx_credits) {
6689 		BT_ERR("No credits to receive LE L2CAP data");
6690 		l2cap_send_disconn_req(chan, ECONNRESET);
6691 		return -ENOBUFS;
6692 	}
6693 
6694 	if (skb->len > chan->imtu) {
6695 		BT_ERR("Too big LE L2CAP PDU: len %u > %u", skb->len,
6696 		       chan->imtu);
6697 		l2cap_send_disconn_req(chan, ECONNRESET);
6698 		return -ENOBUFS;
6699 	}
6700 
6701 	chan->rx_credits--;
6702 	BT_DBG("chan %p: rx_credits %u -> %u",
6703 	       chan, chan->rx_credits + 1, chan->rx_credits);
6704 
6705 	/* Update if remote had run out of credits, this should only happens
6706 	 * if the remote is not using the entire MPS.
6707 	 */
6708 	if (!chan->rx_credits)
6709 		l2cap_chan_le_send_credits(chan);
6710 
6711 	err = 0;
6712 
6713 	if (!chan->sdu) {
6714 		u16 sdu_len;
6715 
6716 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) {
6717 			err = -EINVAL;
6718 			goto failed;
6719 		}
6720 
6721 		sdu_len = get_unaligned_le16(skb->data);
6722 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6723 
6724 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6725 		       sdu_len, skb->len, chan->imtu);
6726 
6727 		if (sdu_len > chan->imtu) {
6728 			BT_ERR("Too big LE L2CAP SDU length: len %u > %u",
6729 			       skb->len, sdu_len);
6730 			l2cap_send_disconn_req(chan, ECONNRESET);
6731 			err = -EMSGSIZE;
6732 			goto failed;
6733 		}
6734 
6735 		if (skb->len > sdu_len) {
6736 			BT_ERR("Too much LE L2CAP data received");
6737 			err = -EINVAL;
6738 			goto failed;
6739 		}
6740 
6741 		if (skb->len == sdu_len)
6742 			return l2cap_ecred_recv(chan, skb);
6743 
6744 		chan->sdu = skb;
6745 		chan->sdu_len = sdu_len;
6746 		chan->sdu_last_frag = skb;
6747 
6748 		/* Detect if remote is not able to use the selected MPS */
6749 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6750 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6751 
6752 			/* Adjust the number of credits */
6753 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6754 			chan->mps = mps_len;
6755 			l2cap_chan_le_send_credits(chan);
6756 		}
6757 
6758 		return 0;
6759 	}
6760 
6761 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6762 	       chan->sdu->len, skb->len, chan->sdu_len);
6763 
6764 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6765 		BT_ERR("Too much LE L2CAP data received");
6766 		l2cap_send_disconn_req(chan, ECONNRESET);
6767 		err = -EINVAL;
6768 		goto failed;
6769 	}
6770 
6771 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6772 	skb = NULL;
6773 
6774 	if (chan->sdu->len == chan->sdu_len) {
6775 		err = l2cap_ecred_recv(chan, chan->sdu);
6776 		if (!err) {
6777 			chan->sdu = NULL;
6778 			chan->sdu_last_frag = NULL;
6779 			chan->sdu_len = 0;
6780 		}
6781 	}
6782 
6783 failed:
6784 	if (err) {
6785 		kfree_skb(skb);
6786 		kfree_skb(chan->sdu);
6787 		chan->sdu = NULL;
6788 		chan->sdu_last_frag = NULL;
6789 		chan->sdu_len = 0;
6790 	}
6791 
6792 	/* We can't return an error here since we took care of the skb
6793 	 * freeing internally. An error return would cause the caller to
6794 	 * do a double-free of the skb.
6795 	 */
6796 	return 0;
6797 }
6798 
/* Dispatch an incoming data PDU to the channel identified by @cid.
 *
 * l2cap_get_chan_by_scid() returns the channel locked and with a
 * reference held; both are released before returning.  The skb is
 * either handed to a mode-specific receive routine or freed here.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* l2cap_ecred_data_rcv() frees the skb internally */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() takes ownership of the skb on success */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() owns the skb from here on */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6864 
/* Deliver a connectionless (G-frame) PDU, received on the dedicated
 * connectionless CID, to the global channel listening on @psm.
 *
 * Only valid on BR/EDR (ACL) links.  The remote address and PSM are
 * stashed in the skb control block so the socket layer can fill in
 * msg_name.  The skb is freed here on every path where recv() does
 * not take ownership.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6905 
/* Demultiplex one complete L2CAP frame to the proper channel handler.
 *
 * Takes ownership of @skb.  Frames arriving before the HCI link is
 * fully connected are parked on conn->pending_rx and replayed later by
 * process_pending_rx().  Frames whose header length disagrees with the
 * actual payload, or that come from a rejected LE peer, are dropped.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* Header is read via lh before the skb_pull below; the pointer
	 * stays valid since pulling only advances skb->data.
	 */
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6960 
6961 static void process_pending_rx(struct work_struct *work)
6962 {
6963 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6964 					       pending_rx_work);
6965 	struct sk_buff *skb;
6966 
6967 	BT_DBG("");
6968 
6969 	mutex_lock(&conn->lock);
6970 
6971 	while ((skb = skb_dequeue(&conn->pending_rx)))
6972 		l2cap_recv_frame(conn, skb);
6973 
6974 	mutex_unlock(&conn->lock);
6975 }
6976 
/* Get or create the L2CAP connection object for @hcon.
 *
 * Returns the existing conn if one is already attached, otherwise
 * allocates and initializes a new one (HCI channel, locks, channel
 * list, timers, pending-rx queue) and links it to the hci_conn.
 * Returns NULL on allocation failure.  Caller context: assumed to hold
 * the hdev lock as at the call sites in this file -- TODO confirm.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc_obj(*conn);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* conn keeps its own reference on the hci_conn */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when Secure Connections (or the
	 * debug force flag) makes it usable.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
	ida_init(&conn->tx_ida);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7028 
7029 static bool is_valid_psm(u16 psm, u8 dst_type)
7030 {
7031 	if (!psm)
7032 		return false;
7033 
7034 	if (bdaddr_type_is_le(dst_type))
7035 		return (psm <= 0x00ff);
7036 
7037 	/* PSM must be odd and lsb of upper byte must be 0 */
7038 	return ((psm & 0x0101) == 0x0001);
7039 }
7040 
/* Context passed through l2cap_chan_list() by l2cap_chan_by_pid() to
 * count pending ECRED channels sharing the same owner PID and PSM.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the count */
	struct pid *pid;		/* peer PID of @chan */
	int count;			/* matching channels found so far */
};
7046 
/* l2cap_chan_list() callback: bump d->count for every other channel
 * that is a deferred ECRED connect in progress (BT_CONNECT, no ident
 * assigned yet) with the same peer PID and PSM as the reference
 * channel.  Used to cap the number of SCIDs in one ECRED request.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	/* Skip the reference channel itself */
	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}
7067 
/* Initiate an outgoing L2CAP connection on @chan.
 *
 * @chan:     channel to connect (state BT_OPEN/BT_BOUND, or already
 *            connecting/connected for an idempotent return)
 * @psm:      destination PSM (little endian), required for
 *            connection-oriented channels
 * @cid:      destination CID, required for fixed channels
 * @dst:      remote address
 * @dst_type: remote address type (BDADDR_* constants)
 * @timeout:  connection timeout passed down to the HCI connect calls
 *
 * Validates PSM/CID/mode combinations, creates or reuses the HCI link
 * (LE connect, LE scan-connect, or ACL), attaches the channel to the
 * l2cap_conn and kicks off the channel-level connect.  Returns 0 on
 * success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may skip the PSM/CID validation entirely */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are compiled out or disabled via module
	 * parameters (disable_ertm / enable_ecred).
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect directly as slave; otherwise
		 * go through the passive-scan connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* Destination CID must not already be in use on this link */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, proceed immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7252 
7253 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7254 {
7255 	struct l2cap_conn *conn = chan->conn;
7256 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7257 
7258 	pdu->mtu = cpu_to_le16(chan->imtu);
7259 	pdu->mps = cpu_to_le16(chan->mps);
7260 	pdu->scid[0] = cpu_to_le16(chan->scid);
7261 
7262 	chan->ident = l2cap_get_ident(conn);
7263 
7264 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7265 		       sizeof(pdu), &pdu);
7266 }
7267 
7268 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7269 {
7270 	if (chan->imtu > mtu)
7271 		return -EINVAL;
7272 
7273 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
7274 
7275 	chan->imtu = mtu;
7276 
7277 	l2cap_ecred_reconfigure(chan);
7278 
7279 	return 0;
7280 }
7281 
7282 /* ---- L2CAP interface with lower layer (HCI) ---- */
7283 
7284 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7285 {
7286 	int exact = 0, lm1 = 0, lm2 = 0;
7287 	struct l2cap_chan *c;
7288 
7289 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7290 
7291 	/* Find listening sockets and check their link_mode */
7292 	read_lock(&chan_list_lock);
7293 	list_for_each_entry(c, &chan_list, global_l) {
7294 		if (c->state != BT_LISTEN)
7295 			continue;
7296 
7297 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7298 			lm1 |= HCI_LM_ACCEPT;
7299 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7300 				lm1 |= HCI_LM_MASTER;
7301 			exact++;
7302 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7303 			lm2 |= HCI_LM_ACCEPT;
7304 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7305 				lm2 |= HCI_LM_MASTER;
7306 		}
7307 	}
7308 	read_unlock(&chan_list_lock);
7309 
7310 	return exact ? lm1 : lm2;
7311 }
7312 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * On a match a reference is taken via l2cap_chan_hold_unless_zero()
 * before dropping chan_list_lock, so the caller owns the returned
 * channel and must l2cap_chan_put() it.  Returns NULL when the list
 * is exhausted.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must be bound to the local address or to any address */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* NOTE: may return NULL if the refcount already hit zero */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7348 
/* HCI callback: a connection attempt to/from @hcon completed with
 * @status.  On failure the L2CAP connection is torn down.  On success
 * the l2cap_conn is (created and) readied, and every listening fixed
 * channel gets the chance to spawn a new channel on this link.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* Only ACL and LE links carry L2CAP */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7409 
7410 int l2cap_disconn_ind(struct hci_conn *hcon)
7411 {
7412 	struct l2cap_conn *conn = hcon->l2cap_data;
7413 
7414 	BT_DBG("hcon %p", hcon);
7415 
7416 	if (!conn)
7417 		return HCI_ERROR_REMOTE_USER_TERM;
7418 	return conn->disc_reason;
7419 }
7420 
7421 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7422 {
7423 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7424 		return;
7425 
7426 	BT_DBG("hcon %p reason %d", hcon, reason);
7427 
7428 	l2cap_conn_del(hcon, bt_to_errno(reason));
7429 }
7430 
7431 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7432 {
7433 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7434 		return;
7435 
7436 	if (encrypt == 0x00) {
7437 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7438 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7439 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7440 			   chan->sec_level == BT_SECURITY_FIPS)
7441 			l2cap_chan_close(chan, ECONNREFUSED);
7442 	} else {
7443 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7444 			__clear_chan_timer(chan);
7445 	}
7446 }
7447 
/* HCI callback: authentication/encryption on @hcon completed with
 * @status (and resulting @encrypt state).  Walks every channel on the
 * connection and advances its state machine: resume established
 * channels, start (or abort) channels waiting in BT_CONNECT, and
 * answer pending incoming requests held in BT_CONNECT2.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption may have raised the effective
		 * security level of the link.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and disconnect */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right away on success */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7534 
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * When no reassembly skb exists yet, allocates one sized for @len and
 * records @len as the expected total (conn->rx_len).  Copies at most
 * min(@len, skb->len) bytes from @skb into the reassembly skb, pulls
 * them off @skb and decrements the outstanding length.  Returns the
 * number of bytes copied or -ENOMEM.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;

		/* Preserve the arrival timestamp of the first fragment */
		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
				      skb->tstamp_type);
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
7559 
/* Complete the L2CAP length field of a frame whose start fragment was
 * shorter than the 2-byte length.
 *
 * Pulls just enough bytes from @skb to finish the length field, then
 * either fixes up conn->rx_len to the now-known frame size, or -- if
 * the current rx_skb is too small to hold the whole frame -- replaces
 * it with a correctly sized allocation seeded from the old one.
 * Returns bytes consumed or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7594 
7595 static void l2cap_recv_reset(struct l2cap_conn *conn)
7596 {
7597 	kfree_skb(conn->rx_skb);
7598 	conn->rx_skb = NULL;
7599 	conn->rx_len = 0;
7600 }
7601 
7602 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7603 {
7604 	if (!c)
7605 		return NULL;
7606 
7607 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7608 
7609 	if (!kref_get_unless_zero(&c->ref))
7610 		return NULL;
7611 
7612 	return c;
7613 }
7614 
/* Entry point for ACL data from the HCI core.
 *
 * @hdev:   device the data arrived on
 * @handle: ACL connection handle
 * @skb:    the fragment; ownership is taken (always consumed)
 * @flags:  ACL packet boundary flags (ACL_START*, ACL_CONT, ...)
 *
 * Reassembles HCI ACL fragments into complete L2CAP frames using
 * conn->rx_skb/rx_len and hands finished frames to l2cap_recv_frame().
 * Boundary violations mark the connection unreliable.  Returns 0, or
 * a negative errno if no usable connection exists for @handle.
 */
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
		       struct sk_buff *skb, u16 flags)
{
	struct hci_conn *hcon;
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		kfree_skb(skb);
		return -ENOENT;
	}

	hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Hold our own reference; hcon must not be touched after the
	 * hdev lock is released below.
	 */
	conn = l2cap_conn_hold_unless_zero(conn);
	hcon = NULL;

	hci_dev_unlock(hdev);

	if (!conn) {
		kfree_skb(skb);
		return -EINVAL;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembling means the previous
		 * frame was truncated.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
	return 0;
}
7762 
/* Hooks into the HCI core: confirmation callbacks invoked on ACL link
 * connect, disconnect and security (authentication/encryption) events,
 * registered/unregistered in l2cap_init()/l2cap_exit().
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7769 
7770 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7771 {
7772 	struct l2cap_chan *c;
7773 
7774 	read_lock(&chan_list_lock);
7775 
7776 	list_for_each_entry(c, &chan_list, global_l) {
7777 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7778 			   &c->src, c->src_type, &c->dst, c->dst_type,
7779 			   c->state, __le16_to_cpu(c->psm),
7780 			   c->scid, c->dcid, c->imtu, c->omtu,
7781 			   c->sec_level, c->mode);
7782 	}
7783 
7784 	read_unlock(&chan_list_lock);
7785 
7786 	return 0;
7787 }
7788 
/* Expands to l2cap_debugfs_fops, the single-open file_operations wrapper
 * around l2cap_debugfs_show() used by the debugfs file created below.
 */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* dentry of the "l2cap" debugfs file; removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7792 
7793 int __init l2cap_init(void)
7794 {
7795 	int err;
7796 
7797 	err = l2cap_init_sockets();
7798 	if (err < 0)
7799 		return err;
7800 
7801 	hci_register_cb(&l2cap_cb);
7802 
7803 	if (IS_ERR_OR_NULL(bt_debugfs))
7804 		return 0;
7805 
7806 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7807 					    NULL, &l2cap_debugfs_fops);
7808 
7809 	return 0;
7810 }
7811 
/* Tear down the L2CAP layer in the reverse order of l2cap_init():
 * remove the debugfs file, detach from the HCI core, then release the
 * socket interface.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7818 
/* Module parameters, writable at runtime (mode 0644) via
 * /sys/module/bluetooth/parameters/.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7824