xref: /linux/net/bluetooth/l2cap_core.c (revision 25f420a0d4cfd61d3d23ec4b9c56d9f443d91377)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
/* BDADDR_* type of the local (source) address of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* BDADDR_* type of the remote (destination) address of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 						   u16 cid)
92 {
93 	struct l2cap_chan *c;
94 
95 	list_for_each_entry(c, &conn->chan_l, list) {
96 		if (c->dcid == cid)
97 			return c;
98 	}
99 	return NULL;
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						   u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	list_for_each_entry(c, &conn->chan_l, list) {
108 		if (c->scid == cid)
109 			return c;
110 	}
111 	return NULL;
112 }
113 
114 /* Find channel with given SCID.
115  * Returns a reference locked channel.
116  */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c) {
124 		/* Only lock if chan reference is not 0 */
125 		c = l2cap_chan_hold_unless_zero(c);
126 		if (c)
127 			l2cap_chan_lock(c);
128 	}
129 
130 	return c;
131 }
132 
133 /* Find channel with given DCID.
134  * Returns a reference locked channel.
135  */
136 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 						 u16 cid)
138 {
139 	struct l2cap_chan *c;
140 
141 	c = __l2cap_get_chan_by_dcid(conn, cid);
142 	if (c) {
143 		/* Only lock if chan reference is not 0 */
144 		c = l2cap_chan_hold_unless_zero(c);
145 		if (c)
146 			l2cap_chan_lock(c);
147 	}
148 
149 	return c;
150 }
151 
152 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 						    u8 ident)
154 {
155 	struct l2cap_chan *c;
156 
157 	list_for_each_entry(c, &conn->chan_l, list) {
158 		if (c->ident == ident)
159 			return c;
160 	}
161 	return NULL;
162 }
163 
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
165 						      u8 src_type)
166 {
167 	struct l2cap_chan *c;
168 
169 	list_for_each_entry(c, &chan_list, global_l) {
170 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
171 			continue;
172 
173 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
174 			continue;
175 
176 		if (c->sport == psm && !bacmp(&c->src, src))
177 			return c;
178 	}
179 	return NULL;
180 }
181 
/* Bind @chan to a PSM for source address @src.
 * A non-zero @psm is used as requested (failing with -EADDRINUSE if it
 * is already bound for this source address/type); @psm == 0 allocates
 * the first free dynamic PSM. Returns 0 on success, -EADDRINUSE or
 * -EINVAL (no free dynamic PSM) on failure.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	/* An explicitly requested PSM must not already be taken */
	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		/* BR/EDR dynamic PSMs are stepped by 2 (only every other
		 * value is valid per the Core spec); LE dynamic PSMs use
		 * the full contiguous range.
		 */
		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Stays -EINVAL if the whole range is exhausted */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
227 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
228 {
229 	write_lock(&chan_list_lock);
230 
231 	/* Override the defaults (which are for conn-oriented) */
232 	chan->omtu = L2CAP_DEFAULT_MTU;
233 	chan->chan_type = L2CAP_CHAN_FIXED;
234 
235 	chan->scid = scid;
236 
237 	write_unlock(&chan_list_lock);
238 
239 	return 0;
240 }
241 
242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Transition @chan to @state and notify the owner through the
 * state_change callback, with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
/* Transition @chan to @state and report @err through the state_change
 * callback in a single step.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274 
/* Report @err to the channel owner without changing the channel state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
/* Arm the ERTM retransmission timer — but only when a retransmission
 * timeout is configured and the monitor timer is not already pending.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288 
/* Arm the ERTM monitor timer (if a timeout is configured), cancelling
 * any pending retransmission timer first.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297 
298 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
299 					       u16 seq)
300 {
301 	struct sk_buff *skb;
302 
303 	skb_queue_walk(head, skb) {
304 		if (bt_cb(skb)->l2cap.txseq == seq)
305 			return skb;
306 	}
307 
308 	return NULL;
309 }
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
322 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
323 {
324 	size_t alloc_size, i;
325 
326 	/* Allocated size is a power of 2 to map sequence numbers
327 	 * (which may be up to 14 bits) in to a smaller array that is
328 	 * sized for the negotiated ERTM transmit windows.
329 	 */
330 	alloc_size = roundup_pow_of_two(size);
331 
332 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
333 	if (!seq_list->list)
334 		return -ENOMEM;
335 
336 	seq_list->mask = alloc_size - 1;
337 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 	for (i = 0; i < alloc_size; i++)
340 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
341 
342 	return 0;
343 }
344 
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* Return true if @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): an empty list is not detected here — callers are
 * expected to check before popping.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next chained entry, clearing this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* Popped the last entry: mark the whole list empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
/* Empty @seq_list, resetting every slot plus head and tail. A list
 * that is already empty (head == CLEAR) is left untouched.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386 
/* Append @seq to the tail of @seq_list; duplicates are silently
 * ignored, since each array slot doubles as the membership flag.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Chain after the current tail, or start a fresh list */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Delayed work run when the channel timer expires: close the channel
 * with an error derived from its current state and drop the reference
 * taken when the timer was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* Established/configuring channels, and connecting channels past
	 * the SDP security level, report ECONNREFUSED; everything else
	 * is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken by __set_chan_timer() */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialise a new channel: queues, lock, timers, one
 * initial kref, BT_OPEN state, linked onto the global channel list.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc_obj(*chan, GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback (see l2cap_chan_put()): unlink the channel
 * from the global list and free it.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an additional reference on @c. The caller must already hold a
 * valid reference (use l2cap_chan_hold_unless_zero() otherwise).
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
EXPORT_SYMBOL_GPL(l2cap_chan_hold);
501 
502 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
503 {
504 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
505 
506 	if (!kref_get_unless_zero(&c->kref))
507 		return NULL;
508 
509 	return c;
510 }
511 
/* Drop a reference on @c; the channel is destroyed via
 * l2cap_chan_destroy() when the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
519 
/* Reset @chan to protocol default parameters: FCS, ERTM window sizes
 * and timeouts, security level and flush timeout; configuration state
 * is cleared and marked incomplete again.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Remote parameters mirror ours until configuration says otherwise */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared in l2cap_chan_ready() once configuration completes */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540 
/* Compute how many LE flow-control credits to grant the peer, based on
 * the receive buffer space reported through chan->rx_avail.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	/* Bytes of the in-progress SDU already buffered, if any */
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	/* No MPS yet: cannot express credits, grant none */
	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
562 
/* Initialise LE credit-based flow-control state for @chan: reset SDU
 * reassembly, record the peer-granted TX credits, derive MPS and
 * compute the initial RX credits to hand out.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
575 
/* Initialise enhanced-credit (ECRED) flow-control state: identical to
 * LE flow control, but enforcing the minimum MPS required by the spec.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* MPS changed, so the RX credit calculation must be redone */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
586 
/* Attach @chan to @conn: assign CIDs and default MTU according to the
 * channel type, initialise default EFS parameters, take the needed
 * references and link the channel onto the connection's channel list.
 * Lock-free variant — see l2cap_chan_add() for the locked wrapper.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow specification */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped again in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
639 
/* Attach @chan to @conn while holding the connection lock. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
646 
/* Detach @chan from its connection (if any) and tear it down, purging
 * mode-specific queues and timers. @err is reported to the owner via
 * the teardown callback.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Nothing mode-specific to clean up if config never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
704 
/* Invoke @func on every channel of @conn whose ident matches @id.
 * Uses the _safe iterator so @func may delete the current channel
 * from the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
715 
/* Invoke @func on every channel of @conn. Callers take conn->lock
 * around this (see l2cap_chan_list()).
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
725 
726 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
727 		     void *data)
728 {
729 	if (!conn)
730 		return;
731 
732 	mutex_lock(&conn->lock);
733 	__l2cap_chan_list(conn, func, data);
734 	mutex_unlock(&conn->lock);
735 }
736 
737 EXPORT_SYMBOL_GPL(l2cap_chan_list);
738 
/* Delayed work: copy the hcon's (possibly newly resolved) destination
 * address and type into every channel of the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
757 
758 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
759 {
760 	struct l2cap_conn *conn = chan->conn;
761 	struct l2cap_le_conn_rsp rsp;
762 	u16 result;
763 
764 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
765 		result = L2CAP_CR_LE_AUTHORIZATION;
766 	else
767 		result = L2CAP_CR_LE_BAD_PSM;
768 
769 	l2cap_state_change(chan, BT_DISCONN);
770 
771 	rsp.dcid    = cpu_to_le16(chan->scid);
772 	rsp.mtu     = cpu_to_le16(chan->imtu);
773 	rsp.mps     = cpu_to_le16(chan->mps);
774 	rsp.credits = cpu_to_le16(chan->rx_credits);
775 	rsp.result  = cpu_to_le16(result);
776 
777 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
778 		       &rsp);
779 }
780 
/* Reject a pending ECRED connection: move the channel to BT_DISCONN
 * and send the deferred response via __l2cap_ecred_conn_rsp_defer().
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
787 
788 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
789 {
790 	struct l2cap_conn *conn = chan->conn;
791 	struct l2cap_conn_rsp rsp;
792 	u16 result;
793 
794 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
795 		result = L2CAP_CR_SEC_BLOCK;
796 	else
797 		result = L2CAP_CR_BAD_PSM;
798 
799 	l2cap_state_change(chan, BT_DISCONN);
800 
801 	rsp.scid   = cpu_to_le16(chan->dcid);
802 	rsp.dcid   = cpu_to_le16(chan->scid);
803 	rsp.result = cpu_to_le16(result);
804 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 
806 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
807 }
808 
/* Close @chan according to its current state: send a disconnect or
 * reject where the protocol requires one, otherwise just tear the
 * channel down with @reason.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Ask the peer to disconnect, bounded by the
			 * channel's send timeout.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject is deferred; the
					 * channel is not deleted here.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
859 
/* Derive the HCI authentication requirement (HCI_AT_*) for @chan from
 * its channel type, PSM and security level. Side effect: for the SDP
 * and 3DSP PSMs a LOW security level is upgraded to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (signalling-only) channels use dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP connections never bond; everything else falls
		 * through to general bonding below.
		 */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
911 
/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* LE links are secured through SMP, not BR/EDR authentication */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	/* Note: l2cap_get_auth_type() may upgrade chan->sec_level for
	 * the SDP/3DSP PSMs, so it must run before sec_level is read.
	 */
	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
926 
/* Allocate the next free signalling command identifier for @conn,
 * cycling through the valid range via the tx_ida IDA starting after
 * the last used ident. Returns 0 only when every ident is in use.
 */
static int l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 max;
	int ident;

	/* LE link does not support tools like l2ping so use the full range */
	if (conn->hcon->type == LE_LINK)
		max = 255;
	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */
	else
		max = 128;

	/* Allocate ident using min as last used + 1 (cyclic) */
	ident = ida_alloc_range(&conn->tx_ida, READ_ONCE(conn->tx_ident) + 1,
				max, GFP_ATOMIC);
	/* Force min 1 to start over */
	if (ident <= 0) {
		ident = ida_alloc_range(&conn->tx_ida, 1, max, GFP_ATOMIC);
		if (ident <= 0) {
			/* If all idents are in use, log an error, this is
			 * extremely unlikely to happen and would indicate a bug
			 * in the code that idents are not being freed properly.
			 */
			BT_ERR("Unable to allocate ident: %d", ident);
			return 0;
		}
	}

	WRITE_ONCE(conn->tx_ident, ident);

	return ident;
}
963 
964 static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
965 			   u8 flags)
966 {
967 	/* Check if the hcon still valid before attempting to send */
968 	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
969 		hci_send_acl(conn->hchan, skb, flags);
970 	else
971 		kfree_skb(skb);
972 }
973 
/* Build and transmit a signalling command (@code/@ident with @len
 * bytes of @data) on @conn. Signalling traffic goes out at maximum
 * HCI priority and is marked non-flushable where the link allows it.
 * Allocation failure in l2cap_build_cmd() silently drops the command.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
998 
/* Hand a fully built data frame for @chan to the HCI layer, choosing
 * ACL flush semantics from the link type and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1021 
1022 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1023 {
1024 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1025 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1026 
1027 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1028 		/* S-Frame */
1029 		control->sframe = 1;
1030 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1031 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1032 
1033 		control->sar = 0;
1034 		control->txseq = 0;
1035 	} else {
1036 		/* I-Frame */
1037 		control->sframe = 0;
1038 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1039 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1040 
1041 		control->poll = 0;
1042 		control->super = 0;
1043 	}
1044 }
1045 
1046 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1047 {
1048 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1049 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1050 
1051 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1052 		/* S-Frame */
1053 		control->sframe = 1;
1054 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1055 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1056 
1057 		control->sar = 0;
1058 		control->txseq = 0;
1059 	} else {
1060 		/* I-Frame */
1061 		control->sframe = 0;
1062 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1063 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1064 
1065 		control->poll = 0;
1066 		control->super = 0;
1067 	}
1068 }
1069 
1070 static inline void __unpack_control(struct l2cap_chan *chan,
1071 				    struct sk_buff *skb)
1072 {
1073 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1074 		__unpack_extended_control(get_unaligned_le32(skb->data),
1075 					  &bt_cb(skb)->l2cap);
1076 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1077 	} else {
1078 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1079 					  &bt_cb(skb)->l2cap);
1080 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1081 	}
1082 }
1083 
1084 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1085 {
1086 	u32 packed;
1087 
1088 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1089 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1090 
1091 	if (control->sframe) {
1092 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1093 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1094 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1095 	} else {
1096 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1097 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1098 	}
1099 
1100 	return packed;
1101 }
1102 
1103 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1104 {
1105 	u16 packed;
1106 
1107 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1108 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1109 
1110 	if (control->sframe) {
1111 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1112 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1113 		packed |= L2CAP_CTRL_FRAME_TYPE;
1114 	} else {
1115 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1116 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1117 	}
1118 
1119 	return packed;
1120 }
1121 
1122 static inline void __pack_control(struct l2cap_chan *chan,
1123 				  struct l2cap_ctrl *control,
1124 				  struct sk_buff *skb)
1125 {
1126 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1127 		put_unaligned_le32(__pack_extended_control(control),
1128 				   skb->data + L2CAP_HDR_SIZE);
1129 	} else {
1130 		put_unaligned_le16(__pack_enhanced_control(control),
1131 				   skb->data + L2CAP_HDR_SIZE);
1132 	}
1133 }
1134 
1135 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1136 {
1137 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1138 		return L2CAP_EXT_HDR_SIZE;
1139 	else
1140 		return L2CAP_ENH_HDR_SIZE;
1141 }
1142 
/* Allocate and build an ERTM S-frame PDU carrying the (pre-packed)
 * @control field. Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	/* Reserve room for the trailing FCS when CRC16 checking is enabled */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length covers only control (+ FCS) */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width depends on the extended-control flag */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS is computed over the basic header and control field */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1175 
/* Build and transmit a single S-frame described by @control, updating
 * the ERTM connection state (final bit, RNR tracking, ack bookkeeping)
 * as a side effect.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	/* Only supervisory frames are handled here */
	if (!control->sframe)
		return;

	/* Piggy-back an owed final bit, unless this frame is a poll */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our most recent S-frame advertised local busy */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any non-SREJ S-frame acknowledges frames up to reqseq, so the
	 * pending-ack timer can be cleared.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1213 
1214 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1215 {
1216 	struct l2cap_ctrl control;
1217 
1218 	BT_DBG("chan %p, poll %d", chan, poll);
1219 
1220 	memset(&control, 0, sizeof(control));
1221 	control.sframe = 1;
1222 	control.poll = poll;
1223 
1224 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1225 		control.super = L2CAP_SUPER_RNR;
1226 	else
1227 		control.super = L2CAP_SUPER_RR;
1228 
1229 	control.reqseq = chan->buffer_seq;
1230 	l2cap_send_sframe(chan, &control);
1231 }
1232 
1233 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1234 {
1235 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1236 		return true;
1237 
1238 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1239 }
1240 
/* Send an L2CAP connection request for @chan and mark the connect as
 * pending until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched back to us */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1255 
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready().
 * Safe to call repeatedly; subsequent calls are no-ops.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No TX credits yet: stop the owner from queueing data */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1281 
/* Send an LE credit based connection request for @chan. Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request goes out at most once.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU if the owner didn't configure one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1307 
/* Scratch state used to batch up to five channels into a single enhanced
 * credit based (ECRED) connection request PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* source CIDs included in the request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;	/* peer PID; deferred channels must match it */
	int count;	/* number of scid[] entries filled in so far */
};
1317 
/* __l2cap_chan_list callback: fold an eligible deferred channel into the
 * pending ECRED connect request described by @data.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the channel that initiated the request */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1349 
/* Send an ECRED connection request for @chan, batching in other eligible
 * deferred channels on the same connection under a shared ident.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are collected by an initiating channel instead */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Pull in deferred channels with a matching PID/PSM */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length depends on how many scids were collected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1382 
/* Drive an LE channel towards connected: raise link security first, then
 * either mark it ready (no PSM means no connect procedure) or send the
 * appropriate connection request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1402 
1403 static void l2cap_start_connection(struct l2cap_chan *chan)
1404 {
1405 	if (chan->conn->hcon->type == LE_LINK) {
1406 		l2cap_le_start(chan);
1407 	} else {
1408 		l2cap_send_conn_req(chan);
1409 	}
1410 }
1411 
/* Send a feature-mask information request at most once per connection
 * and start the info timer to bound the wait for the response.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1429 
1430 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1431 				     struct l2cap_chan *chan)
1432 {
1433 	/* The minimum encryption key size needs to be enforced by the
1434 	 * host stack before establishing any L2CAP connections. The
1435 	 * specification in theory allows a minimum of 1, but to align
1436 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1437 	 *
1438 	 * This check might also be called for unencrypted connections
1439 	 * that have no key size requirements. Ensure that the link is
1440 	 * actually encrypted before enforcing a key size.
1441 	 */
1442 	int min_key_size = hcon->hdev->min_enc_key_size;
1443 
1444 	/* On FIPS security level, key size must be 16 bytes */
1445 	if (chan->sec_level == BT_SECURITY_FIPS)
1446 		min_key_size = 16;
1447 
1448 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1449 		hcon->enc_key_size >= min_key_size);
1450 }
1451 
/* Kick off connection establishment for @chan once its prerequisites
 * (feature-mask exchange, security, encryption key size) are satisfied.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* BR/EDR: make sure the feature mask has been requested ... */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* ... and that the info req/rsp procedure has completed */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Too-small encryption key: arm the disconnect timer instead */
	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1478 
1479 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1480 {
1481 	u32 local_feat_mask = l2cap_feat_mask;
1482 	if (!disable_ertm)
1483 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1484 
1485 	switch (mode) {
1486 	case L2CAP_MODE_ERTM:
1487 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1488 	case L2CAP_MODE_STREAMING:
1489 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1490 	default:
1491 		return 0x00;
1492 	}
1493 }
1494 
/* Send a disconnect request for @chan, stopping ERTM timers if needed,
 * and move the channel to BT_DISCONN with @err as its error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* ERTM timers are meaningless once we start disconnecting */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1516 
1517 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and advance their state machines: mark
 * connectionless channels ready, send connect requests for channels in
 * BT_CONNECT, and answer incoming connects waiting in BT_CONNECT2.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	/* _safe variant: l2cap_chan_close() may remove entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close state-2 devices whose requested mode the
			 * remote does not support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect waiting on security/authorization */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must authorize: report pending */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only follow a successful response with our first
			 * configure request, and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1593 
/* LE link became ready: kick pending security for outgoing pairing and,
 * as peripheral, request a connection parameter update if the current
 * interval is outside the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1626 
/* The underlying link is up: start the info exchange on ACL links and
 * advance every channel on the connection, then release any queued RX.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels only need the info
			 * procedure to have finished.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process any frames that arrived before we were ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1662 
1663 /* Notify sockets that we cannot guaranty reliability anymore */
1664 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1665 {
1666 	struct l2cap_chan *chan;
1667 
1668 	BT_DBG("conn %p", conn);
1669 
1670 	list_for_each_entry(chan, &conn->chan_l, list) {
1671 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1672 			l2cap_chan_set_err(chan, err);
1673 	}
1674 }
1675 
/* Info request timed out: treat the feature-mask procedure as done with
 * whatever we have and let waiting channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1688 
1689 /*
1690  * l2cap_user
1691  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1692  * callback is called during registration. The ->remove callback is called
1693  * during unregistration.
1694  * An l2cap_user object can either be explicitly unregistered or when the
1695  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1696  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1697  * External modules must own a reference to the l2cap_conn object if they intend
1698  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1699  * any time if they don't.
1700  */
1701 
/* Register @user on @conn and invoke its ->probe callback under
 * conn->lock. Returns 0 on success, -EINVAL if @user is already on a
 * list, -ENODEV if the connection is being torn down, or the error
 * returned by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects under conn->lock, and we use the same lock here
	 * to protect access to conn->users and conn->hchan.
	 */

	mutex_lock(&conn->lock);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	mutex_unlock(&conn->lock);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1737 
/* Unregister @user from @conn and invoke its ->remove callback. A no-op
 * if the user is not currently registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	mutex_lock(&conn->lock);

	if (list_empty(&user->list))
		goto out_unlock;

	/* Detach first so a re-registration from ->remove sees a clean list */
	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	mutex_unlock(&conn->lock);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1752 
1753 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1754 {
1755 	struct l2cap_user *user;
1756 
1757 	while (!list_empty(&conn->users)) {
1758 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1759 		list_del_init(&user->list);
1760 		user->remove(conn, user);
1761 	}
1762 }
1763 
/* Tear down the L2CAP connection attached to @hcon: stop timers and
 * pending work, unregister users, close every channel with @err, and
 * drop the connection's reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	disable_delayed_work_sync(&conn->info_timer);
	disable_delayed_work_sync(&conn->id_addr_timer);

	mutex_lock(&conn->lock);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	ida_destroy(&conn->tx_ida);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives del + close */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	hci_chan_del(conn->hchan);
	/* NULL hchan marks the conn as dead for l2cap_register_user() */
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1817 
1818 static void l2cap_conn_free(struct kref *ref)
1819 {
1820 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1821 
1822 	hci_conn_put(conn->hcon);
1823 	kfree(conn);
1824 }
1825 
/* Take a reference on @conn; paired with l2cap_conn_put() */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1832 
/* Drop a reference on @conn; the last put frees it via l2cap_conn_free() */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1838 
1839 /* ---- Socket interface ---- */
1840 
1841 /* Find socket with psm and source / destination bdaddr.
1842  * Returns closest match.
1843  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	/* c1 tracks the best wildcard (BDADDR_ANY) candidate so far */
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Source address type must match the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already going away */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Return the wildcard fallback with a reference, or NULL */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1894 
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the tx state
 * machine. NOTE(review): the trailing l2cap_chan_put() pairs with a hold
 * presumably taken when the timer was armed (not visible here).
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1915 
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the tx
 * state machine. NOTE(review): the trailing l2cap_chan_put() pairs with
 * a hold presumably taken when the timer was armed (not visible here).
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1935 
/* Streaming mode transmit: number each queued I-frame, pack its control
 * field, append the FCS if enabled, and send it immediately. Frames are
 * not kept for retransmission.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming never acknowledges: reqseq is always zero */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1971 
/* ERTM transmit: send queued I-frames while the remote window has room,
 * keeping the originals on tx_q for retransmission. Returns the number
 * of frames sent or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote signalled busy (RNR): hold all transmissions */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back an owed final bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2038 
/* Retransmit every sequence number queued on chan->retrans_list, with an
 * updated reqseq/final bit and recomputed FCS, enforcing max_tx.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote signalled busy (RNR): hold all transmissions */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* Too many retries: give up and disconnect the channel */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement and final bit for this resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2116 
/* Queue a single sequence number (control->reqseq) for retransmission
 * and resend immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2125 
/* Queue every unacknowledged frame starting at control->reqseq for
 * retransmission and resend them, unless the remote is busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll demands a final bit on our next response */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean retransmission list */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq ... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ... and queue everything up to the unsent head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2159 
/* Acknowledge received I-frames: send RNR if locally busy, otherwise try
 * to piggy-back the ack on outgoing I-frames and fall back to an RR once
 * about 3/4 of the ack window is outstanding.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: advertise RNR instead of RR */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Still frames to ack: defer via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2209 
/* Copy @len bytes of user data from @msg into @skb: @count bytes into
 * the head, the remainder into MTU-sized continuation fragments chained
 * on skb's frag_list.  Returns bytes consumed or a negative errno.  On
 * error the caller frees @skb, which also releases any fragments already
 * linked into the chain.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so the chain is freed with skb */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep head skb length accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2253 
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus a
 * 2-byte PSM, followed by user data.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Payload carried in the head skb; the rest is fragmented later */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2285 
/* Build a basic-mode (B-frame) PDU: L2CAP header followed by user
 * data.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload carried in the head skb; the rest is fragmented later */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2315 
/* Build an ERTM/streaming I-frame PDU.  @sdulen is non-zero only for
 * the first (SAR start) segment of a segmented SDU, in which case a
 * 2-byte SDU-length field is included.  The control field is zeroed
 * here and filled in at transmit time.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2369 
/* Split an SDU from @msg into ERTM/streaming I-frame PDUs and queue
 * them on @seg_queue with SAR markers (UNSEGMENTED, or START/CONTINUE/
 * END).  Returns 0 on success or a negative errno; on failure any
 * PDUs already built are purged from @seg_queue.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (!pdu_len)
		return -EINVAL;

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU; no SDU-length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START segment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2438 
/* Build an LE/enhanced credit-based flow control PDU (K-frame).
 * @sdulen is non-zero only for the first segment, which carries the
 * total SDU length.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2481 
/* Split an SDU from @msg into credit-based flow control PDUs sized by
 * the remote MPS and queue them on @seg_queue.  The first PDU carries
 * the SDU length and therefore has L2CAP_SDULEN_SIZE fewer payload
 * bytes.  Returns 0 or a negative errno (with @seg_queue purged).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Only the first PDU carries the SDU-length field;
			 * later PDUs can hold the full MPS of payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2517 
2518 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2519 {
2520 	int sent = 0;
2521 
2522 	BT_DBG("chan %p", chan);
2523 
2524 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2525 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2526 		chan->tx_credits--;
2527 		sent++;
2528 	}
2529 
2530 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2531 	       skb_queue_len(&chan->tx_q));
2532 }
2533 
2534 static void l2cap_tx_timestamp(struct sk_buff *skb,
2535 			       const struct sockcm_cookie *sockc,
2536 			       size_t len)
2537 {
2538 	struct sock *sk = skb ? skb->sk : NULL;
2539 
2540 	if (sk && sk->sk_type == SOCK_STREAM)
2541 		hci_setup_tx_timestamp(skb, len, sockc);
2542 	else
2543 		hci_setup_tx_timestamp(skb, 1, sockc);
2544 }
2545 
2546 static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
2547 				   const struct sockcm_cookie *sockc,
2548 				   size_t len)
2549 {
2550 	struct sk_buff *skb = skb_peek(queue);
2551 	struct sock *sk = skb ? skb->sk : NULL;
2552 
2553 	if (sk && sk->sk_type == SOCK_STREAM)
2554 		l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len);
2555 	else
2556 		l2cap_tx_timestamp(skb, sockc, len);
2557 }
2558 
/* Send an SDU of @len bytes from @msg on @chan, dispatching on channel
 * type and mode (connectionless, LE/extended flow control, basic,
 * ERTM, streaming).  Returns the number of bytes accepted or a
 * negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been torn down while segmenting;
		 * segmentation can block on memory allocation.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop the caller until more arrive */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2671 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2672 
/* Selectively reject every missing frame between the expected TX
 * sequence and @txseq: send an SREJ for each gap frame not already
 * buffered in srej_q, remembering it in srej_list so the later
 * retransmission can be matched up.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already held out-of-order need no SREJ */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2695 
2696 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2697 {
2698 	struct l2cap_ctrl control;
2699 
2700 	BT_DBG("chan %p", chan);
2701 
2702 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2703 		return;
2704 
2705 	memset(&control, 0, sizeof(control));
2706 	control.sframe = 1;
2707 	control.super = L2CAP_SUPER_SREJ;
2708 	control.reqseq = chan->srej_list.tail;
2709 	l2cap_send_sframe(chan, &control);
2710 }
2711 
/* Re-send SREJs for all outstanding missing frames except @txseq,
 * rotating each entry from the head back to the tail of srej_list so
 * the list order is preserved after one full pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* @txseq was just received; drop it from the list */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Re-append: still waiting for this frame */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2737 
/* Process an acknowledgment up to (but not including) @reqseq: drop
 * every acked frame from the transmit queue, advance expected_ack_seq,
 * and stop the retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2769 
/* Abandon selective-reject recovery: drop all pending SREJ state and
 * buffered out-of-order frames, rewind the expected TX sequence to the
 * last in-order point, and return the receiver to the RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2779 
/* ERTM transmit state machine handler for the XMIT state: data may be
 * sent immediately; poll events transition to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new PDUs and transmit what the window allows */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the peer with
			 * RR (P=1) so it learns we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and wait in WAIT_F for the F-bit reply */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: probe the peer with a poll */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2851 
2852 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2853 				  struct l2cap_ctrl *control,
2854 				  struct sk_buff_head *skbs, u8 event)
2855 {
2856 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2857 	       event);
2858 
2859 	switch (event) {
2860 	case L2CAP_EV_DATA_REQUEST:
2861 		if (chan->tx_send_head == NULL)
2862 			chan->tx_send_head = skb_peek(skbs);
2863 		/* Queue data, but don't send. */
2864 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2865 		break;
2866 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2867 		BT_DBG("Enter LOCAL_BUSY");
2868 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2869 
2870 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2871 			/* The SREJ_SENT state must be aborted if we are to
2872 			 * enter the LOCAL_BUSY state.
2873 			 */
2874 			l2cap_abort_rx_srej_sent(chan);
2875 		}
2876 
2877 		l2cap_send_ack(chan);
2878 
2879 		break;
2880 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2881 		BT_DBG("Exit LOCAL_BUSY");
2882 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2883 
2884 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2885 			struct l2cap_ctrl local_control;
2886 			memset(&local_control, 0, sizeof(local_control));
2887 			local_control.sframe = 1;
2888 			local_control.super = L2CAP_SUPER_RR;
2889 			local_control.poll = 1;
2890 			local_control.reqseq = chan->buffer_seq;
2891 			l2cap_send_sframe(chan, &local_control);
2892 
2893 			chan->retry_count = 1;
2894 			__set_monitor_timer(chan);
2895 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2896 		}
2897 		break;
2898 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2899 		l2cap_process_reqseq(chan, control->reqseq);
2900 		fallthrough;
2901 
2902 	case L2CAP_EV_RECV_FBIT:
2903 		if (control && control->final) {
2904 			__clear_monitor_timer(chan);
2905 			if (chan->unacked_frames > 0)
2906 				__set_retrans_timer(chan);
2907 			chan->retry_count = 0;
2908 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2909 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2910 		}
2911 		break;
2912 	case L2CAP_EV_EXPLICIT_POLL:
2913 		/* Ignore */
2914 		break;
2915 	case L2CAP_EV_MONITOR_TO:
2916 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2917 			l2cap_send_rr_or_rnr(chan, 1);
2918 			__set_monitor_timer(chan);
2919 			chan->retry_count++;
2920 		} else {
2921 			l2cap_send_disconn_req(chan, ECONNABORTED);
2922 		}
2923 		break;
2924 	default:
2925 		break;
2926 	}
2927 }
2928 
2929 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2930 		     struct sk_buff_head *skbs, u8 event)
2931 {
2932 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2933 	       chan, control, skbs, event, chan->tx_state);
2934 
2935 	switch (chan->tx_state) {
2936 	case L2CAP_TX_STATE_XMIT:
2937 		l2cap_tx_state_xmit(chan, control, skbs, event);
2938 		break;
2939 	case L2CAP_TX_STATE_WAIT_F:
2940 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2941 		break;
2942 	default:
2943 		/* Ignore event */
2944 		break;
2945 	}
2946 }
2947 
/* Feed a received reqseq/F-bit into the transmit state machine so
 * acknowledged frames are released from the transmit queue.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2954 
/* Feed only a received F-bit into the transmit state machine (no
 * reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2961 
2962 /* Copy frame to all raw sockets on that connection */
2963 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2964 {
2965 	struct sk_buff *nskb;
2966 	struct l2cap_chan *chan;
2967 
2968 	BT_DBG("conn %p", conn);
2969 
2970 	list_for_each_entry(chan, &conn->chan_l, list) {
2971 		if (chan->chan_type != L2CAP_CHAN_RAW)
2972 			continue;
2973 
2974 		/* Don't send frame to the channel it came from */
2975 		if (bt_cb(skb)->l2cap.chan == chan)
2976 			continue;
2977 
2978 		nskb = skb_clone(skb, GFP_KERNEL);
2979 		if (!nskb)
2980 			continue;
2981 		if (chan->ops->recv(chan, nskb))
2982 			kfree_skb(nskb);
2983 	}
2984 }
2985 
2986 /* ---- L2CAP signalling commands ---- */
2987 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2988 				       u8 ident, u16 dlen, void *data)
2989 {
2990 	struct sk_buff *skb, **frag;
2991 	struct l2cap_cmd_hdr *cmd;
2992 	struct l2cap_hdr *lh;
2993 	int len, count;
2994 
2995 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2996 	       conn, code, ident, dlen);
2997 
2998 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2999 		return NULL;
3000 
3001 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3002 	count = min_t(unsigned int, conn->mtu, len);
3003 
3004 	skb = bt_skb_alloc(count, GFP_KERNEL);
3005 	if (!skb)
3006 		return NULL;
3007 
3008 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3009 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3010 
3011 	if (conn->hcon->type == LE_LINK)
3012 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3013 	else
3014 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3015 
3016 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3017 	cmd->code  = code;
3018 	cmd->ident = ident;
3019 	cmd->len   = cpu_to_le16(dlen);
3020 
3021 	if (dlen) {
3022 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3023 		skb_put_data(skb, data, count);
3024 		data += count;
3025 	}
3026 
3027 	len -= skb->len;
3028 
3029 	/* Continuation fragments (no L2CAP header) */
3030 	frag = &skb_shinfo(skb)->frag_list;
3031 	while (len) {
3032 		count = min_t(unsigned int, conn->mtu, len);
3033 
3034 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3035 		if (!*frag)
3036 			goto fail;
3037 
3038 		skb_put_data(*frag, data, count);
3039 
3040 		len  -= count;
3041 		data += count;
3042 
3043 		frag = &(*frag)->next;
3044 	}
3045 
3046 	return skb;
3047 
3048 fail:
3049 	kfree_skb(skb);
3050 	return NULL;
3051 }
3052 
/* Parse one configuration option at *ptr, returning its type and
 * length through @type/@olen and the decoded value (or a pointer to
 * the raw bytes for non-scalar lengths) through @val.  Advances *ptr
 * past the option and returns the number of bytes consumed.
 *
 * NOTE(review): opt->len comes from the peer; callers are expected to
 * have bounds-checked the remaining buffer before calling — confirm
 * at each call site.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Non-scalar option: hand back a pointer to the bytes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3086 
/* Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  @val holds the scalar value for lengths 1/2/4, or a
 * pointer to @len raw bytes otherwise.  Silently drops the option if
 * fewer than @size bytes remain.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the response buffer */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Non-scalar option: @val is a pointer to the bytes */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3119 
/* Append an Extended Flow Specification option describing this
 * channel's local service parameters.  Only ERTM and streaming modes
 * use EFS; any other mode adds nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3150 
/* Deferred-ack timer: if any received I-frames are still
 * unacknowledged when the timer fires, send an explicit RR/RNR.
 * Drops the channel reference taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3170 
/* Reset per-channel transmit/receive state for ERTM or streaming mode.
 * For ERTM also initialise the SREJ queue and the sequence lists sized
 * by the local and remote transmit windows.  Returns 0 or a negative
 * errno from sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs only the reset above */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list so nothing leaks on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3206 
3207 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3208 {
3209 	switch (mode) {
3210 	case L2CAP_MODE_STREAMING:
3211 	case L2CAP_MODE_ERTM:
3212 		if (l2cap_mode_supported(mode, remote_feat_mask))
3213 			return mode;
3214 		fallthrough;
3215 	default:
3216 		return L2CAP_MODE_BASIC;
3217 	}
3218 }
3219 
/* True if the remote advertises Extended Window Size support */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3224 
/* True if the remote advertises Extended Flow Specification support */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3229 
/* Fill in the default ERTM retransmission and monitor timeouts for an
 * outgoing RFC configuration option.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3236 
/* Choose between the enhanced and extended control field based on the
 * requested transmit window: windows larger than the default need the
 * extended field (and remote EWS support); otherwise clamp to the
 * default window size.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3251 
3252 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3253 {
3254 	struct hci_conn *conn = chan->conn->hcon;
3255 
3256 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3257 
3258 	/* The 2-DH1 packet has between 2 and 56 information bytes
3259 	 * (including the 2-byte payload header)
3260 	 */
3261 	if (!(conn->pkt_type & HCI_2DH1))
3262 		chan->imtu = 54;
3263 
3264 	/* The 3-DH1 packet has between 2 and 85 information bytes
3265 	 * (including the 2-byte payload header)
3266 	 */
3267 	if (!(conn->pkt_type & HCI_3DH1))
3268 		chan->imtu = 83;
3269 
3270 	/* The 2-DH3 packet has between 2 and 369 information bytes
3271 	 * (including the 2-byte payload header)
3272 	 */
3273 	if (!(conn->pkt_type & HCI_2DH3))
3274 		chan->imtu = 367;
3275 
3276 	/* The 3-DH3 packet has between 2 and 554 information bytes
3277 	 * (including the 2-byte payload header)
3278 	 */
3279 	if (!(conn->pkt_type & HCI_3DH3))
3280 		chan->imtu = 552;
3281 
3282 	/* The 2-DH5 packet has between 2 and 681 information bytes
3283 	 * (including the 2-byte payload header)
3284 	 */
3285 	if (!(conn->pkt_type & HCI_2DH5))
3286 		chan->imtu = 679;
3287 
3288 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3289 	 * (including the 2-byte payload header)
3290 	 */
3291 	if (!(conn->pkt_type & HCI_3DH5))
3292 		chan->imtu = 1021;
3293 }
3294 
/* Build an L2CAP_CONFIGURATION_REQ payload for @chan into @data (at most
 * @data_size bytes) and return the number of bytes used.
 *
 * On the very first exchange (no requests or responses seen yet) the
 * channel mode is (re)selected against the remote feature mask; retries
 * skip mode selection and only re-emit the options.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only once, before any config traffic */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE pins the requested mode; don't renegotiate */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when deviating from the spec default;
	 * an unset imtu is derived from the supported ACL packet types.
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only emit an explicit basic-mode RFC option when the
		 * remote also supports ERTM/streaming and could otherwise
		 * propose a different mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full frame (extended header, SDU
		 * length and FCS included) fits into the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Extended window size option carries the full tx_win when
		 * extended control fields are in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		/* Ask to drop the FCS if either side already opted out */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3420 
/* Parse the remote's complete L2CAP_CONFIGURATION_REQ (accumulated in
 * chan->conf_req, chan->conf_len bytes) and build the matching
 * L2CAP_CONFIGURATION_RSP into @data (at most @data_size bytes).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the remote's options are incompatible and the channel must be torn
 * down.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the remote's options. Options with a wrong
	 * length are silently skipped (the "break" on olen mismatch).
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		/* The high bit marks a hint, which may be ignored without
		 * rejecting the request.
		 */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not negotiated here;
			 * refuse the connection instead.
			 */
			return -ECONNREFUSED;

		default:
			/* Unknown non-hint options must be reported back */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode reconciliation only happens on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* With a pinned mode, the remote must match it exactly */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give the remote exactly one chance to retry with our mode */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		/* MTUs below the spec minimum are rejected; the MTU option is
		 * always echoed back so the remote sees the final value.
		 */
		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must agree unless one side is NO TRAFFIC */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* The EWS option, when received, supersedes the RFC
			 * txwin_size field.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits our link MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3663 
/* Parse an L2CAP_CONFIGURATION_RSP (@rsp, @len bytes) from the remote
 * and build the follow-up L2CAP_CONFIGURATION_REQ into @data (at most
 * @size bytes). @result may be downgraded to L2CAP_CONF_UNACCEPT when
 * the remote proposed an unusably small MTU.
 *
 * Returns the number of request bytes written, or -ECONNREFUSED when
 * the remote's adjustments cannot be accepted.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the response options; each accepted/adjusted option is
	 * echoed into the new request. Wrong-length options are skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Refuse MTUs below the spec minimum but keep
			 * negotiating with the minimum instead.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A pinned mode (CONF_STATE2_DEVICE) cannot be
			 * overridden by the remote's adjustment.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service types must agree unless one side is NO TRAFFIC */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be switched to another mode here */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control, the RFC txwin_size caps
			 * our ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3781 
3782 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3783 				u16 result, u16 flags)
3784 {
3785 	struct l2cap_conf_rsp *rsp = data;
3786 	void *ptr = rsp->data;
3787 
3788 	BT_DBG("chan %p", chan);
3789 
3790 	rsp->scid   = cpu_to_le16(chan->dcid);
3791 	rsp->result = cpu_to_le16(result);
3792 	rsp->flags  = cpu_to_le16(flags);
3793 
3794 	return ptr - data;
3795 }
3796 
3797 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3798 {
3799 	struct l2cap_le_conn_rsp rsp;
3800 	struct l2cap_conn *conn = chan->conn;
3801 
3802 	BT_DBG("chan %p", chan);
3803 
3804 	rsp.dcid    = cpu_to_le16(chan->scid);
3805 	rsp.mtu     = cpu_to_le16(chan->imtu);
3806 	rsp.mps     = cpu_to_le16(chan->mps);
3807 	rsp.credits = cpu_to_le16(chan->rx_credits);
3808 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3809 
3810 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3811 		       &rsp);
3812 }
3813 
/* Channel-list iterator callback: tally channels still pending accept in
 * *data (an int), or mark the whole batch refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once refused, and skip outgoing connections */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3834 
/* Accumulator for building one L2CAP_ECRED_CONN_RSP covering several
 * channels that share the same request ident.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		/* Storage backing the flexible dcid[] array of
		 * struct l2cap_ecred_conn_rsp.
		 */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	/* Number of dcid entries filled in so far */
	int count;
};
3842 
3843 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3844 {
3845 	struct l2cap_ecred_rsp_data *rsp = data;
3846 	struct l2cap_ecred_conn_rsp *rsp_flex =
3847 		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
3848 
3849 	/* Check if channel for outgoing connection or if it wasn't deferred
3850 	 * since in those cases it must be skipped.
3851 	 */
3852 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
3853 	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
3854 		return;
3855 
3856 	/* Reset ident so only one response is sent */
3857 	chan->ident = 0;
3858 
3859 	/* Include all channels pending with the same ident */
3860 	if (!rsp->pdu.rsp.result)
3861 		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
3862 	else
3863 		l2cap_chan_del(chan, ECONNRESET);
3864 }
3865 
/* Send the deferred L2CAP_ECRED_CONN_RSP for @chan once every channel
 * created by the same request (same ident) has been resolved. Does
 * nothing while at least one sibling channel is still pending accept.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* No ident means there is no outstanding request to answer */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channels still pending accept, try again later */
	if (result > 0)
		return;

	/* result < 0: at least one channel was refused */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	/* Only the dcid entries actually filled in are transmitted */
	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3901 
3902 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3903 {
3904 	struct l2cap_conn_rsp rsp;
3905 	struct l2cap_conn *conn = chan->conn;
3906 	u8 buf[128];
3907 	u8 rsp_code;
3908 
3909 	rsp.scid   = cpu_to_le16(chan->dcid);
3910 	rsp.dcid   = cpu_to_le16(chan->scid);
3911 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3912 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3913 	rsp_code = L2CAP_CONN_RSP;
3914 
3915 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3916 
3917 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3918 
3919 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3920 		return;
3921 
3922 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3923 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3924 	chan->num_conf_req++;
3925 }
3926 
/* Extract the final RFC (and extended window size) values from a
 * successful L2CAP_CONFIGURATION_RSP (@rsp, @len bytes) and apply them
 * to @chan. Only meaningful for ERTM and streaming channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick up the RFC and EWS options; wrong-length options are skipped */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* EWS supersedes the RFC txwin_size when extended control
		 * fields are in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3982 
3983 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3984 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3985 				    u8 *data)
3986 {
3987 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3988 
3989 	if (cmd_len < sizeof(*rej))
3990 		return -EPROTO;
3991 
3992 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3993 		return 0;
3994 
3995 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3996 	    cmd->ident == conn->info_ident) {
3997 		cancel_delayed_work(&conn->info_timer);
3998 
3999 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4000 		conn->info_ident = 0;
4001 
4002 		l2cap_conn_start(conn);
4003 	}
4004 
4005 	return 0;
4006 }
4007 
/* Handle an incoming L2CAP_CONN_REQ: look up a listening channel for the
 * requested PSM, run security checks, create the new channel and answer
 * with a connection response (@rsp_code). On success or pend, may also
 * trigger the information request and/or the first configuration request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	/* Ask the listening socket to spawn a channel for this connection;
	 * failure falls through with result == L2CAP_CR_NO_MEM.
	 */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Decide between SUCCESS and PEND depending on whether the
	 * feature-mask exchange is done and security is satisfied.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* pchan == NULL means no lock is held and nothing else to do */
	if (!pchan)
		return;

	/* PEND with NO_INFO: start the feature-mask information exchange */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, kick off configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4138 
4139 static int l2cap_connect_req(struct l2cap_conn *conn,
4140 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4141 {
4142 	if (cmd_len < sizeof(struct l2cap_conn_req))
4143 		return -EPROTO;
4144 
4145 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
4146 	return 0;
4147 }
4148 
/* Handle an incoming L2CAP_CONN_RSP: locate the initiating channel (by
 * source CID, or by command ident while the remote hasn't allocated one)
 * and move it to BT_CONFIG, leave it pending, or delete it on refusal.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dynamic destination CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference before locking; bail if the channel is dying */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Configuration request goes out at most once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result refuses the connection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4226 
4227 static inline void set_default_fcs(struct l2cap_chan *chan)
4228 {
4229 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4230 	 * sides request it.
4231 	 */
4232 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4233 		chan->fcs = L2CAP_FCS_NONE;
4234 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4235 		chan->fcs = L2CAP_FCS_CRC16;
4236 }
4237 
4238 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4239 				    u8 ident, u16 flags)
4240 {
4241 	struct l2cap_conn *conn = chan->conn;
4242 
4243 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4244 	       flags);
4245 
4246 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4247 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4248 
4249 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4250 		       l2cap_build_conf_rsp(chan, data,
4251 					    L2CAP_CONF_SUCCESS, flags), data);
4252 }
4253 
4254 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4255 				   u16 scid, u16 dcid)
4256 {
4257 	struct l2cap_cmd_rej_cid rej;
4258 
4259 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4260 	rej.scid = __cpu_to_le16(scid);
4261 	rej.dcid = __cpu_to_le16(dcid);
4262 
4263 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4264 }
4265 
/* Handle an incoming L2CAP_CONF_REQ: accumulate (possibly fragmented)
 * option data in chan->conf_req, and once complete, parse it, send the
 * response and advance the configuration state machine — possibly
 * marking the channel ready or sending our own configuration request.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Irreconcilable options: tear the channel down */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->state != BT_CONNECTED) {
			if (chan->mode == L2CAP_MODE_ERTM ||
			    chan->mode == L2CAP_MODE_STREAMING)
				err = l2cap_ertm_init(chan);

			if (err < 0)
				l2cap_send_disconn_req(chan, -err);
			else
				l2cap_chan_ready(chan);
		}

		goto unlock;
	}

	/* Our own configuration request goes out at most once */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4376 
/* Handle an incoming L2CAP Configure Response (BR/EDR).
 *
 * Looks up the channel by the SCID echoed in the response and acts on
 * the result code: success, pending, unknown/unacceptable options, or a
 * fatal result.  Once both directions are configured (CONF_INPUT_DONE
 * and CONF_OUTPUT_DONE) the channel is brought up.
 *
 * Returns 0 for handled/ignored PDUs, -EPROTO for a truncated PDU, or a
 * negative error from ERTM initialisation.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	/* Reject truncated responses before reading the header fields */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Lookup returns the channel locked and with a reference held;
	 * both are released at the done label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Peer accepted our options; cache the negotiated RFC */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Ensure the rebuilt request fits in the stack
			 * buffer before parsing the rejected options.
			 */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* Retry with adjusted options */
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable result: tear the channel down */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More configuration fragments will follow; wait for them */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4483 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request with a Disconnection Response and tears the
 * channel down.  An unknown DCID is answered with a command reject
 * carrying both CIDs.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's DCID is our SCID; lookup returns the channel locked
	 * and referenced.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CID pair back from our point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Mark the channel shut down before removing it from the conn */
	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4522 
4523 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4524 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4525 				       u8 *data)
4526 {
4527 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4528 	u16 dcid, scid;
4529 	struct l2cap_chan *chan;
4530 
4531 	if (cmd_len != sizeof(*rsp))
4532 		return -EPROTO;
4533 
4534 	scid = __le16_to_cpu(rsp->scid);
4535 	dcid = __le16_to_cpu(rsp->dcid);
4536 
4537 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4538 
4539 	chan = l2cap_get_chan_by_scid(conn, scid);
4540 	if (!chan) {
4541 		return 0;
4542 	}
4543 
4544 	if (chan->state != BT_DISCONN) {
4545 		l2cap_chan_unlock(chan);
4546 		l2cap_chan_put(chan);
4547 		return 0;
4548 	}
4549 
4550 	l2cap_chan_del(chan, 0);
4551 
4552 	chan->ops->close(chan);
4553 
4554 	l2cap_chan_unlock(chan);
4555 	l2cap_chan_put(chan);
4556 
4557 	return 0;
4558 }
4559 
4560 static inline int l2cap_information_req(struct l2cap_conn *conn,
4561 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4562 					u8 *data)
4563 {
4564 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4565 	u16 type;
4566 
4567 	if (cmd_len != sizeof(*req))
4568 		return -EPROTO;
4569 
4570 	type = __le16_to_cpu(req->type);
4571 
4572 	BT_DBG("type 0x%4.4x", type);
4573 
4574 	if (type == L2CAP_IT_FEAT_MASK) {
4575 		u8 buf[8];
4576 		u32 feat_mask = l2cap_feat_mask;
4577 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4578 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4579 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4580 		if (!disable_ertm)
4581 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4582 				| L2CAP_FEAT_FCS;
4583 
4584 		put_unaligned_le32(feat_mask, rsp->data);
4585 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4586 			       buf);
4587 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4588 		u8 buf[12];
4589 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4590 
4591 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4592 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4593 		rsp->data[0] = conn->local_fixed_chan;
4594 		memset(rsp->data + 1, 0, 7);
4595 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4596 			       buf);
4597 	} else {
4598 		struct l2cap_info_rsp rsp;
4599 		rsp.type   = cpu_to_le16(type);
4600 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4601 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4602 			       &rsp);
4603 	}
4604 
4605 	return 0;
4606 }
4607 
/* Handle an incoming L2CAP Information Response (BR/EDR).
 *
 * Part of connection bring-up: a feature-mask reply may chain a second
 * query for the fixed channel map, and when the exchange completes (or
 * fails) pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Query failed: mark the exchange done and proceed with
		 * connection setup without the optional features.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		/* Only read the mask when the PDU really carries the
		 * 4 data bytes.
		 */
		if (cmd_len >= sizeof(*rsp) + sizeof(u32))
			conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a follow-up query for the fixed channels */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Only the first octet of the map is consumed */
		if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0]))
			conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4672 
4673 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4674 					      struct l2cap_cmd_hdr *cmd,
4675 					      u16 cmd_len, u8 *data)
4676 {
4677 	struct hci_conn *hcon = conn->hcon;
4678 	struct l2cap_conn_param_update_req *req;
4679 	struct l2cap_conn_param_update_rsp rsp;
4680 	u16 min, max, latency, to_multiplier;
4681 	int err;
4682 
4683 	if (hcon->role != HCI_ROLE_MASTER)
4684 		return -EINVAL;
4685 
4686 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4687 		return -EPROTO;
4688 
4689 	req = (struct l2cap_conn_param_update_req *) data;
4690 	min		= __le16_to_cpu(req->min);
4691 	max		= __le16_to_cpu(req->max);
4692 	latency		= __le16_to_cpu(req->latency);
4693 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
4694 
4695 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4696 	       min, max, latency, to_multiplier);
4697 
4698 	memset(&rsp, 0, sizeof(rsp));
4699 
4700 	err = hci_check_conn_params(min, max, latency, to_multiplier);
4701 	if (err)
4702 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4703 	else
4704 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4705 
4706 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4707 		       sizeof(rsp), &rsp);
4708 
4709 	if (!err) {
4710 		u8 store_hint;
4711 
4712 		store_hint = hci_le_conn_update(hcon, min, max, latency,
4713 						to_multiplier);
4714 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
4715 				    store_hint, min, max, latency,
4716 				    to_multiplier);
4717 
4718 	}
4719 
4720 	return 0;
4721 }
4722 
/* Handle an LE Credit Based Connection Response.
 *
 * Matches the response to the pending channel via the command ident.
 * On success the channel adopts the peer's DCID, MTU, MPS and initial
 * credits.  On an authentication/encryption failure the security level
 * is raised so a new connect request can be sent after pairing; any
 * other result tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must respect the LE CoC minimums
	 * (MTU/MPS >= 23) and carry a dynamic LE CID.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4802 
4803 static void l2cap_put_ident(struct l2cap_conn *conn, u8 code, u8 id)
4804 {
4805 	switch (code) {
4806 	case L2CAP_COMMAND_REJ:
4807 	case L2CAP_CONN_RSP:
4808 	case L2CAP_CONF_RSP:
4809 	case L2CAP_DISCONN_RSP:
4810 	case L2CAP_ECHO_RSP:
4811 	case L2CAP_INFO_RSP:
4812 	case L2CAP_CONN_PARAM_UPDATE_RSP:
4813 	case L2CAP_ECRED_CONN_RSP:
4814 	case L2CAP_ECRED_RECONF_RSP:
4815 		/* First do a lookup since the remote may send bogus ids that
4816 		 * would make ida_free to generate warnings.
4817 		 */
4818 		if (ida_find_first_range(&conn->tx_ida, id, id) >= 0)
4819 			ida_free(&conn->tx_ida, id);
4820 	}
4821 }
4822 
/* Dispatch one BR/EDR signalling command to its handler.
 *
 * The ident of any response PDU is returned to the TX ida first, then
 * the opcode selects the handler.  Echo requests are answered inline by
 * reflecting the payload.  Returns a negative errno for malformed or
 * unknown commands so the caller can generate a command reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	/* Frees the ident only for response opcodes */
	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Answer inline: echo the request payload back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4883 
/* Handle an LE Credit Based Connection Request.
 *
 * Validates the SPSM, security level, key size and SCID, then creates a
 * channel from the matching listening socket.  The connection response
 * is sent from the shared exit path at the bottom; a deferred setup
 * (FLAG_DEFER_SETUP) suppresses the response by abusing L2CAP_CR_PEND
 * as an internal sentinel.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* Defaults used by the error responses below */
	dcid = 0;
	credits = 0;

	/* LE CoC minimum MTU/MPS is 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if Key Size is sufficient for the security level */
	if (!l2cap_check_enc_key_size(conn->hcon, pchan)) {
		result = L2CAP_CR_LE_BAD_KEY_SIZE;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	/* Populate the new channel from the link and the request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Values advertised back to the peer in the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred setup: response will be sent later */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5028 
5029 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5030 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5031 				   u8 *data)
5032 {
5033 	struct l2cap_le_credits *pkt;
5034 	struct l2cap_chan *chan;
5035 	u16 cid, credits, max_credits;
5036 
5037 	if (cmd_len != sizeof(*pkt))
5038 		return -EPROTO;
5039 
5040 	pkt = (struct l2cap_le_credits *) data;
5041 	cid	= __le16_to_cpu(pkt->cid);
5042 	credits	= __le16_to_cpu(pkt->credits);
5043 
5044 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5045 
5046 	chan = l2cap_get_chan_by_dcid(conn, cid);
5047 	if (!chan)
5048 		return -EBADSLT;
5049 
5050 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5051 	if (credits > max_credits) {
5052 		BT_ERR("LE credits overflow");
5053 		l2cap_send_disconn_req(chan, ECONNRESET);
5054 
5055 		/* Return 0 so that we don't trigger an unnecessary
5056 		 * command reject packet.
5057 		 */
5058 		goto unlock;
5059 	}
5060 
5061 	chan->tx_credits += credits;
5062 
5063 	/* Resume sending */
5064 	l2cap_le_flowctl_send(chan);
5065 
5066 	if (chan->tx_credits)
5067 		chan->ops->resume(chan);
5068 
5069 unlock:
5070 	l2cap_chan_unlock(chan);
5071 	l2cap_chan_put(chan);
5072 
5073 	return 0;
5074 }
5075 
/* Handle an Enhanced Credit Based Connection Request.
 *
 * Creates up to L2CAP_ECRED_MAX_CID channels in one shot from a single
 * listening socket.  A per-SCID result is accumulated in 'result' while
 * the DCIDs (0 for refused entries) are collected in the response PDU.
 * When any channel defers setup, the response is suppressed and will be
 * sent later.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, rsp_len = 0;
	int i, num_scid = 0;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	memset(pdu, 0, sizeof(*pdu));

	/* The payload must be the fixed header plus a whole number of
	 * 16-bit SCIDs.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	/* Check if there are no pending channels with the same ident */
	__l2cap_chan_list_id(conn, cmd->ident, l2cap_ecred_list_defer,
			     &num_scid);
	if (num_scid) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	/* Always respond with the same number of scids as in the request */
	rsp_len = cmd_len;

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	/* Check if the listening channel has set an output MTU then the
	 * requested MTU shall be less than or equal to that value.
	 */
	if (pchan->omtu && mtu < pchan->omtu) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	/* Per-SCID loop: a failed entry leaves its dcid at 0 and
	 * downgrades 'result', but the remaining SCIDs are still tried.
	 */
	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		pdu->dcid[i] = 0x0000;

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup: the response is sent once userspace accepts */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + rsp_len, pdu);

	return 0;
}
5245 
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks all channels still pending on the request's ident and pairs
 * each with the next DCID from the response.  Channels without a
 * matching DCID, with a duplicate DCID, or refused by the peer are torn
 * down; security failures raise the security level for a retry.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* Remaining length covers only the trailing DCID list */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		/* i was already advanced above, so this logs the 1-based
		 * index of the consumed dcid.
		 */
		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Re-fetch the dcid owner found above and discard
			 * it as well.
			 */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Adopt the peer's parameters and go live */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5355 
/* Handle an Enhanced Credit Based Reconfigure Request.
 *
 * Validates the new MTU/MPS against every channel named in the request
 * and, only if all checks pass, commits the new values to all of them
 * before sending the reconfigure response.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan[L2CAP_ECRED_MAX_CID] = {};
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The payload must be the fixed header plus a whole number of
	 * 16-bit SCIDs.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_RECONF_INVALID_CID;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	result = L2CAP_RECONF_SUCCESS;

	/* Check if each SCID, MTU and MPS are valid */
	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		chan[i] = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan[i]) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		/* The MTU field shall be greater than or equal to the greatest
		 * current MTU size of these channels.
		 */
		if (chan[i]->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan[i],
			       chan[i]->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
			goto respond;
		}

		/* If more than one channel is being configured, the MPS field
		 * shall be greater than or equal to the current MPS size of
		 * each of these channels. If only one channel is being
		 * configured, the MPS field may be less than the current MPS
		 * of that channel.
		 */
		/* NOTE(review): this condition also rejects mps equal to
		 * remote_mps (>=) even though the text above permits
		 * equality, and it exempts only the first listed channel
		 * ("&& i") rather than keying on num_scid > 1 - confirm
		 * the intent against the spec and upstream history.
		 */
		if (chan[i]->remote_mps >= mps && i) {
			BT_ERR("chan %p decreased MPS %u -> %u", chan[i],
			       chan[i]->remote_mps, mps);
			result = L2CAP_RECONF_INVALID_MPS;
			goto respond;
		}
	}

	/* Commit the new MTU and MPS values after checking they are valid */
	for (i = 0; i < num_scid; i++) {
		chan[i]->omtu = mtu;
		chan[i]->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5453 
5454 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5455 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5456 					 u8 *data)
5457 {
5458 	struct l2cap_chan *chan, *tmp;
5459 	struct l2cap_ecred_reconf_rsp *rsp = (void *)data;
5460 	u16 result;
5461 
5462 	if (cmd_len < sizeof(*rsp))
5463 		return -EPROTO;
5464 
5465 	result = __le16_to_cpu(rsp->result);
5466 
5467 	BT_DBG("result 0x%4.4x", result);
5468 
5469 	if (!result)
5470 		return 0;
5471 
5472 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5473 		if (chan->ident != cmd->ident)
5474 			continue;
5475 
5476 		l2cap_chan_del(chan, ECONNRESET);
5477 	}
5478 
5479 	return 0;
5480 }
5481 
5482 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5483 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5484 				       u8 *data)
5485 {
5486 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5487 	struct l2cap_chan *chan;
5488 
5489 	if (cmd_len < sizeof(*rej))
5490 		return -EPROTO;
5491 
5492 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5493 	if (!chan)
5494 		goto done;
5495 
5496 	chan = l2cap_chan_hold_unless_zero(chan);
5497 	if (!chan)
5498 		goto done;
5499 
5500 	l2cap_chan_lock(chan);
5501 	l2cap_chan_del(chan, ECONNREFUSED);
5502 	l2cap_chan_unlock(chan);
5503 	l2cap_chan_put(chan);
5504 
5505 done:
5506 	return 0;
5507 }
5508 
5509 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5510 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5511 				   u8 *data)
5512 {
5513 	int err = 0;
5514 
5515 	l2cap_put_ident(conn, cmd->code, cmd->ident);
5516 
5517 	switch (cmd->code) {
5518 	case L2CAP_COMMAND_REJ:
5519 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5520 		break;
5521 
5522 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5523 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5524 		break;
5525 
5526 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5527 		break;
5528 
5529 	case L2CAP_LE_CONN_RSP:
5530 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5531 		break;
5532 
5533 	case L2CAP_LE_CONN_REQ:
5534 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5535 		break;
5536 
5537 	case L2CAP_LE_CREDITS:
5538 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5539 		break;
5540 
5541 	case L2CAP_ECRED_CONN_REQ:
5542 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
5543 		break;
5544 
5545 	case L2CAP_ECRED_CONN_RSP:
5546 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
5547 		break;
5548 
5549 	case L2CAP_ECRED_RECONF_REQ:
5550 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
5551 		break;
5552 
5553 	case L2CAP_ECRED_RECONF_RSP:
5554 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
5555 		break;
5556 
5557 	case L2CAP_DISCONN_REQ:
5558 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5559 		break;
5560 
5561 	case L2CAP_DISCONN_RSP:
5562 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5563 		break;
5564 
5565 	default:
5566 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5567 		err = -EINVAL;
5568 		break;
5569 	}
5570 
5571 	return err;
5572 }
5573 
5574 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5575 					struct sk_buff *skb)
5576 {
5577 	struct hci_conn *hcon = conn->hcon;
5578 	struct l2cap_cmd_hdr *cmd;
5579 	u16 len;
5580 	int err;
5581 
5582 	if (hcon->type != LE_LINK)
5583 		goto drop;
5584 
5585 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5586 		goto drop;
5587 
5588 	cmd = (void *) skb->data;
5589 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5590 
5591 	len = le16_to_cpu(cmd->len);
5592 
5593 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5594 
5595 	if (len != skb->len || !cmd->ident) {
5596 		BT_DBG("corrupted command");
5597 		goto drop;
5598 	}
5599 
5600 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5601 	if (err) {
5602 		struct l2cap_cmd_rej_unk rej;
5603 
5604 		BT_ERR("Wrong link type (%d)", err);
5605 
5606 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5607 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5608 			       sizeof(rej), &rej);
5609 	}
5610 
5611 drop:
5612 	kfree_skb(skb);
5613 }
5614 
5615 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5616 {
5617 	struct l2cap_cmd_rej_unk rej;
5618 
5619 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5620 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5621 }
5622 
5623 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5624 				     struct sk_buff *skb)
5625 {
5626 	struct hci_conn *hcon = conn->hcon;
5627 	struct l2cap_cmd_hdr *cmd;
5628 	int err;
5629 
5630 	l2cap_raw_recv(conn, skb);
5631 
5632 	if (hcon->type != ACL_LINK)
5633 		goto drop;
5634 
5635 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
5636 		u16 len;
5637 
5638 		cmd = (void *) skb->data;
5639 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5640 
5641 		len = le16_to_cpu(cmd->len);
5642 
5643 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
5644 		       cmd->ident);
5645 
5646 		if (len > skb->len || !cmd->ident) {
5647 			BT_DBG("corrupted command");
5648 			l2cap_sig_send_rej(conn, cmd->ident);
5649 			skb_pull(skb, len > skb->len ? skb->len : len);
5650 			continue;
5651 		}
5652 
5653 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
5654 		if (err) {
5655 			BT_ERR("Wrong link type (%d)", err);
5656 			l2cap_sig_send_rej(conn, cmd->ident);
5657 		}
5658 
5659 		skb_pull(skb, len);
5660 	}
5661 
5662 	if (skb->len > 0) {
5663 		BT_DBG("corrupted command");
5664 		l2cap_sig_send_rej(conn, 0);
5665 	}
5666 
5667 drop:
5668 	kfree_skb(skb);
5669 }
5670 
/* Verify and strip the CRC16 FCS trailer of a received ERTM/streaming PDU.
 *
 * The CRC covers the L2CAP basic header (which lies just before skb->data
 * once the header has been pulled) plus the control field and payload.
 * Returns 0 when the FCS matches or when the channel does not use an FCS,
 * -EBADMSG on a checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields enlarge the header by two bytes */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS first; its two bytes remain in the buffer
		 * right after the new skb->len, so they can still be read
		 * back for comparison against the locally computed CRC.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5691 
/* Answer a poll (P=1) from the peer with an F=1 frame.
 *
 * Sends RNR if we are locally busy, otherwise tries to carry the F-bit
 * on pending I-frames; if no I-frame went out with the F-bit still
 * pending, a plain RR with F=1 is sent as a last resort.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy has cleared; restart retransmission for frames
	 * still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5725 
/* Chain a received fragment onto a partially reassembled SDU.
 *
 * @skb:       head skb of the SDU under reassembly
 * @new_frag:  fragment to append; ownership is taken by the SDU
 * @last_frag: cursor to the current tail, advanced to @new_frag
 *
 * The head skb's length and truesize accounting is updated to cover the
 * appended fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* *last_frag is the head skb itself on the first append, a prior
	 * fragment afterwards; either way it serves as the tail cursor.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5744 
/* Reassemble an SDU from segmented I-frames per the control field's SAR
 * bits and deliver the completed SDU via chan->ops->recv().
 *
 * On success (err == 0) ownership of @skb passes to the SDU/receiver and
 * the caller must not free it.  On error the partially assembled SDU and
 * @skb are both freed here and the reassembly state is reset; the error
 * code is returned (-EINVAL for a SAR sequencing violation, -EMSGSIZE
 * when the announced SDU length exceeds the channel MTU, or whatever
 * ops->recv returned).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU in one frame; invalid while reassembling */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* First segment; invalid while a reassembly is in flight */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* The start segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already hold the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by the reassembly state */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation is only valid with a reassembly in flight */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End segment is only valid with a reassembly in flight */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The completed SDU must match the announced length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU, reset state */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5829 
/* Placeholder for resegmenting queued data after an MTU change; there is
 * currently nothing to do, so always report success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5835 
5836 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5837 {
5838 	u8 event;
5839 
5840 	if (chan->mode != L2CAP_MODE_ERTM)
5841 		return;
5842 
5843 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5844 	l2cap_tx(chan, NULL, NULL, event);
5845 }
5846 
/* Drain the SREJ hold queue of frames that have become in-sequence.
 *
 * Starting at buffer_seq, consecutive frames are pulled from srej_q and
 * delivered to reassembly.  Stops on a sequence gap, a reassembly error,
 * or local busy.  Once srej_q is empty the channel returns to the plain
 * RECV state and the peer is acknowledged.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5880 
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * selectively rejected.
 *
 * Disconnects on a reqseq that was never sent or when the per-frame
 * retry limit is exhausted.  The P/F bits determine whether the
 * retransmission also answers a poll and whether a duplicate SREJ
 * (already acted upon) should be suppressed.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq names a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P=1: retransmit and answer the poll (F-bit) */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember which SREJ was answered so a duplicate with
		 * F=1 is not retransmitted again.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this SREJ was
			 * already acted on when it carried P=1.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5938 
/* Handle a received REJ S-frame: the peer requests retransmission of all
 * frames from reqseq onwards.
 *
 * Disconnects on a reqseq that was never sent or when the rejected
 * frame's retry budget is spent.  With F=1 the retransmission is only
 * performed if it was not already triggered while waiting for the
 * F-bit (CONN_REJ_ACT).
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq names a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		/* If now polling, suppress a second retransmit-all when
		 * the matching F=1 frame arrives.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5975 
/* Classify a received I-frame's txseq relative to the receive window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications: expected in-order
 * data, a duplicate, an unexpected (gap-creating) frame, one of the
 * SREJ-specific results while in SREJ_SENT state, or invalid (either
 * ignorable or fatal depending on the tx window size, see the "double
 * poll" discussion below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6061 
/* ERTM receiver state machine handler for the RECV state.
 *
 * Processes I-frames (delivering in-order data, queueing out-of-order
 * frames and entering SREJ_SENT on a gap) and S-frames (RR/RNR/REJ/SREJ).
 * Frames not taken over by the state machine (skb_in_use stays false)
 * are freed before returning.  Returns 0 or a reassembly/delivery error.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Duplicate data is dropped, but its ack info
			 * is still passed to the transmitter.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6213 
/* ERTM receiver state machine handler for the SREJ_SENT state.
 *
 * While selective rejects are outstanding, received I-frames are staged
 * on srej_q; frames answering the head of the SREJ list trigger a drain
 * of the queue via l2cap_rx_queued_iframes().  S-frames are handled much
 * as in RECV, with SREJ-tail polling where appropriate.  Frames the
 * state machine does not keep (skb_in_use stays false) are freed here.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The frame at the head of the SREJ list arrived;
			 * drop it from the list and try to drain the queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest
			 * outstanding SREJ with the F-bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6356 
6357 static int l2cap_finish_move(struct l2cap_chan *chan)
6358 {
6359 	BT_DBG("chan %p", chan);
6360 
6361 	chan->rx_state = L2CAP_RX_STATE_RECV;
6362 	chan->conn->mtu = chan->conn->hcon->mtu;
6363 
6364 	return l2cap_resegment(chan);
6365 }
6366 
/* ERTM receiver state machine handler for the WAIT_P state (waiting for
 * a poll from the peer after a channel move).
 *
 * Only a frame with P=1 is accepted; it is used to resynchronize the
 * transmit side to the peer's reqseq, finish the move, answer the poll
 * with F=1 and then process the event in the RECV state.  I-frames are
 * not valid here and yield -EPROTO.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6404 
/* ERTM receiver state machine handler for the WAIT_F state (waiting for
 * the peer's final response after we sent a poll during a channel move).
 *
 * Only a frame with F=1 is accepted (-EPROTO otherwise).  The transmit
 * side is resynchronized to the peer's reqseq, the link MTU refreshed,
 * pending data resegmented, and the event is then replayed through the
 * RECV state handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6438 
6439 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6440 {
6441 	/* Make sure reqseq is for a packet that has been sent but not acked */
6442 	u16 unacked;
6443 
6444 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6445 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6446 }
6447 
6448 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6449 		    struct sk_buff *skb, u8 event)
6450 {
6451 	int err = 0;
6452 
6453 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6454 	       control, skb, event, chan->rx_state);
6455 
6456 	if (__valid_reqseq(chan, control->reqseq)) {
6457 		switch (chan->rx_state) {
6458 		case L2CAP_RX_STATE_RECV:
6459 			err = l2cap_rx_state_recv(chan, control, skb, event);
6460 			break;
6461 		case L2CAP_RX_STATE_SREJ_SENT:
6462 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6463 						       event);
6464 			break;
6465 		case L2CAP_RX_STATE_WAIT_P:
6466 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6467 			break;
6468 		case L2CAP_RX_STATE_WAIT_F:
6469 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6470 			break;
6471 		default:
6472 			/* shut it down */
6473 			break;
6474 		}
6475 	} else {
6476 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6477 		       control->reqseq, chan->next_tx_seq,
6478 		       chan->expected_ack_seq);
6479 		l2cap_send_disconn_req(chan, ECONNRESET);
6480 	}
6481 
6482 	return err;
6483 }
6484 
/* Receive an I-frame in streaming mode.
 *
 * An in-order frame is delivered to reassembly; anything else discards
 * both the frame and any partially reassembled SDU (streaming mode has
 * no retransmission).  The receive sequence state is advanced past the
 * received txseq in either case.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: abandon any partial SDU and drop
		 * the frame itself.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6536 
/* Validate and dispatch a received ERTM/streaming frame.
 *
 * Unpacks the control field, verifies the FCS, checks the payload
 * against the MPS, applies an optional channel filter and then routes
 * I-frames into the ERTM or streaming receive path and S-frames into
 * the ERTM state machine.  Invalid frames are dropped (and, where the
 * spec demands it, the channel is disconnected).  The skb is consumed
 * on all paths; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude SDU-length and FCS bytes from the payload-size check */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the S-frame super field to the rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6629 
6630 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6631 {
6632 	struct l2cap_conn *conn = chan->conn;
6633 	struct l2cap_le_credits pkt;
6634 	u16 return_credits = l2cap_le_rx_credits(chan);
6635 
6636 	if (chan->mode != L2CAP_MODE_LE_FLOWCTL &&
6637 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL)
6638 		return;
6639 
6640 	if (chan->rx_credits >= return_credits)
6641 		return;
6642 
6643 	return_credits -= chan->rx_credits;
6644 
6645 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6646 
6647 	chan->rx_credits += return_credits;
6648 
6649 	pkt.cid     = cpu_to_le16(chan->scid);
6650 	pkt.credits = cpu_to_le16(return_credits);
6651 
6652 	chan->ident = l2cap_get_ident(conn);
6653 
6654 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6655 }
6656 
6657 void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
6658 {
6659 	if (chan->rx_avail == rx_avail)
6660 		return;
6661 
6662 	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
6663 
6664 	chan->rx_avail = rx_avail;
6665 
6666 	if (chan->state == BT_CONNECTED)
6667 		l2cap_chan_le_send_credits(chan);
6668 }
6669 
6670 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6671 {
6672 	int err;
6673 
6674 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6675 
6676 	/* Wait recv to confirm reception before updating the credits */
6677 	err = chan->ops->recv(chan, skb);
6678 
6679 	if (err < 0 && chan->rx_avail != -1) {
6680 		BT_ERR("Queueing received LE L2CAP data failed");
6681 		l2cap_send_disconn_req(chan, ECONNRESET);
6682 		return err;
6683 	}
6684 
6685 	/* Update credits whenever an SDU is received */
6686 	l2cap_chan_le_send_credits(chan);
6687 
6688 	return err;
6689 }
6690 
6691 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6692 {
6693 	int err;
6694 
6695 	if (!chan->rx_credits) {
6696 		BT_ERR("No credits to receive LE L2CAP data");
6697 		l2cap_send_disconn_req(chan, ECONNRESET);
6698 		return -ENOBUFS;
6699 	}
6700 
6701 	if (skb->len > chan->imtu) {
6702 		BT_ERR("Too big LE L2CAP PDU: len %u > %u", skb->len,
6703 		       chan->imtu);
6704 		l2cap_send_disconn_req(chan, ECONNRESET);
6705 		return -ENOBUFS;
6706 	}
6707 
6708 	chan->rx_credits--;
6709 	BT_DBG("chan %p: rx_credits %u -> %u",
6710 	       chan, chan->rx_credits + 1, chan->rx_credits);
6711 
6712 	/* Update if remote had run out of credits, this should only happens
6713 	 * if the remote is not using the entire MPS.
6714 	 */
6715 	if (!chan->rx_credits)
6716 		l2cap_chan_le_send_credits(chan);
6717 
6718 	err = 0;
6719 
6720 	if (!chan->sdu) {
6721 		u16 sdu_len;
6722 
6723 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) {
6724 			err = -EINVAL;
6725 			goto failed;
6726 		}
6727 
6728 		sdu_len = get_unaligned_le16(skb->data);
6729 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6730 
6731 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6732 		       sdu_len, skb->len, chan->imtu);
6733 
6734 		if (sdu_len > chan->imtu) {
6735 			BT_ERR("Too big LE L2CAP SDU length: len %u > %u",
6736 			       skb->len, sdu_len);
6737 			l2cap_send_disconn_req(chan, ECONNRESET);
6738 			err = -EMSGSIZE;
6739 			goto failed;
6740 		}
6741 
6742 		if (skb->len > sdu_len) {
6743 			BT_ERR("Too much LE L2CAP data received");
6744 			err = -EINVAL;
6745 			goto failed;
6746 		}
6747 
6748 		if (skb->len == sdu_len)
6749 			return l2cap_ecred_recv(chan, skb);
6750 
6751 		chan->sdu = skb;
6752 		chan->sdu_len = sdu_len;
6753 		chan->sdu_last_frag = skb;
6754 
6755 		/* Detect if remote is not able to use the selected MPS */
6756 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6757 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6758 
6759 			/* Adjust the number of credits */
6760 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6761 			chan->mps = mps_len;
6762 			l2cap_chan_le_send_credits(chan);
6763 		}
6764 
6765 		return 0;
6766 	}
6767 
6768 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6769 	       chan->sdu->len, skb->len, chan->sdu_len);
6770 
6771 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6772 		BT_ERR("Too much LE L2CAP data received");
6773 		l2cap_send_disconn_req(chan, ECONNRESET);
6774 		err = -EINVAL;
6775 		goto failed;
6776 	}
6777 
6778 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6779 	skb = NULL;
6780 
6781 	if (chan->sdu->len == chan->sdu_len) {
6782 		err = l2cap_ecred_recv(chan, chan->sdu);
6783 		if (!err) {
6784 			chan->sdu = NULL;
6785 			chan->sdu_last_frag = NULL;
6786 			chan->sdu_len = 0;
6787 		}
6788 	}
6789 
6790 failed:
6791 	if (err) {
6792 		kfree_skb(skb);
6793 		kfree_skb(chan->sdu);
6794 		chan->sdu = NULL;
6795 		chan->sdu_last_frag = NULL;
6796 		chan->sdu_len = 0;
6797 	}
6798 
6799 	/* We can't return an error here since we took care of the skb
6800 	 * freeing internally. An error return would cause the caller to
6801 	 * do a double-free of the skb.
6802 	 */
6803 	return 0;
6804 }
6805 
/* Dispatch an incoming PDU on a connection-oriented or fixed channel.
 *
 * l2cap_get_chan_by_scid() hands the channel back locked and with a
 * reference held; both are released on the done path. Ownership of
 * @skb passes to the per-mode receive path or the skb is freed on the
 * drop path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* A negative return means the skb is still ours to free */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6871 
/* Deliver a connectionless (G-frame) PDU to a matching listening
 * channel. BR/EDR links only; the skb is consumed on every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* The lookup returns the channel with a reference held */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6912 
/* Demultiplex a complete L2CAP frame to the right channel handler.
 *
 * Frames that arrive before the HCI link reaches BT_CONNECTED are
 * queued on conn->pending_rx and replayed by process_pending_rx().
 * Ownership of @skb always moves on to a handler or the skb is freed
 * here.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh keeps pointing at the header bytes after the pull */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6967 
6968 static void process_pending_rx(struct work_struct *work)
6969 {
6970 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6971 					       pending_rx_work);
6972 	struct sk_buff *skb;
6973 
6974 	BT_DBG("");
6975 
6976 	mutex_lock(&conn->lock);
6977 
6978 	while ((skb = skb_dequeue(&conn->pending_rx)))
6979 		l2cap_recv_frame(conn, skb);
6980 
6981 	mutex_unlock(&conn->lock);
6982 }
6983 
/* Find or create the L2CAP connection state for @hcon.
 *
 * Returns the existing conn if one is already attached; otherwise
 * allocates a new one (holding an HCI channel and a reference on
 * @hcon) and initializes its locks, lists and work items. Returns
 * NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc_obj(*conn);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when Secure Connections (or the
	 * debug force flag) makes it usable.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
	ida_init(&conn->tx_ida);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	/* Default disconnect reason until signaling tells us otherwise */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7035 
7036 static bool is_valid_psm(u16 psm, u8 dst_type)
7037 {
7038 	if (!psm)
7039 		return false;
7040 
7041 	if (bdaddr_type_is_le(dst_type))
7042 		return (psm <= 0x00ff);
7043 
7044 	/* PSM must be odd and lsb of upper byte must be 0 */
7045 	return ((psm & 0x0101) == 0x0001);
7046 }
7047 
/* Context for the l2cap_chan_by_pid() channel-list walk */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the walk (skipped) */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* matching channels found so far */
};
7053 
7054 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7055 {
7056 	struct l2cap_chan_data *d = data;
7057 	struct pid *pid;
7058 
7059 	if (chan == d->chan)
7060 		return;
7061 
7062 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7063 		return;
7064 
7065 	pid = chan->ops->get_peer_pid(chan);
7066 
7067 	/* Only count deferred channels with the same PID/PSM */
7068 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7069 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7070 		return;
7071 
7072 	d->count++;
7073 }
7074 
/* Initiate an outgoing L2CAP channel to @dst.
 *
 * @chan:     channel to connect; must be in BT_OPEN/BT_BOUND (already
 *            connecting states return 0, BT_CONNECTED returns -EISCONN)
 * @psm:      protocol/service multiplexer (little endian), required for
 *            connection-oriented channels
 * @cid:      destination CID, required for fixed channels
 * @dst:      remote address
 * @dst_type: remote address type (BR/EDR or LE public/random)
 * @timeout:  connection timeout passed down to the HCI layer
 *
 * Looks up a suitable controller, validates the psm/cid/mode
 * combination, creates (or reuses) the ACL/LE link and attaches the
 * channel to it. Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled in this build/configuration */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* Fixed channels may not share a destination CID on this link */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7259 
7260 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7261 {
7262 	struct l2cap_conn *conn = chan->conn;
7263 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7264 
7265 	pdu->mtu = cpu_to_le16(chan->imtu);
7266 	pdu->mps = cpu_to_le16(chan->mps);
7267 	pdu->scid[0] = cpu_to_le16(chan->scid);
7268 
7269 	chan->ident = l2cap_get_ident(conn);
7270 
7271 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7272 		       sizeof(pdu), &pdu);
7273 }
7274 
7275 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7276 {
7277 	if (chan->imtu > mtu)
7278 		return -EINVAL;
7279 
7280 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
7281 
7282 	chan->imtu = mtu;
7283 
7284 	l2cap_ecred_reconfigure(chan);
7285 
7286 	return 0;
7287 }
7288 
7289 /* ---- L2CAP interface with lower layer (HCI) ---- */
7290 
7291 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7292 {
7293 	int exact = 0, lm1 = 0, lm2 = 0;
7294 	struct l2cap_chan *c;
7295 
7296 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7297 
7298 	/* Find listening sockets and check their link_mode */
7299 	read_lock(&chan_list_lock);
7300 	list_for_each_entry(c, &chan_list, global_l) {
7301 		if (c->state != BT_LISTEN)
7302 			continue;
7303 
7304 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7305 			lm1 |= HCI_LM_ACCEPT;
7306 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7307 				lm1 |= HCI_LM_MASTER;
7308 			exact++;
7309 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7310 			lm2 |= HCI_LM_ACCEPT;
7311 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7312 				lm2 |= HCI_LM_MASTER;
7313 		}
7314 	}
7315 	read_unlock(&chan_list_lock);
7316 
7317 	return exact ? lm1 : lm2;
7318 }
7319 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Listener must match our source address or be a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Hand back the channel with a reference held (NULL if it
		 * was already being freed).
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7355 
/* HCI callback: an ACL/LE link setup completed (or failed).
 *
 * On failure any L2CAP state for the link is torn down. On success the
 * conn is created (if needed), fixed-channel listeners get a chance to
 * spawn channels on the new link, and connection-ready processing is
 * kicked off.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7416 
7417 int l2cap_disconn_ind(struct hci_conn *hcon)
7418 {
7419 	struct l2cap_conn *conn = hcon->l2cap_data;
7420 
7421 	BT_DBG("hcon %p", hcon);
7422 
7423 	if (!conn)
7424 		return HCI_ERROR_REMOTE_USER_TERM;
7425 	return conn->disc_reason;
7426 }
7427 
7428 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7429 {
7430 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7431 		return;
7432 
7433 	BT_DBG("hcon %p reason %d", hcon, reason);
7434 
7435 	l2cap_conn_del(hcon, bt_to_errno(reason));
7436 }
7437 
7438 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7439 {
7440 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7441 		return;
7442 
7443 	if (encrypt == 0x00) {
7444 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7445 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7446 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7447 			   chan->sec_level == BT_SECURITY_FIPS)
7448 			l2cap_chan_close(chan, ECONNREFUSED);
7449 	} else {
7450 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7451 			__clear_chan_timer(chan);
7452 	}
7453 }
7454 
/* HCI callback: authentication/encryption status changed on the link.
 *
 * Walks every channel on the connection and advances its state
 * machine: resumes connected/configuring channels, (re)starts pending
 * connects, answers incoming connections that were held for security
 * and triggers configuration where security now permits it.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption upgrades the channel security level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR connection held for security:
			 * answer it now that the outcome is known.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Acceptance also kicks off configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7541 
7542 /* Append fragment into frame respecting the maximum len of rx_skb */
7543 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7544 			   u16 len)
7545 {
7546 	if (!conn->rx_skb) {
7547 		/* Allocate skb for the complete frame (with header) */
7548 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7549 		if (!conn->rx_skb)
7550 			return -ENOMEM;
7551 		/* Init rx_len */
7552 		conn->rx_len = len;
7553 
7554 		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
7555 				      skb->tstamp_type);
7556 	}
7557 
7558 	/* Copy as much as the rx_skb can hold */
7559 	len = min_t(u16, len, skb->len);
7560 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7561 	skb_pull(skb, len);
7562 	conn->rx_len -= len;
7563 
7564 	return len;
7565 }
7566 
/* Complete the 2-octet L2CAP length field of a fragmented frame and
 * make sure conn->rx_skb can hold the whole frame.
 *
 * The initial rx_skb may have been sized with conn->mtu as a guess
 * (start fragment shorter than the length field); once the real
 * length is known, the skb is reallocated if it is too small.
 * Returns a negative errno on failure, otherwise the number of bytes
 * consumed from @skb.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7601 
/* Drop any partially reassembled frame and reset the reassembly state */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
7608 
7609 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7610 {
7611 	if (!c)
7612 		return NULL;
7613 
7614 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7615 
7616 	if (!kref_get_unless_zero(&c->ref))
7617 		return NULL;
7618 
7619 	return c;
7620 }
7621 
/* HCI entry point for incoming ACL data on @handle.
 *
 * Reassembles HCI fragments (ACL_START/ACL_CONT) into complete L2CAP
 * frames and passes them to l2cap_recv_frame(). Consumes @skb on all
 * paths.
 */
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
		       struct sk_buff *skb, u16 flags)
{
	struct hci_conn *hcon;
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		kfree_skb(skb);
		return -ENOENT;
	}

	hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Keep the conn alive past hci_dev_unlock(); hcon must not be
	 * used after the unlock, so poison it.
	 */
	conn = l2cap_conn_hold_unless_zero(conn);
	hcon = NULL;

	hci_dev_unlock(hdev);

	if (!conn) {
		kfree_skb(skb);
		return -EINVAL;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
	return 0;
}
7769 
/* Callbacks registered with the HCI core: invoked on ACL link
 * connect/disconnect confirmation and on security (authentication/
 * encryption) state changes for the link.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7776 
7777 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7778 {
7779 	struct l2cap_chan *c;
7780 
7781 	read_lock(&chan_list_lock);
7782 
7783 	list_for_each_entry(c, &chan_list, global_l) {
7784 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7785 			   &c->src, c->src_type, &c->dst, c->dst_type,
7786 			   c->state, __le16_to_cpu(c->psm),
7787 			   c->scid, c->dcid, c->imtu, c->omtu,
7788 			   c->sec_level, c->mode);
7789 	}
7790 
7791 	read_unlock(&chan_list_lock);
7792 
7793 	return 0;
7794 }
7795 
/* Generates l2cap_debugfs_open/l2cap_debugfs_fops around
 * l2cap_debugfs_show for use with debugfs_create_file() below.
 */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Dentry of the "l2cap" file under the bluetooth debugfs root;
 * created in l2cap_init(), removed in l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
7799 
7800 int __init l2cap_init(void)
7801 {
7802 	int err;
7803 
7804 	err = l2cap_init_sockets();
7805 	if (err < 0)
7806 		return err;
7807 
7808 	hci_register_cb(&l2cap_cb);
7809 
7810 	if (IS_ERR_OR_NULL(bt_debugfs))
7811 		return 0;
7812 
7813 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7814 					    NULL, &l2cap_debugfs_fops);
7815 
7816 	return 0;
7817 }
7818 
/* Tear down the L2CAP layer in the reverse order of l2cap_init():
 * remove the debugfs file, detach from the HCI core, then release
 * the socket interface.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7825 
/* Module parameters, writable at runtime via sysfs (mode 0644).
 * disable_ertm: turn off Enhanced Retransmission Mode support.
 * enable_ecred: enable LE enhanced credit-based flow control
 * (default taken from CONFIG_BT_LE_L2CAP_ECRED).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7831