xref: /linux/net/bluetooth/l2cap_core.c (revision 38c6104e0bc7c8af20ab4897cb0504e3339e4fe4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
/* Socket-level address type of the local (source) end of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* Socket-level address type of the remote (destination) end of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 						   u16 cid)
92 {
93 	struct l2cap_chan *c;
94 
95 	list_for_each_entry(c, &conn->chan_l, list) {
96 		if (c->dcid == cid)
97 			return c;
98 	}
99 	return NULL;
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						   u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	list_for_each_entry(c, &conn->chan_l, list) {
108 		if (c->scid == cid)
109 			return c;
110 	}
111 	return NULL;
112 }
113 
114 /* Find channel with given SCID.
115  * Returns a reference locked channel.
116  */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c) {
124 		/* Only lock if chan reference is not 0 */
125 		c = l2cap_chan_hold_unless_zero(c);
126 		if (c)
127 			l2cap_chan_lock(c);
128 	}
129 
130 	return c;
131 }
132 
133 /* Find channel with given DCID.
134  * Returns a reference locked channel.
135  */
136 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 						 u16 cid)
138 {
139 	struct l2cap_chan *c;
140 
141 	c = __l2cap_get_chan_by_dcid(conn, cid);
142 	if (c) {
143 		/* Only lock if chan reference is not 0 */
144 		c = l2cap_chan_hold_unless_zero(c);
145 		if (c)
146 			l2cap_chan_lock(c);
147 	}
148 
149 	return c;
150 }
151 
152 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 						    u8 ident)
154 {
155 	struct l2cap_chan *c;
156 
157 	list_for_each_entry(c, &conn->chan_l, list) {
158 		if (c->ident == ident)
159 			return c;
160 	}
161 	return NULL;
162 }
163 
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
165 						      u8 src_type)
166 {
167 	struct l2cap_chan *c;
168 
169 	list_for_each_entry(c, &chan_list, global_l) {
170 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
171 			continue;
172 
173 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
174 			continue;
175 
176 		if (c->sport == psm && !bacmp(&c->src, src))
177 			return c;
178 	}
179 	return NULL;
180 }
181 
/* Bind @chan to PSM @psm on source address @src.
 *
 * A non-zero @psm is claimed directly if not already in use.  With
 * @psm == 0 a free dynamic PSM is allocated: odd values stepped by 2
 * for BR/EDR (per the valid BR/EDR PSM encoding), contiguous values
 * for LE.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* First PSM with no existing (psm, src) binding wins */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
/* Attach a fixed source CID to @chan and switch it to fixed-channel
 * mode.  Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241 
242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Move @chan to @state and notify the owner through the state_change
 * callback (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
/* Move @chan to @state and report @err through the state_change
 * callback in a single step.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274 
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
280 static void __set_retrans_timer(struct l2cap_chan *chan)
281 {
282 	if (!delayed_work_pending(&chan->monitor_timer) &&
283 	    chan->retrans_timeout) {
284 		l2cap_set_timer(chan, &chan->retrans_timer,
285 				msecs_to_jiffies(chan->retrans_timeout));
286 	}
287 }
288 
289 static void __set_monitor_timer(struct l2cap_chan *chan)
290 {
291 	__clear_retrans_timer(chan);
292 	if (chan->monitor_timeout) {
293 		l2cap_set_timer(chan, &chan->monitor_timer,
294 				msecs_to_jiffies(chan->monitor_timeout));
295 	}
296 }
297 
298 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
299 					       u16 seq)
300 {
301 	struct sk_buff *skb;
302 
303 	skb_queue_walk(head, skb) {
304 		if (bt_cb(skb)->l2cap.txseq == seq)
305 			return skb;
306 	}
307 
308 	return NULL;
309 }
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
322 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
323 {
324 	size_t alloc_size, i;
325 
326 	/* Allocated size is a power of 2 to map sequence numbers
327 	 * (which may be up to 14 bits) in to a smaller array that is
328 	 * sized for the negotiated ERTM transmit windows.
329 	 */
330 	alloc_size = roundup_pow_of_two(size);
331 
332 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
333 	if (!seq_list->list)
334 		return -ENOMEM;
335 
336 	seq_list->mask = alloc_size - 1;
337 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 	for (i = 0; i < alloc_size; i++)
340 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
341 
342 	return 0;
343 }
344 
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* Return true if @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of @seq_list.
 * NOTE(review): there is no empty-list guard here — popping an empty
 * list would dereference the CLEAR sentinel slot; presumably callers
 * check head != L2CAP_SEQ_LIST_CLEAR first.  TODO confirm call sites.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* New head is whatever the old head linked to; clear the slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the tail entry: the list is now empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
374 {
375 	u16 i;
376 
377 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
378 		return;
379 
380 	for (i = 0; i <= seq_list->mask; i++)
381 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382 
383 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
385 }
386 
/* Append @seq to the tail of @seq_list.  Duplicate appends are ignored
 * since membership is tracked in the array itself.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;	/* list was empty */
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Delayed-work handler for the channel timer: close the channel with
 * an error chosen from its current state, then release the reference
 * taken when the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Channel may already be detached from its connection */
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* Established or connecting channels time out as refused;
	 * everything else as a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);	/* drop the timer's reference */

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialise a new channel, register it on the global
 * channel list and return it with one reference held (BT_OPEN state).
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback, invoked from l2cap_chan_put() when the last
 * reference is dropped: unlink from the global list and free.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an additional reference on @c; caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500 
501 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
502 {
503 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
504 
505 	if (!kref_get_unless_zero(&c->kref))
506 		return NULL;
507 
508 	return c;
509 }
510 
/* Drop a reference on @c; frees it via l2cap_chan_destroy() when the
 * last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518 
/* Reset @chan to the protocol default configuration values (ERTM
 * windows/timeouts, FCS, security level) and clear any configuration
 * state from a previous use.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539 
540 static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
541 {
542 	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
543 
544 	if (chan->mps == 0)
545 		return 0;
546 
547 	/* If we don't know the available space in the receiver buffer, give
548 	 * enough credits for a full packet.
549 	 */
550 	if (chan->rx_avail == -1)
551 		return (chan->imtu / chan->mps) + 1;
552 
553 	/* If we know how much space is available in the receive buffer, give
554 	 * out as many credits as would fill the buffer.
555 	 */
556 	if (chan->rx_avail <= sdu_len)
557 		return 0;
558 
559 	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
560 }
561 
/* Reset credit-based flow-control state for @chan: clear SDU
 * reassembly, set the initial TX credits from the peer and derive MPS
 * and initial RX credits.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574 
575 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
576 {
577 	l2cap_le_flowctl_init(chan, tx_credits);
578 
579 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
580 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
581 		chan->mps = L2CAP_ECRED_MIN_MPS;
582 		chan->rx_credits = l2cap_le_rx_credits(chan);
583 	}
584 }
585 
/* Attach @chan to @conn: assign CIDs according to the channel type,
 * set default EFS parameters and link the channel into the
 * connection's channel list.  Takes a channel reference and, except
 * for fixed channels that did not ask for it, an hci_conn reference.
 * Caller must hold conn->lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
637 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
644 
/* Detach @chan from its connection with error @err: stop the channel
 * timer, tear down the owner state, unlink from the connection and
 * drop the references taken in __l2cap_chan_add(), then purge any
 * mode-specific queues and timers.  Caller must hold the channel lock.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Nothing mode-specific was set up yet */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
702 
/* Call @func on every channel of @conn whose pending command ident
 * matches @id.  Uses the _safe iterator since @func may unlink the
 * channel from the list.  Presumably called under conn->lock, like
 * __l2cap_chan_list() — TODO confirm at call sites.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
713 
714 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
715 			      void *data)
716 {
717 	struct l2cap_chan *chan;
718 
719 	list_for_each_entry(chan, &conn->chan_l, list) {
720 		func(chan, data);
721 	}
722 }
723 
724 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
725 		     void *data)
726 {
727 	if (!conn)
728 		return;
729 
730 	mutex_lock(&conn->lock);
731 	__l2cap_chan_list(conn, func, data);
732 	mutex_unlock(&conn->lock);
733 }
734 
735 EXPORT_SYMBOL_GPL(l2cap_chan_list);
736 
/* Delayed-work handler: propagate the hci_conn's (possibly updated)
 * destination identity address to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
755 
756 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
757 {
758 	struct l2cap_conn *conn = chan->conn;
759 	struct l2cap_le_conn_rsp rsp;
760 	u16 result;
761 
762 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
763 		result = L2CAP_CR_LE_AUTHORIZATION;
764 	else
765 		result = L2CAP_CR_LE_BAD_PSM;
766 
767 	l2cap_state_change(chan, BT_DISCONN);
768 
769 	rsp.dcid    = cpu_to_le16(chan->scid);
770 	rsp.mtu     = cpu_to_le16(chan->imtu);
771 	rsp.mps     = cpu_to_le16(chan->mps);
772 	rsp.credits = cpu_to_le16(chan->rx_credits);
773 	rsp.result  = cpu_to_le16(result);
774 
775 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
776 		       &rsp);
777 }
778 
/* Reject a pending enhanced-credit (ECRED) connection: move @chan to
 * BT_DISCONN and send the deferred connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
785 
786 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
787 {
788 	struct l2cap_conn *conn = chan->conn;
789 	struct l2cap_conn_rsp rsp;
790 	u16 result;
791 
792 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
793 		result = L2CAP_CR_SEC_BLOCK;
794 	else
795 		result = L2CAP_CR_BAD_PSM;
796 
797 	l2cap_state_change(chan, BT_DISCONN);
798 
799 	rsp.scid   = cpu_to_le16(chan->dcid);
800 	rsp.dcid   = cpu_to_le16(chan->scid);
801 	rsp.result = cpu_to_le16(result);
802 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
803 
804 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
805 }
806 
807 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
808 {
809 	struct l2cap_conn *conn = chan->conn;
810 
811 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
812 
813 	switch (chan->state) {
814 	case BT_LISTEN:
815 		chan->ops->teardown(chan, 0);
816 		break;
817 
818 	case BT_CONNECTED:
819 	case BT_CONFIG:
820 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
821 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
822 			l2cap_send_disconn_req(chan, reason);
823 		} else
824 			l2cap_chan_del(chan, reason);
825 		break;
826 
827 	case BT_CONNECT2:
828 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
829 			if (conn->hcon->type == ACL_LINK)
830 				l2cap_chan_connect_reject(chan);
831 			else if (conn->hcon->type == LE_LINK) {
832 				switch (chan->mode) {
833 				case L2CAP_MODE_LE_FLOWCTL:
834 					l2cap_chan_le_connect_reject(chan);
835 					break;
836 				case L2CAP_MODE_EXT_FLOWCTL:
837 					l2cap_chan_ecred_connect_reject(chan);
838 					return;
839 				}
840 			}
841 		}
842 
843 		l2cap_chan_del(chan, reason);
844 		break;
845 
846 	case BT_CONNECT:
847 	case BT_DISCONN:
848 		l2cap_chan_del(chan, reason);
849 		break;
850 
851 	default:
852 		chan->ops->teardown(chan, 0);
853 		break;
854 	}
855 }
856 EXPORT_SYMBOL(l2cap_chan_close);
857 
/* Derive the HCI authentication requirement for @chan from its channel
 * type, PSM and security level.  Note: may raise sec_level from LOW to
 * SDP for the SDP PSM and the 3DSP connectionless PSM (side effect).
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (signalling-only) sockets: dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP connections never require bonding */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		/* Everything else: general bonding per security level */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
909 
/* Service level security */
/* Request the link security required by @chan: SMP pairing on LE
 * links, HCI authentication/encryption on BR/EDR.  Return value comes
 * from smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
924 
925 static u8 l2cap_get_ident(struct l2cap_conn *conn)
926 {
927 	u8 id;
928 
929 	/* Get next available identificator.
930 	 *    1 - 128 are used by kernel.
931 	 *  129 - 199 are reserved.
932 	 *  200 - 254 are used by utilities like l2ping, etc.
933 	 */
934 
935 	mutex_lock(&conn->ident_lock);
936 
937 	if (++conn->tx_ident > 128)
938 		conn->tx_ident = 1;
939 
940 	id = conn->tx_ident;
941 
942 	mutex_unlock(&conn->ident_lock);
943 
944 	return id;
945 }
946 
/* Send @skb on the connection's ACL link, or free it if the underlying
 * hci_conn has gone away.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
956 
/* Build and transmit an L2CAP signalling command (@code/@ident with
 * @len bytes of @data) at maximum HCI priority.  Silently drops the
 * command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
981 
/* Transmit a data skb on @chan's ACL link, picking the flush behaviour
 * from the link type and the channel's FLAG_FLUSHABLE setting.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1004 
1005 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1006 {
1007 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1008 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1009 
1010 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1011 		/* S-Frame */
1012 		control->sframe = 1;
1013 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1014 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1015 
1016 		control->sar = 0;
1017 		control->txseq = 0;
1018 	} else {
1019 		/* I-Frame */
1020 		control->sframe = 0;
1021 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1022 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1023 
1024 		control->poll = 0;
1025 		control->super = 0;
1026 	}
1027 }
1028 
1029 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1030 {
1031 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1032 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1033 
1034 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1035 		/* S-Frame */
1036 		control->sframe = 1;
1037 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1038 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1039 
1040 		control->sar = 0;
1041 		control->txseq = 0;
1042 	} else {
1043 		/* I-Frame */
1044 		control->sframe = 0;
1045 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1046 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1047 
1048 		control->poll = 0;
1049 		control->super = 0;
1050 	}
1051 }
1052 
/* Parse and strip the (enhanced or extended, per FLAG_EXT_CTRL)
 * control field from the front of @skb into its control block.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1066 
1067 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1068 {
1069 	u32 packed;
1070 
1071 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1072 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1073 
1074 	if (control->sframe) {
1075 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1076 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1077 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1078 	} else {
1079 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1080 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1081 	}
1082 
1083 	return packed;
1084 }
1085 
1086 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1087 {
1088 	u16 packed;
1089 
1090 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1091 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1092 
1093 	if (control->sframe) {
1094 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1095 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1096 		packed |= L2CAP_CTRL_FRAME_TYPE;
1097 	} else {
1098 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1099 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1100 	}
1101 
1102 	return packed;
1103 }
1104 
1105 static inline void __pack_control(struct l2cap_chan *chan,
1106 				  struct l2cap_ctrl *control,
1107 				  struct sk_buff *skb)
1108 {
1109 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1110 		put_unaligned_le32(__pack_extended_control(control),
1111 				   skb->data + L2CAP_HDR_SIZE);
1112 	} else {
1113 		put_unaligned_le16(__pack_enhanced_control(control),
1114 				   skb->data + L2CAP_HDR_SIZE);
1115 	}
1116 }
1117 
1118 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1119 {
1120 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1121 		return L2CAP_EXT_HDR_SIZE;
1122 	else
1123 		return L2CAP_ENH_HDR_SIZE;
1124 }
1125 
/* Allocate and build a complete S-frame PDU carrying the already-packed
 * control field, appending an FCS when CRC16 checking is enabled on the
 * channel.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: length field excludes the header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width follows the extended control flag */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames carry ack/flow-control state; send at highest priority */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1158 
/* Build and transmit one S-frame described by @control, updating the
 * channel's RNR/ack bookkeeping as a side effect.  @control may be
 * modified (final bit set) before packing.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	/* Only S-frames may be sent through this path */
	if (!control->sframe)
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last sent state was RNR (local busy) */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any non-SREJ S-frame acks up to reqseq, so the ack timer can
	 * be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1196 
1197 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1198 {
1199 	struct l2cap_ctrl control;
1200 
1201 	BT_DBG("chan %p, poll %d", chan, poll);
1202 
1203 	memset(&control, 0, sizeof(control));
1204 	control.sframe = 1;
1205 	control.poll = poll;
1206 
1207 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1208 		control.super = L2CAP_SUPER_RNR;
1209 	else
1210 		control.super = L2CAP_SUPER_RR;
1211 
1212 	control.reqseq = chan->buffer_seq;
1213 	l2cap_send_sframe(chan, &control);
1214 }
1215 
1216 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1217 {
1218 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1219 		return true;
1220 
1221 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1222 }
1223 
/* Send a classic (BR/EDR) L2CAP Connection Request for @chan and mark
 * the connect as pending until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched to us */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1238 
/* Transition @chan to BT_CONNECTED and notify its owner.  Safe to call
 * more than once; subsequent calls are no-ops.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes cannot send without credits; keep the
		 * owner suspended until the peer grants some.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1264 
/* Send an LE Credit Based Connection Request for @chan.  Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request goes out at most once.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU if unset */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	/* Initialize MPS and rx credits for LE flow control */
	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1290 
/* Scratch state for building one Enhanced Credit Based connect request
 * that covers the initiating channel plus matching deferred siblings.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* room for up to 5 source CIDs */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID; only same-PID channels join */
	int count;			/* number of scid[] slots filled */
};
1300 
/* Channel-list iterator callback: fold deferred sibling channels into
 * the ECRED connect request being built in @data.  Channels must share
 * PID and PSM with the initiating channel and be in the right state.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the initiating channel; it is already in the PDU */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1332 
/* Send an Enhanced Credit Based Connection Request for @chan, pulling
 * in any deferred sibling channels with the same PID/PSM so they share
 * a single request.  Guarded by FLAG_ECRED_CONN_REQ_SENT.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up by the initiating channel's
	 * request instead of sending their own.
	 */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect matching deferred channels into data.pdu.scid[] */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1365 
1366 static void l2cap_le_start(struct l2cap_chan *chan)
1367 {
1368 	struct l2cap_conn *conn = chan->conn;
1369 
1370 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1371 		return;
1372 
1373 	if (!chan->psm) {
1374 		l2cap_chan_ready(chan);
1375 		return;
1376 	}
1377 
1378 	if (chan->state == BT_CONNECT) {
1379 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1380 			l2cap_ecred_connect(chan);
1381 		else
1382 			l2cap_le_connect(chan);
1383 	}
1384 }
1385 
1386 static void l2cap_start_connection(struct l2cap_chan *chan)
1387 {
1388 	if (chan->conn->hcon->type == LE_LINK) {
1389 		l2cap_le_start(chan);
1390 	} else {
1391 		l2cap_send_conn_req(chan);
1392 	}
1393 }
1394 
/* Send a feature-mask Information Request for @conn (once per
 * connection) and arm the info timeout so a silent peer does not stall
 * channel setup forever.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1412 
1413 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1414 {
1415 	/* The minimum encryption key size needs to be enforced by the
1416 	 * host stack before establishing any L2CAP connections. The
1417 	 * specification in theory allows a minimum of 1, but to align
1418 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1419 	 *
1420 	 * This check might also be called for unencrypted connections
1421 	 * that have no key size requirements. Ensure that the link is
1422 	 * actually encrypted before enforcing a key size.
1423 	 */
1424 	int min_key_size = hcon->hdev->min_enc_key_size;
1425 
1426 	/* On FIPS security level, key size must be 16 bytes */
1427 	if (hcon->sec_level == BT_SECURITY_FIPS)
1428 		min_key_size = 16;
1429 
1430 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1431 		hcon->enc_key_size >= min_key_size);
1432 }
1433 
/* Drive @chan towards connected: LE links start immediately, BR/EDR
 * links first complete the info req/rsp exchange, then connect once
 * security and encryption key size requirements are met.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Kick off the feature-mask exchange; channel setup resumes when
	 * the response (or the info timeout) arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Refuse to connect over links with too-short encryption keys;
	 * schedule a disconnect instead.
	 */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1460 
1461 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1462 {
1463 	u32 local_feat_mask = l2cap_feat_mask;
1464 	if (!disable_ertm)
1465 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1466 
1467 	switch (mode) {
1468 	case L2CAP_MODE_ERTM:
1469 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1470 	case L2CAP_MODE_STREAMING:
1471 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1472 	default:
1473 		return 0x00;
1474 	}
1475 }
1476 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN with
 * @err recorded as the channel error.  No-op if the channel has no
 * connection.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop all ERTM timers before tearing the channel down */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1498 
/* ---- L2CAP connections ---- */

/* Walk all channels on @conn and advance their connection state
 * machines: connectionless/fixed channels become ready, BT_CONNECT
 * channels send connect requests, and BT_CONNECT2 channels answer the
 * peer's pending connect request.  Each channel is processed under its
 * own lock.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	/* _safe variant: channels may be closed (removed) while iterating */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels whose required mode neither side
			 * supports and that cannot fall back (state 2 device).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				/* Security ok: either hand the decision to
				 * the owner (deferred setup) or accept now.
				 */
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only start configuration after a successful accept
			 * and if we have not already sent a config request.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1575 
/* LE-link post-connect handling: trigger pairing for outgoing
 * connections and, as peripheral, request a connection parameter update
 * if the current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1608 
/* Called when the underlying link comes up: kick every channel's state
 * machine, run LE-specific post-connect work, and start processing any
 * queued incoming frames.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* BR/EDR links need the feature mask before channel setup */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels become ready once the
			 * info exchange has completed.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Drain frames that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1644 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Only channels that demanded a reliable link get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}
}
1657 
/* Info req/rsp exchange timed out: treat it as done (with no features
 * learned) and resume channel setup so connections are not stuck.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1670 
1671 /*
1672  * l2cap_user
1673  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1674  * callback is called during registration. The ->remove callback is called
1675  * during unregistration.
1676  * An l2cap_user object can either be explicitly unregistered or when the
1677  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1678  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1679  * External modules must own a reference to the l2cap_conn object if they intend
1680  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1681  * any time if they don't.
1682  */
1683 
/* Register an external l2cap_user on @conn.  Returns 0 on success,
 * -EINVAL if @user is already registered, -ENODEV if the connection has
 * already been torn down, or the error returned by user->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1721 
/* Unregister @user from @conn and run its remove callback.  Safe to
 * call if the user was never (or is no longer) registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	/* Same locking scheme as l2cap_register_user(): the hci_dev lock
	 * serializes against l2cap_conn_del().
	 */
	hci_dev_lock(hdev);

	/* An empty list node means the user is not registered */
	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
1738 
1739 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1740 {
1741 	struct l2cap_user *user;
1742 
1743 	while (!list_empty(&conn->users)) {
1744 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1745 		list_del_init(&user->list);
1746 		user->remove(conn, user);
1747 	}
1748 }
1749 
/* Tear down the L2CAP connection attached to @hcon: stop pending RX
 * work, notify users, close every channel with @err, and release the
 * HCI channel.  Drops the reference owned by hcon->l2cap_data.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel survives del + close */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	/* NULL hchan marks the connection as unregistered for
	 * l2cap_register_user().
	 */
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1803 
/* kref release callback: drop the hci_conn reference and free @conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1811 
/* Take a reference on @conn; returns @conn for call chaining */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1818 
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1824 
1825 /* ---- Socket interface ---- */
1826 
1827 /* Find socket with psm and source / destination bdaddr.
1828  * Returns closest match.
1829  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	/* c1 remembers the best wildcard (BDADDR_ANY) candidate in case
	 * no exact src/dst match is found.
	 */
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Filter by transport: BR/EDR sources for ACL links,
		 * non-BR/EDR sources for LE links.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels whose refcount already hit
				 * zero (being destroyed).
				 */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Returns NULL if the fallback candidate is being destroyed */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1880 
/* ERTM monitor timer expired: feed the MONITOR_TO event into the TX
 * state machine.  Drops the reference the timer held on the channel.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel lost its connection while the timer was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1901 
/* ERTM retransmission timer expired: feed the RETRANS_TO event into the
 * TX state machine.  Drops the reference the timer held on the channel.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel lost its connection while the timer was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1921 
/* Streaming mode transmit: append @skbs to the TX queue, then send the
 * whole queue immediately, stamping each I-frame with a sequence number
 * (and FCS if enabled).  No acknowledgements or retransmissions.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acks, so reqseq stays zero */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1957 
/* ERTM transmit: send queued I-frames while the remote TX window has
 * room and the TX state machine is in XMIT.  Originals stay on tx_q for
 * possible retransmission; clones are handed to the link.  Returns the
 * number of frames sent, 0 if the remote is busy, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acks everything up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue exhausted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2024 
/* Retransmit every sequence number queued on chan->retrans_list,
 * rewriting each frame's control field (fresh reqseq/final bit) and FCS
 * before sending.  Enforces the max_tx retry limit, disconnecting the
 * channel when it is exceeded.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		/* Frame may have been acked and freed since it was queued */
		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack (reqseq) and F-bit for this transmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2102 
/* Retransmit the single frame identified by control->reqseq */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2111 
/* Queue all unacked frames from control->reqseq up to (but excluding)
 * tx_send_head for retransmission and resend them.  A set poll bit asks
 * for the F-bit to be echoed on the next frame out.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean retransmission list */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame the peer has not acked yet */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything already sent but not yet acked */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2145 
/* Acknowledge received I-frames: send RNR immediately when locally
 * busy, otherwise try to piggyback the ack on pending I-frames and only
 * emit an explicit RR when the unacked window is 3/4 full; smaller
 * backlogs just (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Defer small acks; the timer will flush them later */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2195 
/* Copy @len bytes of user data from @msg into @skb: @count bytes into
 * the head skb, the remainder into newly allocated continuation
 * fragments chained on skb_shinfo(skb)->frag_list.  Returns the number
 * of bytes copied or a negative errno; on error the caller frees @skb,
 * which also frees any fragments already linked into it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so the fragment is owned by @skb
		 * even if the copy below fails.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the head skb's length accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2239 
2240 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2241 						 struct msghdr *msg, size_t len)
2242 {
2243 	struct l2cap_conn *conn = chan->conn;
2244 	struct sk_buff *skb;
2245 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2246 	struct l2cap_hdr *lh;
2247 
2248 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2249 	       __le16_to_cpu(chan->psm), len);
2250 
2251 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2252 
2253 	skb = chan->ops->alloc_skb(chan, hlen, count,
2254 				   msg->msg_flags & MSG_DONTWAIT);
2255 	if (IS_ERR(skb))
2256 		return skb;
2257 
2258 	/* Create L2CAP header */
2259 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2260 	lh->cid = cpu_to_le16(chan->dcid);
2261 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2262 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2263 
2264 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2265 	if (unlikely(err < 0)) {
2266 		kfree_skb(skb);
2267 		return ERR_PTR(err);
2268 	}
2269 	return skb;
2270 }
2271 
2272 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2273 					      struct msghdr *msg, size_t len)
2274 {
2275 	struct l2cap_conn *conn = chan->conn;
2276 	struct sk_buff *skb;
2277 	int err, count;
2278 	struct l2cap_hdr *lh;
2279 
2280 	BT_DBG("chan %p len %zu", chan, len);
2281 
2282 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2283 
2284 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2285 				   msg->msg_flags & MSG_DONTWAIT);
2286 	if (IS_ERR(skb))
2287 		return skb;
2288 
2289 	/* Create L2CAP header */
2290 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2291 	lh->cid = cpu_to_le16(chan->dcid);
2292 	lh->len = cpu_to_le16(len);
2293 
2294 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2295 	if (unlikely(err < 0)) {
2296 		kfree_skb(skb);
2297 		return ERR_PTR(err);
2298 	}
2299 	return skb;
2300 }
2301 
/* Build one ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field placeholder (extended or enhanced), an optional SDU length
 * (first segment of a segmented SDU only), then @len payload bytes from
 * the user iovec.  Head-skb sizing reserves room for a trailing FCS
 * when enabled; the control field is populated at transmit time.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Overhead: basic header + control field size for this channel */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2355 
/* Split an outgoing SDU into ERTM/streaming I-frame PDUs queued on
 * @seg_queue.  A short SDU becomes a single UNSEGMENTED PDU; otherwise
 * the first PDU carries the total SDU length (SAR = START), middle
 * pieces are CONTINUE and the last is END.  Returns 0 on success or a
 * negative errno (PDUs already created are purged from @seg_queue).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			/* Remaining data becomes the final (shorter) PDU */
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2421 
2422 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2423 						   struct msghdr *msg,
2424 						   size_t len, u16 sdulen)
2425 {
2426 	struct l2cap_conn *conn = chan->conn;
2427 	struct sk_buff *skb;
2428 	int err, count, hlen;
2429 	struct l2cap_hdr *lh;
2430 
2431 	BT_DBG("chan %p len %zu", chan, len);
2432 
2433 	if (!conn)
2434 		return ERR_PTR(-ENOTCONN);
2435 
2436 	hlen = L2CAP_HDR_SIZE;
2437 
2438 	if (sdulen)
2439 		hlen += L2CAP_SDULEN_SIZE;
2440 
2441 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2442 
2443 	skb = chan->ops->alloc_skb(chan, hlen, count,
2444 				   msg->msg_flags & MSG_DONTWAIT);
2445 	if (IS_ERR(skb))
2446 		return skb;
2447 
2448 	/* Create L2CAP header */
2449 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2450 	lh->cid = cpu_to_le16(chan->dcid);
2451 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2452 
2453 	if (sdulen)
2454 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2455 
2456 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2457 	if (unlikely(err < 0)) {
2458 		kfree_skb(skb);
2459 		return ERR_PTR(err);
2460 	}
2461 
2462 	return skb;
2463 }
2464 
/* Split an SDU into LE/extended flow control PDUs queued on @seg_queue.
 * The first PDU carries the 2-byte SDU length; subsequent PDUs can
 * therefore hold L2CAP_SDULEN_SIZE more payload.  Returns 0 on success
 * or a negative errno (the queue is purged on error).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU: reserve space for the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Later PDUs have no SDU length field, so they can
			 * carry those extra payload bytes instead.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2500 
2501 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2502 {
2503 	int sent = 0;
2504 
2505 	BT_DBG("chan %p", chan);
2506 
2507 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2508 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2509 		chan->tx_credits--;
2510 		sent++;
2511 	}
2512 
2513 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2514 	       skb_queue_len(&chan->tx_q));
2515 }
2516 
/* Transmit an SDU from @msg on @chan, dispatching on channel type and
 * mode (connectionless, LE/extended flow control, basic, ERTM or
 * streaming).  Returns @len on success or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting: drop any
		 * PDUs already queued rather than sending them.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the upper layer to stop sending */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2619 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2620 
/* Send SREJ S-frames requesting retransmission of every frame from
 * expected_tx_seq up to (but not including) @txseq that is not already
 * buffered out-of-order in srej_q.  Each requested sequence number is
 * recorded in srej_list; expected_tx_seq then advances past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames we already hold in the out-of-order queue */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2643 
2644 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2645 {
2646 	struct l2cap_ctrl control;
2647 
2648 	BT_DBG("chan %p", chan);
2649 
2650 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2651 		return;
2652 
2653 	memset(&control, 0, sizeof(control));
2654 	control.sframe = 1;
2655 	control.super = L2CAP_SUPER_SREJ;
2656 	control.reqseq = chan->srej_list.tail;
2657 	l2cap_send_sframe(chan, &control);
2658 }
2659 
/* Re-send SREJ frames for every sequence number still on srej_list,
 * stopping early if @txseq is popped (it is consumed, not re-queued).
 * Entries are popped and re-appended, so capturing the initial head
 * bounds the loop to a single rotation of the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2685 
/* Process an acknowledgment (ReqSeq) from the peer: free every frame in
 * tx_q with a sequence number in [expected_ack_seq, reqseq), advance
 * expected_ack_seq, and stop the retransmission timer once nothing
 * remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack carries no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2717 
2718 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2719 {
2720 	BT_DBG("chan %p", chan);
2721 
2722 	chan->expected_tx_seq = chan->buffer_seq;
2723 	l2cap_seq_list_clear(&chan->srej_list);
2724 	skb_queue_purge(&chan->srej_q);
2725 	chan->rx_state = L2CAP_RX_STATE_RECV;
2726 }
2727 
/* ERTM transmit state machine, XMIT state: new data may be transmitted
 * immediately.  Handles data requests, local-busy transitions, incoming
 * acknowledgments and poll events; poll-type events (explicit poll,
 * retransmission timeout) move the channel to WAIT_F to await the
 * peer's final bit.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new PDUs and transmit as much as allowed */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* RNR was sent while busy: poll the peer with
			 * RR(P=1) so it learns we are ready again, and
			 * wait for the final bit.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer and wait for
		 * its final response before retransmitting.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2799 
2800 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2801 				  struct l2cap_ctrl *control,
2802 				  struct sk_buff_head *skbs, u8 event)
2803 {
2804 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2805 	       event);
2806 
2807 	switch (event) {
2808 	case L2CAP_EV_DATA_REQUEST:
2809 		if (chan->tx_send_head == NULL)
2810 			chan->tx_send_head = skb_peek(skbs);
2811 		/* Queue data, but don't send. */
2812 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2813 		break;
2814 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2815 		BT_DBG("Enter LOCAL_BUSY");
2816 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2817 
2818 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2819 			/* The SREJ_SENT state must be aborted if we are to
2820 			 * enter the LOCAL_BUSY state.
2821 			 */
2822 			l2cap_abort_rx_srej_sent(chan);
2823 		}
2824 
2825 		l2cap_send_ack(chan);
2826 
2827 		break;
2828 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2829 		BT_DBG("Exit LOCAL_BUSY");
2830 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2831 
2832 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2833 			struct l2cap_ctrl local_control;
2834 			memset(&local_control, 0, sizeof(local_control));
2835 			local_control.sframe = 1;
2836 			local_control.super = L2CAP_SUPER_RR;
2837 			local_control.poll = 1;
2838 			local_control.reqseq = chan->buffer_seq;
2839 			l2cap_send_sframe(chan, &local_control);
2840 
2841 			chan->retry_count = 1;
2842 			__set_monitor_timer(chan);
2843 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2844 		}
2845 		break;
2846 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2847 		l2cap_process_reqseq(chan, control->reqseq);
2848 		fallthrough;
2849 
2850 	case L2CAP_EV_RECV_FBIT:
2851 		if (control && control->final) {
2852 			__clear_monitor_timer(chan);
2853 			if (chan->unacked_frames > 0)
2854 				__set_retrans_timer(chan);
2855 			chan->retry_count = 0;
2856 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2857 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2858 		}
2859 		break;
2860 	case L2CAP_EV_EXPLICIT_POLL:
2861 		/* Ignore */
2862 		break;
2863 	case L2CAP_EV_MONITOR_TO:
2864 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2865 			l2cap_send_rr_or_rnr(chan, 1);
2866 			__set_monitor_timer(chan);
2867 			chan->retry_count++;
2868 		} else {
2869 			l2cap_send_disconn_req(chan, ECONNABORTED);
2870 		}
2871 		break;
2872 	default:
2873 		break;
2874 	}
2875 }
2876 
2877 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2878 		     struct sk_buff_head *skbs, u8 event)
2879 {
2880 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2881 	       chan, control, skbs, event, chan->tx_state);
2882 
2883 	switch (chan->tx_state) {
2884 	case L2CAP_TX_STATE_XMIT:
2885 		l2cap_tx_state_xmit(chan, control, skbs, event);
2886 		break;
2887 	case L2CAP_TX_STATE_WAIT_F:
2888 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2889 		break;
2890 	default:
2891 		/* Ignore event */
2892 		break;
2893 	}
2894 }
2895 
/* Feed an incoming frame's ReqSeq and F-bit into the transmit state
 * machine for acknowledgment processing.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2902 
/* Feed only an incoming frame's F-bit into the transmit state machine
 * (no ReqSeq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2909 
2910 /* Copy frame to all raw sockets on that connection */
2911 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2912 {
2913 	struct sk_buff *nskb;
2914 	struct l2cap_chan *chan;
2915 
2916 	BT_DBG("conn %p", conn);
2917 
2918 	list_for_each_entry(chan, &conn->chan_l, list) {
2919 		if (chan->chan_type != L2CAP_CHAN_RAW)
2920 			continue;
2921 
2922 		/* Don't send frame to the channel it came from */
2923 		if (bt_cb(skb)->l2cap.chan == chan)
2924 			continue;
2925 
2926 		nskb = skb_clone(skb, GFP_KERNEL);
2927 		if (!nskb)
2928 			continue;
2929 		if (chan->ops->recv(chan, nskb))
2930 			kfree_skb(nskb);
2931 	}
2932 }
2933 
2934 /* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling PDU: L2CAP header + command header +
 * @dlen bytes of @data, addressed to the LE or BR/EDR signalling CID
 * depending on link type.  Payload beyond conn->mtu is chained as
 * continuation fragments carrying no L2CAP header.  Returns NULL if
 * the MTU cannot hold the headers or an allocation fails.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the head skb with as much payload as fits */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and all fragments linked so far */
	kfree_skb(skb);
	return NULL;
}
3000 
/* Extract the next configuration option at *ptr and advance *ptr past
 * it.  The option type and length are stored in *type/*olen; 1-, 2- and
 * 4-byte values are read by value (little-endian), any other length is
 * returned as a pointer to the raw option bytes.  Returns the total
 * bytes consumed; the caller must check this against the remaining
 * buffer length (see the len check in l2cap_parse_conf_req).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3034 
3035 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3036 {
3037 	struct l2cap_conf_opt *opt = *ptr;
3038 
3039 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3040 
3041 	if (size < L2CAP_CONF_OPT_SIZE + len)
3042 		return;
3043 
3044 	opt->type = type;
3045 	opt->len  = len;
3046 
3047 	switch (len) {
3048 	case 1:
3049 		*((u8 *) opt->val)  = val;
3050 		break;
3051 
3052 	case 2:
3053 		put_unaligned_le16(val, opt->val);
3054 		break;
3055 
3056 	case 4:
3057 		put_unaligned_le32(val, opt->val);
3058 		break;
3059 
3060 	default:
3061 		memcpy(opt->val, (void *) val, len);
3062 		break;
3063 	}
3064 
3065 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3066 }
3067 
3068 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3069 {
3070 	struct l2cap_conf_efs efs;
3071 
3072 	switch (chan->mode) {
3073 	case L2CAP_MODE_ERTM:
3074 		efs.id		= chan->local_id;
3075 		efs.stype	= chan->local_stype;
3076 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3077 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3078 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3079 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3080 		break;
3081 
3082 	case L2CAP_MODE_STREAMING:
3083 		efs.id		= 1;
3084 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3085 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3086 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3087 		efs.acc_lat	= 0;
3088 		efs.flush_to	= 0;
3089 		break;
3090 
3091 	default:
3092 		return;
3093 	}
3094 
3095 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3096 			   (unsigned long) &efs, size);
3097 }
3098 
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR (without poll) now.  The
 * channel reference is released with l2cap_chan_put() before returning.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgment we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3118 
/* Initialize transmit/receive state for ERTM or streaming mode on
 * @chan.  Returns 0 on success or a negative errno if the ERTM
 * sequence-list allocations fail (both lists are freed on failure of
 * the second).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	/* Reset sequence numbers and SDU reassembly state */
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Everything below is ERTM-only state */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3154 
3155 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3156 {
3157 	switch (mode) {
3158 	case L2CAP_MODE_STREAMING:
3159 	case L2CAP_MODE_ERTM:
3160 		if (l2cap_mode_supported(mode, remote_feat_mask))
3161 			return mode;
3162 		fallthrough;
3163 	default:
3164 		return L2CAP_MODE_BASIC;
3165 	}
3166 }
3167 
3168 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3169 {
3170 	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3171 }
3172 
3173 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3174 {
3175 	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3176 }
3177 
3178 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3179 				      struct l2cap_conf_rfc *rfc)
3180 {
3181 	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3182 	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3183 }
3184 
3185 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3186 {
3187 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3188 	    __l2cap_ews_supported(chan->conn)) {
3189 		/* use extended control field */
3190 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3191 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3192 	} else {
3193 		chan->tx_win = min_t(u16, chan->tx_win,
3194 				     L2CAP_DEFAULT_TX_WINDOW);
3195 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3196 	}
3197 	chan->ack_win = chan->tx_win;
3198 }
3199 
3200 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3201 {
3202 	struct hci_conn *conn = chan->conn->hcon;
3203 
3204 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3205 
3206 	/* The 2-DH1 packet has between 2 and 56 information bytes
3207 	 * (including the 2-byte payload header)
3208 	 */
3209 	if (!(conn->pkt_type & HCI_2DH1))
3210 		chan->imtu = 54;
3211 
3212 	/* The 3-DH1 packet has between 2 and 85 information bytes
3213 	 * (including the 2-byte payload header)
3214 	 */
3215 	if (!(conn->pkt_type & HCI_3DH1))
3216 		chan->imtu = 83;
3217 
3218 	/* The 2-DH3 packet has between 2 and 369 information bytes
3219 	 * (including the 2-byte payload header)
3220 	 */
3221 	if (!(conn->pkt_type & HCI_2DH3))
3222 		chan->imtu = 367;
3223 
3224 	/* The 3-DH3 packet has between 2 and 554 information bytes
3225 	 * (including the 2-byte payload header)
3226 	 */
3227 	if (!(conn->pkt_type & HCI_3DH3))
3228 		chan->imtu = 552;
3229 
3230 	/* The 2-DH5 packet has between 2 and 681 information bytes
3231 	 * (including the 2-byte payload header)
3232 	 */
3233 	if (!(conn->pkt_type & HCI_2DH5))
3234 		chan->imtu = 679;
3235 
3236 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3237 	 * (including the 2-byte payload header)
3238 	 */
3239 	if (!(conn->pkt_type & HCI_3DH5))
3240 		chan->imtu = 1021;
3241 }
3242 
/* Build a configuration request for @chan into @data (at most
 * @data_size bytes).  On the first request the channel mode is chosen
 * against the remote feature mask; then MTU, RFC and (depending on
 * mode and features) EFS/EWS/FCS options are emitted.  Returns the
 * number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU; 0 means pick one automatically
	 * from the supported packet types.
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC when the remote
		 * actually supports the retransmission/streaming modes.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size bounded by the connection MTU minus the largest
		 * possible per-PDU overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Full window goes in the EWS option when extended control
		 * is in use; the RFC option above stays clamped.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3368 
/* Parse the complete Configure Request accumulated in chan->conf_req
 * (chan->conf_len bytes of options) and build the matching Configure
 * Response into @data (at most @data_size bytes).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the requested configuration cannot be negotiated at all and the
 * caller must tear the channel down.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		/* Options with an unexpected length are silently skipped. */
		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not supported here; refuse
			 * the whole connection rather than the single option.
			 */
			return -ECONNREFUSED;

		default:
			/* Hints may be silently ignored; any other unknown
			 * option is echoed back in an UNKNOWN response.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Only negotiate the channel mode on the first exchange; later
	 * requests keep whatever was already selected.
	 */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A "state 2" device insists on its configured mode. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Still disagreeing after one full round trip means the
		 * peers will never converge; give up.
		 */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS so a maximal PDU (including
			 * headers and FCS) still fits into the link MTU.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3588 
/* Parse a Configure Response (@rsp, @len bytes of options) and build a
 * follow-up Configure Request into @data (at most @size bytes), adopting
 * the remote's proposed values where acceptable.
 *
 * *@result may be updated (e.g. to L2CAP_CONF_UNACCEPT for a too-small
 * MTU).  Returns the number of request bytes written, or -ECONNREFUSED
 * when the response is incompatible with the channel.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		/* Malformed option lengths are skipped; each accepted
		 * option is echoed back into the new request.
		 */
		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A "state 2" device cannot change mode mid-flight */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be renegotiated into anything else */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3706 
3707 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3708 				u16 result, u16 flags)
3709 {
3710 	struct l2cap_conf_rsp *rsp = data;
3711 	void *ptr = rsp->data;
3712 
3713 	BT_DBG("chan %p", chan);
3714 
3715 	rsp->scid   = cpu_to_le16(chan->dcid);
3716 	rsp->result = cpu_to_le16(result);
3717 	rsp->flags  = cpu_to_le16(flags);
3718 
3719 	return ptr - data;
3720 }
3721 
3722 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3723 {
3724 	struct l2cap_le_conn_rsp rsp;
3725 	struct l2cap_conn *conn = chan->conn;
3726 
3727 	BT_DBG("chan %p", chan);
3728 
3729 	rsp.dcid    = cpu_to_le16(chan->scid);
3730 	rsp.mtu     = cpu_to_le16(chan->imtu);
3731 	rsp.mps     = cpu_to_le16(chan->mps);
3732 	rsp.credits = cpu_to_le16(chan->rx_credits);
3733 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3734 
3735 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3736 		       &rsp);
3737 }
3738 
/* Per-channel iterator: count channels of an ECRED request that are still
 * pending accept in *data, or record -ECONNREFUSED once any channel has
 * been refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once an error is recorded, or skip channels that already
	 * sent their own request.
	 */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: add it to the count */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3759 
/* Scratch buffer for aggregating a single Enhanced Credit Based (ECRED)
 * Connection Response that covers every channel created with the same
 * command ident.  The layout mirrors the wire format, hence __packed.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		/* DCIDs appended directly after the response header */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of scid[] entries filled in */
};
3767 
/* Per-channel iterator used while building a deferred ECRED Connection
 * Response: on success append this channel's source CID to the aggregated
 * PDU, otherwise delete the channel.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* Flexible-array view over the packed header so dcid[] can be
	 * addressed past the fixed response header.
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3786 
/* Send the deferred ECRED Connection Response covering every channel that
 * was created by the request with ident chan->ident.  Nothing is sent
 * while any of those channels is still pending accept.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident 0 means the response was already sent */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channel is still pending accept, try later */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3822 
3823 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3824 {
3825 	struct l2cap_conn_rsp rsp;
3826 	struct l2cap_conn *conn = chan->conn;
3827 	u8 buf[128];
3828 	u8 rsp_code;
3829 
3830 	rsp.scid   = cpu_to_le16(chan->dcid);
3831 	rsp.dcid   = cpu_to_le16(chan->scid);
3832 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3833 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3834 	rsp_code = L2CAP_CONN_RSP;
3835 
3836 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3837 
3838 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3839 
3840 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3841 		return;
3842 
3843 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3844 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3845 	chan->num_conf_req++;
3846 }
3847 
/* Extract the finally agreed RFC (and extended window size) options from
 * a successful Configure Response (@rsp, @len bytes) and apply them to
 * the channel.  Only meaningful for ERTM and streaming mode.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		/* Malformed option lengths fall back to the defaults above */
		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS option,
		 * otherwise from the RFC option itself.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3903 
3904 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3905 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3906 				    u8 *data)
3907 {
3908 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3909 
3910 	if (cmd_len < sizeof(*rej))
3911 		return -EPROTO;
3912 
3913 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3914 		return 0;
3915 
3916 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3917 	    cmd->ident == conn->info_ident) {
3918 		cancel_delayed_work(&conn->info_timer);
3919 
3920 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3921 		conn->info_ident = 0;
3922 
3923 		l2cap_conn_start(conn);
3924 	}
3925 
3926 	return 0;
3927 }
3928 
/* Handle an incoming BR/EDR Connection Request: look up a listening
 * channel for the PSM, validate security and the proposed source CID,
 * create the new channel and send a Connection Response with @rsp_code.
 * A response is always sent, even on failure.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	/* The remote's source CID becomes our destination CID */
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Acceptance deferred to user space */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery still in progress; answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (!pchan)
		return;

	/* Kick off feature-mask discovery if it has not been done yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4058 
4059 static int l2cap_connect_req(struct l2cap_conn *conn,
4060 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4061 {
4062 	if (cmd_len < sizeof(struct l2cap_conn_req))
4063 		return -EPROTO;
4064 
4065 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
4066 	return 0;
4067 }
4068 
/* Handle an incoming Connection Response: locate the matching local
 * channel (by scid, or by command ident if scid is zero), validate the
 * remote's dcid and advance the channel state machine accordingly.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* On success the allocated dcid must lie in the dynamic range */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference unless the channel is already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid that is already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal; tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4146 
4147 static inline void set_default_fcs(struct l2cap_chan *chan)
4148 {
4149 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4150 	 * sides request it.
4151 	 */
4152 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4153 		chan->fcs = L2CAP_FCS_NONE;
4154 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4155 		chan->fcs = L2CAP_FCS_CRC16;
4156 }
4157 
4158 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4159 				    u8 ident, u16 flags)
4160 {
4161 	struct l2cap_conn *conn = chan->conn;
4162 
4163 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4164 	       flags);
4165 
4166 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4167 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4168 
4169 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4170 		       l2cap_build_conf_rsp(chan, data,
4171 					    L2CAP_CONF_SUCCESS, flags), data);
4172 }
4173 
4174 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4175 				   u16 scid, u16 dcid)
4176 {
4177 	struct l2cap_cmd_rej_cid rej;
4178 
4179 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4180 	rej.scid = __cpu_to_le16(scid);
4181 	rej.dcid = __cpu_to_le16(dcid);
4182 
4183 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4184 }
4185 
/* Handle an incoming Configure Request: accumulate (possibly continued)
 * option data in chan->conf_req, and once complete, parse it, send the
 * Configure Response and advance the configuration state machine.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked and referenced; released at unlock: */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Unnegotiable configuration: disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4294 
/* Handle an incoming Configure Response and advance the configuration
 * state machine: accept the agreed options, retry with adjusted options
 * on UNKNOWN/UNACCEPT, or disconnect if negotiation cannot succeed.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked and referenced; released at done: */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Apply the finally agreed RFC/EWS options */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, bounded by the maximum
		 * number of negotiation rounds.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Negotiation failed for good: disconnect the channel */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* Wait for the remainder of a continued response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4401 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * The peer identifies the channel by our SCID (its DCID); answer with
 * a Disconnection Response and tear the channel down.  Returns 0 even
 * for an unknown CID (answered with a Command Reject); -EPROTO only
 * for a malformed PDU.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	/* Fixed-size request PDU */
	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Stop further userspace I/O before deleting the channel */
	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	/* Drop the lock and reference taken by l2cap_get_chan_by_scid() */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4440 
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Completes a disconnect we initiated: only acted upon when the
 * channel (looked up by our SCID) is actually in BT_DISCONN state;
 * any other state means the response is stale or spurious.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	/* Fixed-size response PDU */
	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	/* Ignore responses for channels we did not ask to disconnect */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4477 
4478 static inline int l2cap_information_req(struct l2cap_conn *conn,
4479 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4480 					u8 *data)
4481 {
4482 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4483 	u16 type;
4484 
4485 	if (cmd_len != sizeof(*req))
4486 		return -EPROTO;
4487 
4488 	type = __le16_to_cpu(req->type);
4489 
4490 	BT_DBG("type 0x%4.4x", type);
4491 
4492 	if (type == L2CAP_IT_FEAT_MASK) {
4493 		u8 buf[8];
4494 		u32 feat_mask = l2cap_feat_mask;
4495 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4496 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4497 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4498 		if (!disable_ertm)
4499 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4500 				| L2CAP_FEAT_FCS;
4501 
4502 		put_unaligned_le32(feat_mask, rsp->data);
4503 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4504 			       buf);
4505 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4506 		u8 buf[12];
4507 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4508 
4509 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4510 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4511 		rsp->data[0] = conn->local_fixed_chan;
4512 		memset(rsp->data + 1, 0, 7);
4513 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4514 			       buf);
4515 	} else {
4516 		struct l2cap_info_rsp rsp;
4517 		rsp.type   = cpu_to_le16(type);
4518 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4519 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4520 			       &rsp);
4521 	}
4522 
4523 	return 0;
4524 }
4525 
/* Handle an L2CAP Information Response during BR/EDR setup.
 *
 * Drives the feature-discovery sequence: a successful feature-mask
 * response may trigger a follow-up fixed-channels request; once
 * discovery completes (or the peer reports failure) pending channels
 * are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Peer refused the query: mark discovery done and move on */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, query which ones */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* First octet of data is the remote fixed-channel bitmap */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4588 
4589 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4590 					      struct l2cap_cmd_hdr *cmd,
4591 					      u16 cmd_len, u8 *data)
4592 {
4593 	struct hci_conn *hcon = conn->hcon;
4594 	struct l2cap_conn_param_update_req *req;
4595 	struct l2cap_conn_param_update_rsp rsp;
4596 	u16 min, max, latency, to_multiplier;
4597 	int err;
4598 
4599 	if (hcon->role != HCI_ROLE_MASTER)
4600 		return -EINVAL;
4601 
4602 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4603 		return -EPROTO;
4604 
4605 	req = (struct l2cap_conn_param_update_req *) data;
4606 	min		= __le16_to_cpu(req->min);
4607 	max		= __le16_to_cpu(req->max);
4608 	latency		= __le16_to_cpu(req->latency);
4609 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
4610 
4611 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4612 	       min, max, latency, to_multiplier);
4613 
4614 	memset(&rsp, 0, sizeof(rsp));
4615 
4616 	err = hci_check_conn_params(min, max, latency, to_multiplier);
4617 	if (err)
4618 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4619 	else
4620 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4621 
4622 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4623 		       sizeof(rsp), &rsp);
4624 
4625 	if (!err) {
4626 		u8 store_hint;
4627 
4628 		store_hint = hci_le_conn_update(hcon, min, max, latency,
4629 						to_multiplier);
4630 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
4631 				    store_hint, min, max, latency,
4632 				    to_multiplier);
4633 
4634 	}
4635 
4636 	return 0;
4637 }
4638 
/* Handle an LE Credit Based Connection Response for a request we sent.
 *
 * On success, records the peer's channel parameters and marks the
 * channel ready.  An authentication/encryption failure triggers a
 * security upgrade followed by a retried connect; any other failure
 * deletes the channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must carry sane MTU/MPS (>= 23) and a
	 * DCID inside the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* Match the response to the pending request by signaling ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a DCID that is already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the security level one step and retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4718 
4719 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4720 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4721 				      u8 *data)
4722 {
4723 	int err = 0;
4724 
4725 	switch (cmd->code) {
4726 	case L2CAP_COMMAND_REJ:
4727 		l2cap_command_rej(conn, cmd, cmd_len, data);
4728 		break;
4729 
4730 	case L2CAP_CONN_REQ:
4731 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
4732 		break;
4733 
4734 	case L2CAP_CONN_RSP:
4735 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
4736 		break;
4737 
4738 	case L2CAP_CONF_REQ:
4739 		err = l2cap_config_req(conn, cmd, cmd_len, data);
4740 		break;
4741 
4742 	case L2CAP_CONF_RSP:
4743 		l2cap_config_rsp(conn, cmd, cmd_len, data);
4744 		break;
4745 
4746 	case L2CAP_DISCONN_REQ:
4747 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
4748 		break;
4749 
4750 	case L2CAP_DISCONN_RSP:
4751 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
4752 		break;
4753 
4754 	case L2CAP_ECHO_REQ:
4755 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4756 		break;
4757 
4758 	case L2CAP_ECHO_RSP:
4759 		break;
4760 
4761 	case L2CAP_INFO_REQ:
4762 		err = l2cap_information_req(conn, cmd, cmd_len, data);
4763 		break;
4764 
4765 	case L2CAP_INFO_RSP:
4766 		l2cap_information_rsp(conn, cmd, cmd_len, data);
4767 		break;
4768 
4769 	default:
4770 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4771 		err = -EINVAL;
4772 		break;
4773 	}
4774 
4775 	return err;
4776 }
4777 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates PSM/SCID/security, asks the matching listening channel to
 * spawn a new connection and answers with an LE Connection Response.
 * When the listener uses deferred setup (FLAG_DEFER_SETUP) the
 * response is postponed until userspace accepts.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE credit based channels is 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The listener's security requirements must already be met */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	/* Populate the new channel from the request and the link */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred setup: response will be sent on accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4914 
/* Handle an LE Flow Control Credit packet.
 *
 * Adds the credits granted by the peer to the channel's TX budget and
 * resumes sending.  A grant that would push the total above
 * LE_FLOWCTL_MAX_CREDITS is a protocol violation and disconnects the
 * channel.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4961 
/* Handle L2CAP_CREDIT_BASED_CONNECTION_REQ (Enhanced Credit Based
 * Flow Control mode).
 *
 * Up to L2CAP_ECRED_MAX_CID channels may be requested in a single
 * PDU.  One response carries a DCID for every requested SCID (0 for a
 * per-channel failure); the result field reflects the last failure
 * seen, if any.  With deferred setup the response is sent later.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Fixed header plus a whole number of 16-bit SCIDs */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The listener's security requirements must already be met */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	/* Per-SCID setup: a failure only skips that channel; the
	 * response keeps a 0 DCID in that slot.
	 */
	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup: response is sent once userspace accepts */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5112 
/* Handle L2CAP_CREDIT_BASED_CONNECTION_RSP for a request we sent.
 *
 * Walks all channels pending on this signaling ident and consumes one
 * DCID from the response per channel, in order.  Channels without a
 * corresponding DCID, or with a duplicate DCID, are deleted.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now tracks the remaining (unconsumed) DCID bytes */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only ext-flowctl channels still pending on this ident */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise the security level one step and retry */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5222 
5223 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5224 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5225 					 u8 *data)
5226 {
5227 	struct l2cap_ecred_reconf_req *req = (void *) data;
5228 	struct l2cap_ecred_reconf_rsp rsp;
5229 	u16 mtu, mps, result;
5230 	struct l2cap_chan *chan;
5231 	int i, num_scid;
5232 
5233 	if (!enable_ecred)
5234 		return -EINVAL;
5235 
5236 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
5237 		result = L2CAP_CR_LE_INVALID_PARAMS;
5238 		goto respond;
5239 	}
5240 
5241 	mtu = __le16_to_cpu(req->mtu);
5242 	mps = __le16_to_cpu(req->mps);
5243 
5244 	BT_DBG("mtu %u mps %u", mtu, mps);
5245 
5246 	if (mtu < L2CAP_ECRED_MIN_MTU) {
5247 		result = L2CAP_RECONF_INVALID_MTU;
5248 		goto respond;
5249 	}
5250 
5251 	if (mps < L2CAP_ECRED_MIN_MPS) {
5252 		result = L2CAP_RECONF_INVALID_MPS;
5253 		goto respond;
5254 	}
5255 
5256 	cmd_len -= sizeof(*req);
5257 	num_scid = cmd_len / sizeof(u16);
5258 	result = L2CAP_RECONF_SUCCESS;
5259 
5260 	for (i = 0; i < num_scid; i++) {
5261 		u16 scid;
5262 
5263 		scid = __le16_to_cpu(req->scid[i]);
5264 		if (!scid)
5265 			return -EPROTO;
5266 
5267 		chan = __l2cap_get_chan_by_dcid(conn, scid);
5268 		if (!chan)
5269 			continue;
5270 
5271 		/* If the MTU value is decreased for any of the included
5272 		 * channels, then the receiver shall disconnect all
5273 		 * included channels.
5274 		 */
5275 		if (chan->omtu > mtu) {
5276 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
5277 			       chan->omtu, mtu);
5278 			result = L2CAP_RECONF_INVALID_MTU;
5279 		}
5280 
5281 		chan->omtu = mtu;
5282 		chan->remote_mps = mps;
5283 	}
5284 
5285 respond:
5286 	rsp.result = cpu_to_le16(result);
5287 
5288 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
5289 		       &rsp);
5290 
5291 	return 0;
5292 }
5293 
5294 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5295 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5296 					 u8 *data)
5297 {
5298 	struct l2cap_chan *chan, *tmp;
5299 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5300 	u16 result;
5301 
5302 	if (cmd_len < sizeof(*rsp))
5303 		return -EPROTO;
5304 
5305 	result = __le16_to_cpu(rsp->result);
5306 
5307 	BT_DBG("result 0x%4.4x", rsp->result);
5308 
5309 	if (!result)
5310 		return 0;
5311 
5312 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5313 		if (chan->ident != cmd->ident)
5314 			continue;
5315 
5316 		l2cap_chan_del(chan, ECONNRESET);
5317 	}
5318 
5319 	return 0;
5320 }
5321 
/* Handle an LE Command Reject: if the reject matches a pending
 * request identifier, tear down the channel waiting on that request.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	/* Take a reference only if the channel is not already dying */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

done:
	return 0;
}
5348 
5349 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5350 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5351 				   u8 *data)
5352 {
5353 	int err = 0;
5354 
5355 	switch (cmd->code) {
5356 	case L2CAP_COMMAND_REJ:
5357 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5358 		break;
5359 
5360 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5361 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5362 		break;
5363 
5364 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5365 		break;
5366 
5367 	case L2CAP_LE_CONN_RSP:
5368 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5369 		break;
5370 
5371 	case L2CAP_LE_CONN_REQ:
5372 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5373 		break;
5374 
5375 	case L2CAP_LE_CREDITS:
5376 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5377 		break;
5378 
5379 	case L2CAP_ECRED_CONN_REQ:
5380 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
5381 		break;
5382 
5383 	case L2CAP_ECRED_CONN_RSP:
5384 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
5385 		break;
5386 
5387 	case L2CAP_ECRED_RECONF_REQ:
5388 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
5389 		break;
5390 
5391 	case L2CAP_ECRED_RECONF_RSP:
5392 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
5393 		break;
5394 
5395 	case L2CAP_DISCONN_REQ:
5396 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5397 		break;
5398 
5399 	case L2CAP_DISCONN_RSP:
5400 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5401 		break;
5402 
5403 	default:
5404 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5405 		err = -EINVAL;
5406 		break;
5407 	}
5408 
5409 	return err;
5410 }
5411 
/* Process one LE signaling channel PDU.
 *
 * An LE signaling packet carries exactly one command; validate the
 * header, dispatch it, and send a Command Reject if the handler
 * failed.  Consumes the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Declared length must match the payload exactly; ident 0 is
	 * reserved and invalid.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): the message text is misleading here --
		 * err covers any handler failure, not just link type.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5452 
5453 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5454 {
5455 	struct l2cap_cmd_rej_unk rej;
5456 
5457 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5458 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5459 }
5460 
/* Process a BR/EDR signaling channel PDU.
 *
 * Unlike LE, a BR/EDR signaling packet may concatenate several
 * commands; iterate over them, dispatching each and rejecting
 * malformed or failing ones individually.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror raw signaling traffic to raw sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Truncated payload or reserved ident 0: reject and
		 * skip whatever data this command claimed.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short for a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5508 
5509 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5510 {
5511 	u16 our_fcs, rcv_fcs;
5512 	int hdr_size;
5513 
5514 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5515 		hdr_size = L2CAP_EXT_HDR_SIZE;
5516 	else
5517 		hdr_size = L2CAP_ENH_HDR_SIZE;
5518 
5519 	if (chan->fcs == L2CAP_FCS_CRC16) {
5520 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5521 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5522 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5523 
5524 		if (our_fcs != rcv_fcs)
5525 			return -EBADMSG;
5526 	}
5527 	return 0;
5528 }
5529 
/* Answer a poll (P-bit) in ERTM mode with the F-bit set.
 *
 * Sends RNR if we are locally busy, otherwise flushes pending
 * I-frames (which can carry the F-bit); if the F-bit was still not
 * transmitted by either, finish with an RR S-frame.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy: restart retransmission if frames
	 * are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5563 
5564 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5565 			    struct sk_buff **last_frag)
5566 {
5567 	/* skb->len reflects data in skb as well as all fragments
5568 	 * skb->data_len reflects only data in fragments
5569 	 */
5570 	if (!skb_has_frag_list(skb))
5571 		skb_shinfo(skb)->frag_list = new_frag;
5572 
5573 	new_frag->next = NULL;
5574 
5575 	(*last_frag)->next = new_frag;
5576 	*last_frag = new_frag;
5577 
5578 	skb->len += new_frag->len;
5579 	skb->data_len += new_frag->len;
5580 	skb->truesize += new_frag->truesize;
5581 }
5582 
/* Feed one I-frame into SDU reassembly according to its SAR
 * (segmentation and reassembly) field.
 *
 * Complete SDUs are handed to chan->ops->recv(), which takes ownership of
 * the skb on success.  Partially reassembled data is parked in chan->sdu.
 * On any error both the passed skb and any partial SDU are freed and the
 * reassembly state is reset.
 *
 * Returns 0 on success, -EINVAL on a SAR sequence violation, -EMSGSIZE
 * when the SDU would exceed the incoming MTU, or the recv callback's
 * error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start fragment carries the total SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the reassembly state; keep the error
		 * path below from freeing it.
		 */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave room for the end fragment */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard the frame and any partial SDU, reset state */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5667 
/* Re-segment queued outgoing data, called after the connection MTU is
 * updated (see l2cap_finish_move()/l2cap_rx_state_wait_f()).  Not yet
 * implemented; currently always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5673 
5674 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5675 {
5676 	u8 event;
5677 
5678 	if (chan->mode != L2CAP_MODE_ERTM)
5679 		return;
5680 
5681 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5682 	l2cap_tx(chan, NULL, NULL, event);
5683 }
5684 
/* Drain in-order frames that were parked on srej_q during SREJ recovery.
 * Once the queue is empty the channel returns to the RECV state and the
 * recovered frames are acknowledged.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-order frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* All SREJ'd frames recovered: resume normal reception and ack */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5718 
/* Handle a received SREJ S-frame: retransmit the single I-frame the peer
 * selectively rejected.  An SREJ naming next_tx_seq (nothing outstanding)
 * or one exceeding the retry limit disconnects the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 allows an unlimited number of retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* P=1 demands a response carrying the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this F=1 SREJ matches
			 * the one already acted upon (CONN_SREJ_ACT with
			 * the saved reqseq).
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5776 
/* Handle a received REJ S-frame: the peer asks for retransmission of all
 * unacked I-frames starting at reqseq.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next, not yet sent, sequence number is invalid */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 allows an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 answers our poll; retransmit only if this REJ was
		 * not already acted upon (CONN_REJ_ACT).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5813 
/* Classify the TxSeq of a received I-frame relative to the receive window
 * and any SREJ recovery in progress: expected, duplicate, unexpected (a
 * sequence gap), one of the SREJ variants, or invalid (possibly safely
 * ignorable, see the "double poll" discussion below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* An earlier offset than expected_tx_seq means a retransmission of
	 * something already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5899 
/* ERTM receiver state machine, RECV state: process I-frame and S-frame
 * events while no SREJ recovery is in progress.
 *
 * Owns skb for the duration of the call: the frame is either consumed
 * (handed to reassembly or queued on srej_q, tracked via skb_in_use) or
 * freed before returning.  skb may be NULL for S-frame events.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* F=1 answers our REJ; retransmit unless the REJ
			 * was already acted upon (CONN_REJ_ACT).
			 */
			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only its ack info is of use */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer signalled receiver-busy: stop retransmitting */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): control->reqseq was already dereferenced by
		 * the caller (l2cap_rx), so this NULL check looks redundant
		 * — confirm before relying on it.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frame was not consumed by any of the paths above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6051 
/* ERTM receiver state machine, SREJ_SENT state: one or more SREJs are
 * outstanding.  Incoming frames are parked on srej_q until the requested
 * retransmissions fill the gaps; l2cap_rx_queued_iframes() drains the
 * queue and returns the channel to the RECV state.
 *
 * As in the RECV state, skb is consumed (skb_in_use) or freed on return.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The head of the SREJ list arrived; try to drain
			 * the queued frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			/* F=1 answers our poll; retransmit unless the REJ
			 * was already acted upon.
			 */
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by repeating the pending SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frame was not consumed by any of the paths above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6194 
6195 static int l2cap_finish_move(struct l2cap_chan *chan)
6196 {
6197 	BT_DBG("chan %p", chan);
6198 
6199 	chan->rx_state = L2CAP_RX_STATE_RECV;
6200 	chan->conn->mtu = chan->conn->hcon->mtu;
6201 
6202 	return l2cap_resegment(chan);
6203 }
6204 
/* ERTM receiver state machine, WAIT_P state: only a poll (P=1) frame is
 * acceptable.  The transmit side is rewound to the peer's reqseq, the
 * transition is finished via l2cap_finish_move() (presumably entered as
 * part of a channel move — confirm against the move code outside this
 * chunk), and the remaining event is re-processed in the RECV state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame is not a valid poll carrier here */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6242 
/* ERTM receiver state machine, WAIT_F state: wait for a frame carrying
 * F=1 that answers our poll.  The transmit side is resynchronized to the
 * peer's reqseq, the link MTU refreshed, and the event is then handled
 * normally in the RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6276 
6277 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6278 {
6279 	/* Make sure reqseq is for a packet that has been sent but not acked */
6280 	u16 unacked;
6281 
6282 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6283 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6284 }
6285 
6286 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6287 		    struct sk_buff *skb, u8 event)
6288 {
6289 	int err = 0;
6290 
6291 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6292 	       control, skb, event, chan->rx_state);
6293 
6294 	if (__valid_reqseq(chan, control->reqseq)) {
6295 		switch (chan->rx_state) {
6296 		case L2CAP_RX_STATE_RECV:
6297 			err = l2cap_rx_state_recv(chan, control, skb, event);
6298 			break;
6299 		case L2CAP_RX_STATE_SREJ_SENT:
6300 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6301 						       event);
6302 			break;
6303 		case L2CAP_RX_STATE_WAIT_P:
6304 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6305 			break;
6306 		case L2CAP_RX_STATE_WAIT_F:
6307 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6308 			break;
6309 		default:
6310 			/* shut it down */
6311 			break;
6312 		}
6313 	} else {
6314 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6315 		       control->reqseq, chan->next_tx_seq,
6316 		       chan->expected_ack_seq);
6317 		l2cap_send_disconn_req(chan, ECONNRESET);
6318 	}
6319 
6320 	return err;
6321 }
6322 
/* Streaming mode receive path: no retransmissions or acknowledgements.
 * Only the next expected TxSeq is reassembled; any other classification
 * discards the partial SDU and the frame, after which the expected
 * sequence numbers are resynchronized to the received TxSeq.
 *
 * Always returns 0; skb is consumed on every path.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: abandon the partial SDU and the frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6374 
/* Receive one ERTM/streaming PDU on a channel: unpack the control field,
 * verify FCS and payload length, then dispatch I-frames to l2cap_rx() or
 * l2cap_stream_rx() and S-frames to the matching rx event.
 *
 * Always returns 0; skb is consumed on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Information payload excludes the SDU length field and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* A payload larger than the agreed MPS is a protocol violation */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the 2-bit S-frame super field onto rx events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6467 
6468 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6469 {
6470 	struct l2cap_conn *conn = chan->conn;
6471 	struct l2cap_le_credits pkt;
6472 	u16 return_credits = l2cap_le_rx_credits(chan);
6473 
6474 	if (chan->rx_credits >= return_credits)
6475 		return;
6476 
6477 	return_credits -= chan->rx_credits;
6478 
6479 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6480 
6481 	chan->rx_credits += return_credits;
6482 
6483 	pkt.cid     = cpu_to_le16(chan->scid);
6484 	pkt.credits = cpu_to_le16(return_credits);
6485 
6486 	chan->ident = l2cap_get_ident(conn);
6487 
6488 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6489 }
6490 
6491 void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
6492 {
6493 	if (chan->rx_avail == rx_avail)
6494 		return;
6495 
6496 	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
6497 
6498 	chan->rx_avail = rx_avail;
6499 
6500 	if (chan->state == BT_CONNECTED)
6501 		l2cap_chan_le_send_credits(chan);
6502 }
6503 
6504 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6505 {
6506 	int err;
6507 
6508 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6509 
6510 	/* Wait recv to confirm reception before updating the credits */
6511 	err = chan->ops->recv(chan, skb);
6512 
6513 	if (err < 0 && chan->rx_avail != -1) {
6514 		BT_ERR("Queueing received LE L2CAP data failed");
6515 		l2cap_send_disconn_req(chan, ECONNRESET);
6516 		return err;
6517 	}
6518 
6519 	/* Update credits whenever an SDU is received */
6520 	l2cap_chan_le_send_credits(chan);
6521 
6522 	return err;
6523 }
6524 
/* Receive one PDU on an LE / enhanced credit based flow control channel.
 *
 * Each PDU consumes one credit; the first PDU of an SDU carries the total
 * SDU length.  Fragments accumulate in chan->sdu until complete, then the
 * SDU is delivered via l2cap_ecred_recv().
 *
 * Returns a negative error only when skb was NOT consumed (the caller
 * then frees it); otherwise 0, even if reassembly failed internally.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* The peer must not send beyond the credits we granted */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First fragment of an SDU: read the length prefix */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is now owned by the reassembly state */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6629 
/* Route an incoming data packet to the channel registered for @cid and
 * hand it to the receive path matching the channel mode.
 *
 * l2cap_get_chan_by_scid() returns the channel locked and with a
 * reference held; both are released at "done".  skb is consumed on every
 * path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* A negative return means skb was not consumed */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6695 
/* Handle a frame received on the connectionless data channel.
 *
 * Only BR/EDR ACL links carry connectionless data.  The frame is
 * delivered to the global channel listening on @psm, if any; the
 * reference returned by l2cap_global_chan_by_psm() is dropped before
 * returning.  The skb is consumed on every path: either taken over by
 * chan->ops->recv() or freed at "free_skb".
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6736 
/* Demultiplex one complete L2CAP frame (basic header included) to the
 * proper per-CID handler.
 *
 * @lh is captured before the basic header is pulled; the header bytes
 * remain valid in the skb buffer after skb_pull().  Frames arriving
 * before the HCI link reaches BT_CONNECTED are parked on
 * conn->pending_rx and replayed later by process_pending_rx().
 * Ownership of @skb passes to the selected handler, or the skb is
 * freed here on a length mismatch or a rejected LE peer.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Payload length must match the header's len field exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 2-byte PSM before the data */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6791 
6792 static void process_pending_rx(struct work_struct *work)
6793 {
6794 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6795 					       pending_rx_work);
6796 	struct sk_buff *skb;
6797 
6798 	BT_DBG("");
6799 
6800 	mutex_lock(&conn->lock);
6801 
6802 	while ((skb = skb_dequeue(&conn->pending_rx)))
6803 		l2cap_recv_frame(conn, skb);
6804 
6805 	mutex_unlock(&conn->lock);
6806 }
6807 
/* Look up or create the L2CAP connection state attached to @hcon.
 *
 * Returns the existing conn when one is already attached; otherwise
 * allocates a new conn together with its HCI channel.  The new conn
 * starts with one kref and holds a reference on @hcon (via
 * hci_conn_get()).  Returns NULL on allocation failure, in which case
 * no state is left behind.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		/* Undo the hci_chan allocation on OOM */
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when LE is enabled and either
	 * Secure Connections is supported or SMP over BR/EDR is forced.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	/* Default disconnect reason reported until something better known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6859 
6860 static bool is_valid_psm(u16 psm, u8 dst_type)
6861 {
6862 	if (!psm)
6863 		return false;
6864 
6865 	if (bdaddr_type_is_le(dst_type))
6866 		return (psm <= 0x00ff);
6867 
6868 	/* PSM must be odd and lsb of upper byte must be 0 */
6869 	return ((psm & 0x0101) == 0x0001);
6870 }
6871 
/* Iteration context handed to l2cap_chan_by_pid() while walking a
 * connection's channel list.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the connect */
	struct pid *pid;		/* peer PID to match against */
	int count;			/* matching deferred channels found */
};
6877 
/* l2cap_chan_list() callback: count channels that belong to the same
 * peer PID/PSM as @data->chan and are still in a deferred-setup
 * BT_CONNECT state.  Used by l2cap_chan_connect() to cap the number of
 * SCIDs packed into a single ECRED connect request.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	/* Skip the channel we are counting on behalf of */
	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}
6898 
/**
 * l2cap_chan_connect() - initiate an outgoing L2CAP channel connection
 * @chan:     channel to connect (must be in BT_OPEN or BT_BOUND state)
 * @psm:      destination PSM (little endian), or 0 when @cid is used
 * @cid:      destination CID for fixed channels, or 0
 * @dst:      remote address
 * @dst_type: remote address type (BDADDR_* value)
 * @timeout:  HCI connection timeout
 *
 * Resolves the local controller, creates or reuses the underlying HCI
 * connection, attaches @chan to the L2CAP connection and kicks off the
 * connect procedure.
 *
 * Return: 0 on success (or if a connect is already in progress),
 * negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Either a valid PSM, a fixed CID, or a raw channel is required */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot scan, so connect directly;
		 * otherwise go through the passive-scan connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* Refuse a fixed CID that is already in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7083 
7084 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7085 {
7086 	struct l2cap_conn *conn = chan->conn;
7087 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7088 
7089 	pdu->mtu = cpu_to_le16(chan->imtu);
7090 	pdu->mps = cpu_to_le16(chan->mps);
7091 	pdu->scid[0] = cpu_to_le16(chan->scid);
7092 
7093 	chan->ident = l2cap_get_ident(conn);
7094 
7095 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7096 		       sizeof(pdu), &pdu);
7097 }
7098 
/**
 * l2cap_chan_reconfigure() - change the incoming MTU of an ECRED channel
 * @chan: channel to reconfigure
 * @mtu:  new incoming MTU; must not be smaller than the current one
 *
 * Updates chan->imtu and sends an ECRED reconfigure request to the
 * peer.  Return: 0 on success, -EINVAL if @mtu would shrink the MTU.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	/* The MTU may only grow during reconfiguration */
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
7112 
7113 /* ---- L2CAP interface with lower layer (HCI) ---- */
7114 
/* HCI callback: decide whether an incoming BR/EDR connection from
 * @bdaddr should be accepted based on listening L2CAP channels.
 *
 * Channels bound to the local controller address take precedence over
 * wildcard (BDADDR_ANY) listeners.  Returns a HCI_LM_* link-mode mask
 * (0 means no listener, i.e. reject).
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Exact match on the local adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7143 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel carries a reference taken with
 * l2cap_chan_hold_unless_zero(); the caller must drop it with
 * l2cap_chan_put().  The list lock is released before returning so
 * the caller may sleep (e.g. take the channel lock).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after @c, or start from the head of the global list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must match the local address or be a wildcard listener */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7179 
7180 static bool l2cap_match(struct hci_conn *hcon)
7181 {
7182 	return hcon->type == ACL_LINK || hcon->type == LE_LINK;
7183 }
7184 
/* HCI callback: a connection attempt to/from @hcon completed.
 *
 * On failure the whole L2CAP connection is torn down.  On success the
 * L2CAP connection state is created (if needed), every listening fixed
 * channel is given a chance to spawn a per-connection channel, and
 * finally the connection is marked ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the ref on the current channel */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7242 
7243 int l2cap_disconn_ind(struct hci_conn *hcon)
7244 {
7245 	struct l2cap_conn *conn = hcon->l2cap_data;
7246 
7247 	BT_DBG("hcon %p", hcon);
7248 
7249 	if (!conn)
7250 		return HCI_ERROR_REMOTE_USER_TERM;
7251 	return conn->disc_reason;
7252 }
7253 
/* HCI callback: the link is gone; tear down all L2CAP state for it */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7260 
7261 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7262 {
7263 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7264 		return;
7265 
7266 	if (encrypt == 0x00) {
7267 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7268 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7269 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7270 			   chan->sec_level == BT_SECURITY_FIPS)
7271 			l2cap_chan_close(chan, ECONNREFUSED);
7272 	} else {
7273 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7274 			__clear_chan_timer(chan);
7275 	}
7276 }
7277 
/* HCI callback: authentication/encryption state of the link changed.
 *
 * Walks every channel on the connection and advances its state
 * machine: resumes connected/configuring channels, (re)starts the
 * connect procedure for BT_CONNECT channels, and answers pending
 * incoming connect requests (BT_CONNECT2) with success, pending
 * authorization, or a security block.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption upgrades the effective sec level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Key too short or auth failed: schedule disconnect */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right away on success */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7364 
/* Append fragment into frame respecting the maximum len of rx_skb
 *
 * Allocates conn->rx_skb on first use with room for @len bytes and
 * sets conn->rx_len to the total expected length.  Copies at most
 * min(@len, skb->len) bytes from @skb into rx_skb, pulls them off
 * @skb, and decrements conn->rx_len accordingly.
 *
 * Returns the number of bytes copied, or -ENOMEM if the allocation
 * failed.  The caller keeps ownership of @skb.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
7386 
/* Complete the 2-byte L2CAP basic-header length field from @skb and
 * size the reassembly buffer accordingly.
 *
 * If the current rx_skb is too small to hold the full frame, it is
 * replaced by a freshly allocated skb of the exact expected size, with
 * the bytes gathered so far copied over.
 *
 * Returns the number of bytes consumed (>= 0), or a negative errno on
 * allocation failure.  A return with rx_skb->len still short of
 * L2CAP_LEN_SIZE means more fragments are needed.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7421 
7422 static void l2cap_recv_reset(struct l2cap_conn *conn)
7423 {
7424 	kfree_skb(conn->rx_skb);
7425 	conn->rx_skb = NULL;
7426 	conn->rx_len = 0;
7427 }
7428 
7429 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7430 {
7431 	if (!c)
7432 		return NULL;
7433 
7434 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7435 
7436 	if (!kref_get_unless_zero(&c->ref))
7437 		return NULL;
7438 
7439 	return c;
7440 }
7441 
/* Entry point for ACL data from the HCI layer.
 *
 * Reassembles HCI fragments (ACL_START.../ACL_CONT) into complete
 * L2CAP frames in conn->rx_skb, then dispatches each complete frame
 * via l2cap_recv_frame().  The incoming @skb is always consumed here;
 * reassembled frames are handed off with ownership transferred.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Guard against a conn that is concurrently being destroyed */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: drop the partial frame.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7560 
/* HCI callback registration: how the HCI core notifies L2CAP of link
 * events (registered in l2cap_init(), removed in l2cap_exit()).
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.match		= l2cap_match,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7568 
7569 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7570 {
7571 	struct l2cap_chan *c;
7572 
7573 	read_lock(&chan_list_lock);
7574 
7575 	list_for_each_entry(c, &chan_list, global_l) {
7576 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7577 			   &c->src, c->src_type, &c->dst, c->dst_type,
7578 			   c->state, __le16_to_cpu(c->psm),
7579 			   c->scid, c->dcid, c->imtu, c->omtu,
7580 			   c->sec_level, c->mode);
7581 	}
7582 
7583 	read_unlock(&chan_list_lock);
7584 
7585 	return 0;
7586 }
7587 
/* Generates l2cap_debugfs_fops around l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Handle of the debugfs "l2cap" file, created in l2cap_init() */
static struct dentry *l2cap_debugfs;
7591 
/* Initialize the L2CAP layer: register the socket interface, hook the
 * HCI callbacks and (optionally) create the debugfs entry.  The
 * debugfs file is best-effort; its absence is not an error.
 * Returns 0 on success or a negative errno from socket registration.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	return 0;
}
7610 
/* Tear down the L2CAP layer in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7617 
/* Runtime-tunable module parameters (mode 0644: world-readable,
 * root-writable via sysfs).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7623