xref: /linux/net/bluetooth/l2cap_core.c (revision 6c3ea155e5ee3e56606233acde8309afda66d483)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
/* Upper bound on the credit count an LE flow-control channel may hold */
#define LE_FLOWCTL_MAX_CREDITS 65535

/* Global switches — NOTE(review): presumably exposed as module parameters
 * elsewhere in this file; confirm.
 */
bool disable_ertm;
bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);

/* Local feature mask — NOTE(review): likely advertised in the information
 * response; the user is outside this chunk, confirm.
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of every l2cap_chan, guarded by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling and ERTM helpers defined later */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
/* BDADDR_* address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* BDADDR_* address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 						   u16 cid)
92 {
93 	struct l2cap_chan *c;
94 
95 	list_for_each_entry(c, &conn->chan_l, list) {
96 		if (c->dcid == cid)
97 			return c;
98 	}
99 	return NULL;
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						   u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	list_for_each_entry(c, &conn->chan_l, list) {
108 		if (c->scid == cid)
109 			return c;
110 	}
111 	return NULL;
112 }
113 
/* Find channel with given SCID.
 * Returns a reference locked channel: on success the channel is returned
 * with its kref elevated and its lock held. The caller must unlock and
 * l2cap_chan_put() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
132 
/* Find channel with given DCID.
 * Returns a reference locked channel: on success the channel is returned
 * with its kref elevated and its lock held. The caller must unlock and
 * l2cap_chan_put() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
151 
152 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 						    u8 ident)
154 {
155 	struct l2cap_chan *c;
156 
157 	list_for_each_entry(c, &conn->chan_l, list) {
158 		if (c->ident == ident)
159 			return c;
160 	}
161 	return NULL;
162 }
163 
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
165 						      u8 src_type)
166 {
167 	struct l2cap_chan *c;
168 
169 	list_for_each_entry(c, &chan_list, global_l) {
170 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
171 			continue;
172 
173 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
174 			continue;
175 
176 		if (c->sport == psm && !bacmp(&c->src, src))
177 			return c;
178 	}
179 	return NULL;
180 }
181 
/* Bind @chan to @psm on source address @src, or, when @psm is 0,
 * auto-allocate a free dynamic PSM for the channel's transport.
 *
 * BR/EDR auto-allocation steps by 2 — NOTE(review): presumably to keep
 * the low octet odd as the spec requires; confirm. LE dynamic PSMs use
 * every value in the LE dynamic range.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL when no free PSM could be found.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Take the first PSM in the range not already bound to @src */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
/* Turn @chan into a fixed channel with source CID @scid.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241 
242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Move @chan to @state and notify the channel owner (err = 0) */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
/* Move @chan to @state and notify the channel owner with error @err */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274 
/* Report error @err to the channel owner without changing state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
/* Arm the ERTM retransmission timer, unless the monitor timer is already
 * pending (the monitor phase supersedes retransmission) or no timeout
 * has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288 
/* Switch from the retransmission timer to the ERTM monitor timer;
 * only armed when a monitor timeout has been negotiated.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297 
/* Linear scan of @head for the skb whose ERTM TxSeq equals @seq;
 * NULL if no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
/* Allocate and reset a sequence list sized for @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array directly */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344 
/* Release the backing array of a sequence list (kfree(NULL) is a no-op) */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* True if @seq is currently a member of @seq_list */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty — callers must check
 * head != L2CAP_SEQ_LIST_CLEAR first; confirm at call sites.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next link and clear the popped slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: mark the whole list empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
/* Empty the sequence list, clearing every slot (O(array size)) */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	/* Already empty — nothing to clear */
	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386 
/* Append @seq to the tail of the list; duplicates are silently ignored */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member — membership slot is non-CLEAR */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;	/* first element */
	else
		seq_list->list[seq_list->tail & mask] = seq;	/* link after old tail */

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Channel timer expiry (delayed work armed via __set_chan_timer()).
 * Closes the channel with an errno derived from its current state.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* Pick an errno matching the phase the channel timed out in */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialize a new l2cap_chan, link it into the global
 * channel list and set up its timers. Returns the new channel (with one
 * kref held) in BT_OPEN state, or NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback: unlink the channel from the global list and
 * free it. Invoked when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an additional reference on @c; caller must already hold one */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
EXPORT_SYMBOL_GPL(l2cap_chan_hold);
501 
502 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
503 {
504 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
505 
506 	if (!kref_get_unless_zero(&c->kref))
507 		return NULL;
508 
509 	return c;
510 }
511 
/* Drop a reference on @c; frees the channel when the count hits zero */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
519 
/* Reset @chan to the default ERTM/security parameters used before
 * configuration negotiation starts.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	/* Restart configuration from scratch */
	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540 
/* Compute how many LE flow-control credits to grant the peer, based on
 * the space left in the receive buffer and any partially reassembled SDU.
 * Returns 0 when MPS is unset or the buffer is already full.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	/* Bytes already consumed by a partially reassembled SDU */
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
562 
/* Initialize LE credit-based flow-control state on @chan with the
 * peer-granted @tx_credits, resetting SDU reassembly and the TX queue.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
575 
/* Initialize enhanced-credit (ECRED) flow-control state: same as LE
 * flow control, but with the ECRED-mandated MPS floor enforced.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* MPS changed, so the credit calculation must be redone */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
586 
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * set QoS defaults, take a channel reference and append it to the
 * connection's channel list. Caller must hold conn->lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort QoS defaults */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference owned by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
639 
/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
646 
/* Detach @chan from its connection and tear it down with error @err:
 * stop the channel timer, notify the owner, unlink from the connection's
 * channel list (dropping the list's reference), and release mode-specific
 * resources (queues, ERTM timers, seq lists) unless configuration never
 * completed.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken by __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Mode-specific state below is only set up once config completes */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
704 
/* Invoke @func on every channel of @conn whose signalling ident matches
 * @id. Uses the _safe iterator so @func may remove the channel from the
 * list. Caller must hold conn->lock.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
715 
716 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
717 			      void *data)
718 {
719 	struct l2cap_chan *chan;
720 
721 	list_for_each_entry(chan, &conn->chan_l, list) {
722 		func(chan, data);
723 	}
724 }
725 
/* Locked wrapper around __l2cap_chan_list(); tolerates a NULL @conn */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
738 
/* Deferred work (id_addr_timer): propagate the HCI connection's current
 * destination address and type to every channel on the connection.
 * NOTE(review): presumably scheduled after LE identity-address
 * resolution; the scheduling site is outside this chunk — confirm.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
757 
/* Reject a pending LE credit-based connection request on @chan and move
 * it to BT_DISCONN. The result is "authorization refused" when setup was
 * deferred to userspace, "bad PSM" otherwise.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
780 
/* Reject a pending ECRED connection on @chan: move it to BT_DISCONN and
 * send the (deferred) enhanced-credit connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
787 
/* Reject a pending BR/EDR connection request on @chan and move it to
 * BT_DISCONN. The result is "security block" when setup was deferred to
 * userspace, "bad PSM" otherwise.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* In the response, scid/dcid are from the remote's point of view */
	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
808 
/* Close @chan with error @reason, dispatching on its current state:
 * established conn-oriented channels send a disconnect request first,
 * half-open incoming channels (BT_CONNECT2) send the appropriate reject,
 * and everything else is torn down directly. Caller must hold the
 * channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer time to respond before forcing
			 * teardown via the channel timer.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject defers teardown */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
859 
/* Derive the HCI authentication requirement for @chan from its channel
 * type, PSM and security level. May lower sec_level to BT_SECURITY_SDP
 * for the SDP and 3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw sockets are used for dedicated bonding only */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP never requires bonding */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
911 
/* Service level security.
 * Request the link security required by @chan: SMP pairing on LE links,
 * HCI authentication/encryption on BR/EDR. Return value comes from
 * smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
926 
/* Allocate a signalling command identifier for @conn from its tx_ida.
 * Returns the ident, or a negative errno from ida_alloc_range().
 */
static int l2cap_get_ident(struct l2cap_conn *conn)
{
	/* LE link does not support tools like l2ping so use the full range */
	if (conn->hcon->type == LE_LINK)
		return ida_alloc_range(&conn->tx_ida, 1, 255, GFP_ATOMIC);

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */
	return ida_alloc_range(&conn->tx_ida, 1, 128, GFP_ATOMIC);
}
940 
/* Hand @skb to the ACL layer, or free it when the underlying HCI
 * connection is no longer valid. Always consumes @skb.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
950 
/* Build and send a signalling command on @conn. Silently drops the
 * command if the skb cannot be allocated. Commands are sent at maximum
 * HCI priority with the radio forced active.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
975 
/* Transmit a data skb on @chan's ACL link, choosing the flush policy
 * from the link type and the channel's FLAG_FLUSHABLE setting.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
998 
999 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1000 {
1001 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1002 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1003 
1004 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1005 		/* S-Frame */
1006 		control->sframe = 1;
1007 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1008 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1009 
1010 		control->sar = 0;
1011 		control->txseq = 0;
1012 	} else {
1013 		/* I-Frame */
1014 		control->sframe = 0;
1015 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1016 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1017 
1018 		control->poll = 0;
1019 		control->super = 0;
1020 	}
1021 }
1022 
1023 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1024 {
1025 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1026 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1027 
1028 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1029 		/* S-Frame */
1030 		control->sframe = 1;
1031 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1032 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1033 
1034 		control->sar = 0;
1035 		control->txseq = 0;
1036 	} else {
1037 		/* I-Frame */
1038 		control->sframe = 0;
1039 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1040 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1041 
1042 		control->poll = 0;
1043 		control->super = 0;
1044 	}
1045 }
1046 
/* Decode the control field at the head of @skb into the skb's control
 * block, consuming the control bytes. The field width depends on whether
 * the channel negotiated extended control (FLAG_EXT_CTRL).
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1060 
/* Encode @control into a 32-bit ERTM extended control field */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1079 
/* Encode @control into a 16-bit ERTM enhanced control field */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1098 
1099 static inline void __pack_control(struct l2cap_chan *chan,
1100 				  struct l2cap_ctrl *control,
1101 				  struct sk_buff *skb)
1102 {
1103 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1104 		put_unaligned_le32(__pack_extended_control(control),
1105 				   skb->data + L2CAP_HDR_SIZE);
1106 	} else {
1107 		put_unaligned_le16(__pack_enhanced_control(control),
1108 				   skb->data + L2CAP_HDR_SIZE);
1109 	}
1110 }
1111 
1112 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1113 {
1114 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1115 		return L2CAP_EXT_HDR_SIZE;
1116 	else
1117 		return L2CAP_ENH_HDR_SIZE;
1118 }
1119 
/* Allocate and build an S-frame PDU carrying the given pre-packed
 * control field: basic L2CAP header, 16- or 32-bit control field
 * (per FLAG_EXT_CTRL) and, when CRC16 is configured, a trailing FCS.
 * Returns ERR_PTR(-ENOMEM) if allocation fails.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* len counts everything after the basic header (control + FCS) */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field built so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames carry flow-control state; send at highest priority */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1152 
/* Pack and transmit the S-frame described by @control, updating the
 * channel's ack bookkeeping (pending F-bit, RNR-sent state and
 * last_acked_seq) as a side effect. Ignores non-S-frame controls.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is attached to the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last flow-control state sent was RNR */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Anything but SREJ acknowledges frames up to reqseq, so the
	 * pending ack timer is no longer needed.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1190 
1191 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1192 {
1193 	struct l2cap_ctrl control;
1194 
1195 	BT_DBG("chan %p, poll %d", chan, poll);
1196 
1197 	memset(&control, 0, sizeof(control));
1198 	control.sframe = 1;
1199 	control.poll = poll;
1200 
1201 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1202 		control.super = L2CAP_SUPER_RNR;
1203 	else
1204 		control.super = L2CAP_SUPER_RR;
1205 
1206 	control.reqseq = chan->buffer_seq;
1207 	l2cap_send_sframe(chan, &control);
1208 }
1209 
1210 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1211 {
1212 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1213 		return true;
1214 
1215 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1216 }
1217 
/* Send an L2CAP Connection Request for @chan and mark the connect as
 * pending until the peer's response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched to us */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1232 
/* Transition @chan to BT_CONNECTED, resetting configuration state and
 * stopping the channel timer, then notify the channel's owner via the
 * ready callback. Safe to call more than once.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No TX credits yet: block the sender until the peer
		 * grants some.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1258 
/* Send an LE Credit Based Connection Request for @chan. Sent at most
 * once per channel (guarded by FLAG_LE_CONN_REQ_SENT).
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU when no MTU was configured */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	/* Remember the ident so the response can be matched to us */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1284 
/* Working state for building an Enhanced Credit Based connection
 * request that batches several deferred channels into one PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* up to 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* only same-PID channels are batched */
	int count;			/* number of scid[] slots filled */
};
1294 
1295 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1296 {
1297 	struct l2cap_ecred_conn_data *conn = data;
1298 	struct pid *pid;
1299 
1300 	if (chan == conn->chan)
1301 		return;
1302 
1303 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1304 		return;
1305 
1306 	pid = chan->ops->get_peer_pid(chan);
1307 
1308 	/* Only add deferred channels with the same PID/PSM */
1309 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1310 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1311 		return;
1312 
1313 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1314 		return;
1315 
1316 	l2cap_ecred_init(chan, 0);
1317 
1318 	/* Set the same ident so we can match on the rsp */
1319 	chan->ident = conn->chan->ident;
1320 
1321 	/* Include all channels deferred */
1322 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1323 
1324 	conn->count++;
1325 }
1326 
/* Send an Enhanced Credit Based Connection Request for @chan, batching
 * in other deferred channels with the same PID/PSM via
 * l2cap_ecred_defer_connect(). Deferred channels never send on their
 * own; they wait to be picked up here. Sent at most once per channel
 * (FLAG_ECRED_CONN_REQ_SENT).
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* Only the scid[] slots actually filled are transmitted */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1359 
1360 static void l2cap_le_start(struct l2cap_chan *chan)
1361 {
1362 	struct l2cap_conn *conn = chan->conn;
1363 
1364 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1365 		return;
1366 
1367 	if (!chan->psm) {
1368 		l2cap_chan_ready(chan);
1369 		return;
1370 	}
1371 
1372 	if (chan->state == BT_CONNECT) {
1373 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1374 			l2cap_ecred_connect(chan);
1375 		else
1376 			l2cap_le_connect(chan);
1377 	}
1378 }
1379 
1380 static void l2cap_start_connection(struct l2cap_chan *chan)
1381 {
1382 	if (chan->conn->hcon->type == LE_LINK) {
1383 		l2cap_le_start(chan);
1384 	} else {
1385 		l2cap_send_conn_req(chan);
1386 	}
1387 }
1388 
/* Start the information request (feature mask) procedure on @conn, at
 * most once per connection. A timer is armed so the procedure still
 * completes if the peer never responds (see l2cap_info_timeout).
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1406 
1407 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1408 				     struct l2cap_chan *chan)
1409 {
1410 	/* The minimum encryption key size needs to be enforced by the
1411 	 * host stack before establishing any L2CAP connections. The
1412 	 * specification in theory allows a minimum of 1, but to align
1413 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1414 	 *
1415 	 * This check might also be called for unencrypted connections
1416 	 * that have no key size requirements. Ensure that the link is
1417 	 * actually encrypted before enforcing a key size.
1418 	 */
1419 	int min_key_size = hcon->hdev->min_enc_key_size;
1420 
1421 	/* On FIPS security level, key size must be 16 bytes */
1422 	if (chan->sec_level == BT_SECURITY_FIPS)
1423 		min_key_size = 16;
1424 
1425 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1426 		hcon->enc_key_size >= min_key_size);
1427 }
1428 
/* Drive @chan toward establishment. LE links defer to l2cap_le_start().
 * On BR/EDR the feature-mask procedure must have completed and security
 * must be satisfied first; if the encryption key is too short the
 * channel is put on the disconnect timer instead of being connected.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Kick off the info procedure first; we get restarted once it
	 * finishes (or times out).
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1455 
1456 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1457 {
1458 	u32 local_feat_mask = l2cap_feat_mask;
1459 	if (!disable_ertm)
1460 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1461 
1462 	switch (mode) {
1463 	case L2CAP_MODE_ERTM:
1464 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1465 	case L2CAP_MODE_STREAMING:
1466 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1467 	default:
1468 		return 0x00;
1469 	}
1470 }
1471 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN with
 * @err as the channel error. ERTM timers are stopped first so no more
 * retransmissions or acks go out on a dying channel.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1493 
1494 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push it forward now that the
 * feature-mask procedure has finished (or timed out): mark
 * non-connection-oriented channels ready, (re)start outgoing connects,
 * and answer incoming connects that were waiting (BT_CONNECT2).
 * Each channel is processed under its own lock.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device can't fall back to basic mode,
			 * so close if the required mode is unsupported.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				/* Security passed: either hand off to the
				 * owner for deferred accept or accept now.
				 */
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured accept moves
			 * on to sending the first configure request.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1570 
/* An LE link just became ready: trigger any pending outgoing security
 * request and, when acting as peripheral, ask the central to update the
 * connection parameters if the negotiated interval is out of range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1603 
/* The underlying link is up: start the info procedure on BR/EDR, push
 * every channel on the connection forward under its lock, run LE
 * post-connect work, and release any RX frames queued during setup.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels only become ready once the
			 * feature mask is known.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1639 
1640 /* Notify sockets that we cannot guaranty reliability anymore */
1641 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1642 {
1643 	struct l2cap_chan *chan;
1644 
1645 	BT_DBG("conn %p", conn);
1646 
1647 	list_for_each_entry(chan, &conn->chan_l, list) {
1648 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1649 			l2cap_chan_set_err(chan, err);
1650 	}
1651 }
1652 
/* The info request timed out without a response: mark the feature-mask
 * procedure as done (no features learned) and let waiting channels
 * proceed via l2cap_conn_start().
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1665 
1666 /*
1667  * l2cap_user
1668  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1669  * callback is called during registration. The ->remove callback is called
1670  * during unregistration.
 * An l2cap_user object can either be unregistered explicitly or implicitly
 * when the underlying l2cap_conn object is deleted. This guarantees that
 * conn->hcon, conn->hchan, etc. are valid as long as the remove callback
 * hasn't been called.
1674  * External modules must own a reference to the l2cap_conn object if they intend
1675  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1676  * any time if they don't.
1677  */
1678 
/* Register an l2cap_user on @conn. Returns 0 on success, -EINVAL if the
 * user is already on a list, -ENODEV if the connection was already torn
 * down, or whatever error the user's ->probe callback returns.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1716 
1717 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1718 {
1719 	struct hci_dev *hdev = conn->hcon->hdev;
1720 
1721 	hci_dev_lock(hdev);
1722 
1723 	if (list_empty(&user->list))
1724 		goto out_unlock;
1725 
1726 	list_del_init(&user->list);
1727 	user->remove(conn, user);
1728 
1729 out_unlock:
1730 	hci_dev_unlock(hdev);
1731 }
1732 EXPORT_SYMBOL(l2cap_unregister_user);
1733 
/* Remove and notify every registered l2cap_user during connection
 * teardown. Pops one entry at a time so the loop stays correct even if
 * a ->remove callback modifies the list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1744 
/* Tear down the L2CAP connection attached to @hcon: discard pending RX,
 * notify users, close every channel with @err, release the HCI channel
 * and drop the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	ida_destroy(&conn->tx_ida);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() until
		 * the owner's close callback has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	/* NULL hchan marks the connection as unregistered (see
	 * l2cap_register_user).
	 */
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1800 
/* kref release callback: drop the hci_conn reference held by the
 * connection and free the l2cap_conn itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1808 
/* Take a reference on @conn; pair with l2cap_conn_put(). */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1815 
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1821 
1822 /* ---- Socket interface ---- */
1823 
1824 /* Find socket with psm and source / destination bdaddr.
1825  * Returns closest match.
1826  */
/* The returned channel (if any) carries a reference taken with
 * l2cap_chan_hold_unless_zero(); the caller must put it when done.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's source address type must match the link
		 * transport.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels whose refcount already hit
				 * zero (being freed).
				 */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1877 
/* ERTM monitor timer fired: feed the monitor-timeout event into the TX
 * state machine. The channel reference held for the timer is dropped on
 * every exit path; bail early if the channel lost its connection.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1898 
/* ERTM retransmission timer fired: feed the retransmission-timeout
 * event into the TX state machine. Mirrors l2cap_monitor_timeout() in
 * its locking and reference handling.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1918 
/* Streaming-mode transmit: number, pack and send every queued PDU
 * immediately. Streaming mode has no retransmission, so frames are
 * dequeued for good as they are sent.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames acknowledge nothing */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1954 
/* ERTM transmit: send queued I-frames while the remote TX window has
 * room and the TX state machine is in XMIT. Each frame is cloned before
 * sending so the original stays on tx_q for possible retransmission,
 * and each carries an acknowledgment (reqseq) of everything received.
 * Returns the number of frames sent, 0 when the remote is busy, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Attach a pending F-bit if one is owed */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2021 
/* Retransmit every sequence number on the retrans_list. Each frame is
 * re-packed with the current reqseq and a pending F-bit if owed, its
 * FCS is recomputed, and the channel is disconnected if any frame
 * exceeds the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit to current values */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2099 
/* Retransmit the single frame the peer asked for (control->reqseq). */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2108 
/* Retransmit all unacknowledged frames starting from control->reqseq:
 * rebuild the retrans_list from the TX queue up to (but not including)
 * tx_send_head and resend. A set poll bit means the peer expects an
 * F-bit in our response.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to resend (reqseq or, failing
		 * that, the first not-yet-sent frame).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2142 
/* Acknowledge received I-frames. When the local side is busy, an RNR
 * goes out immediately. Otherwise first try to piggyback the ack on
 * pending I-frame transmissions; if frames still need acking, send an
 * explicit RR once ~3/4 of the ack window is outstanding, or (re)arm
 * the ack timer to batch smaller acks.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2192 
/* Copy @len bytes of user data from @msg into @skb.  The first
 * @count bytes go into the skb's linear area; any remainder is
 * copied into newly allocated skbs chained on the frag_list.
 *
 * Returns the number of bytes consumed, or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		/* Each fragment holds at most one HCI MTU of payload. */
		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link the fragment before copying into it, so a failed
		 * copy below still leaves it owned by (and freed with)
		 * the parent skb.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account the fragment's bytes in the parent skb totals. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2236 
2237 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2238 						 struct msghdr *msg, size_t len)
2239 {
2240 	struct l2cap_conn *conn = chan->conn;
2241 	struct sk_buff *skb;
2242 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2243 	struct l2cap_hdr *lh;
2244 
2245 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2246 	       __le16_to_cpu(chan->psm), len);
2247 
2248 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2249 
2250 	skb = chan->ops->alloc_skb(chan, hlen, count,
2251 				   msg->msg_flags & MSG_DONTWAIT);
2252 	if (IS_ERR(skb))
2253 		return skb;
2254 
2255 	/* Create L2CAP header */
2256 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2257 	lh->cid = cpu_to_le16(chan->dcid);
2258 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2259 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2260 
2261 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2262 	if (unlikely(err < 0)) {
2263 		kfree_skb(skb);
2264 		return ERR_PTR(err);
2265 	}
2266 	return skb;
2267 }
2268 
2269 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2270 					      struct msghdr *msg, size_t len)
2271 {
2272 	struct l2cap_conn *conn = chan->conn;
2273 	struct sk_buff *skb;
2274 	int err, count;
2275 	struct l2cap_hdr *lh;
2276 
2277 	BT_DBG("chan %p len %zu", chan, len);
2278 
2279 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2280 
2281 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2282 				   msg->msg_flags & MSG_DONTWAIT);
2283 	if (IS_ERR(skb))
2284 		return skb;
2285 
2286 	/* Create L2CAP header */
2287 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2288 	lh->cid = cpu_to_le16(chan->dcid);
2289 	lh->len = cpu_to_le16(len);
2290 
2291 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2292 	if (unlikely(err < 0)) {
2293 		kfree_skb(skb);
2294 		return ERR_PTR(err);
2295 	}
2296 	return skb;
2297 }
2298 
/* Allocate and build one ERTM/streaming I-frame PDU: basic L2CAP
 * header, zeroed control field (populated at transmit time),
 * optional SDU length (only on the first PDU of a segmented SDU),
 * then payload from @msg, with header room accounted for an FCS
 * when enabled.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	/* A SAR start frame also carries the total SDU length. */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Remember the FCS setting per-skb; retries start at zero. */
	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2352 
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs queued on
 * @seg_queue.  PDU size is bounded by the HCI MTU (so each PDU fits
 * in one HCI fragment), the BR/EDR payload limit, and the remote's
 * MPS.  SAR bits mark each PDU as unsegmented, start, continue or
 * end; only the start PDU carries the total SDU length.
 *
 * Returns 0 on success or a negative errno (queue purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SAR, no SDU length field. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first (start) PDU carries the SDU length. */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2418 
2419 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2420 						   struct msghdr *msg,
2421 						   size_t len, u16 sdulen)
2422 {
2423 	struct l2cap_conn *conn = chan->conn;
2424 	struct sk_buff *skb;
2425 	int err, count, hlen;
2426 	struct l2cap_hdr *lh;
2427 
2428 	BT_DBG("chan %p len %zu", chan, len);
2429 
2430 	if (!conn)
2431 		return ERR_PTR(-ENOTCONN);
2432 
2433 	hlen = L2CAP_HDR_SIZE;
2434 
2435 	if (sdulen)
2436 		hlen += L2CAP_SDULEN_SIZE;
2437 
2438 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2439 
2440 	skb = chan->ops->alloc_skb(chan, hlen, count,
2441 				   msg->msg_flags & MSG_DONTWAIT);
2442 	if (IS_ERR(skb))
2443 		return skb;
2444 
2445 	/* Create L2CAP header */
2446 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2447 	lh->cid = cpu_to_le16(chan->dcid);
2448 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2449 
2450 	if (sdulen)
2451 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2452 
2453 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2454 	if (unlikely(err < 0)) {
2455 		kfree_skb(skb);
2456 		return ERR_PTR(err);
2457 	}
2458 
2459 	return skb;
2460 }
2461 
/* Segment an SDU into LE flow-control K-frames queued on @seg_queue.
 * The first PDU carries the 2-byte SDU length and therefore holds
 * L2CAP_SDULEN_SIZE fewer payload bytes than subsequent PDUs.
 * Returns 0 or a negative errno (queue purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* Reserve room for the SDU length field in the first PDU. */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* From the second PDU on there is no SDU length
			 * field, freeing L2CAP_SDULEN_SIZE bytes per PDU.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2497 
2498 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2499 {
2500 	int sent = 0;
2501 
2502 	BT_DBG("chan %p", chan);
2503 
2504 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2505 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2506 		chan->tx_credits--;
2507 		sent++;
2508 	}
2509 
2510 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2511 	       skb_queue_len(&chan->tx_q));
2512 }
2513 
2514 static void l2cap_tx_timestamp(struct sk_buff *skb,
2515 			       const struct sockcm_cookie *sockc,
2516 			       size_t len)
2517 {
2518 	struct sock *sk = skb ? skb->sk : NULL;
2519 
2520 	if (sk && sk->sk_type == SOCK_STREAM)
2521 		hci_setup_tx_timestamp(skb, len, sockc);
2522 	else
2523 		hci_setup_tx_timestamp(skb, 1, sockc);
2524 }
2525 
2526 static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
2527 				   const struct sockcm_cookie *sockc,
2528 				   size_t len)
2529 {
2530 	struct sk_buff *skb = skb_peek(queue);
2531 	struct sock *sk = skb ? skb->sk : NULL;
2532 
2533 	if (sk && sk->sk_type == SOCK_STREAM)
2534 		l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len);
2535 	else
2536 		l2cap_tx_timestamp(skb, sockc, len);
2537 }
2538 
/* Send @len bytes from @msg on @chan, dispatching on channel type
 * and mode:
 *  - connectionless channels send a single PSM-prefixed PDU;
 *  - LE/extended flow-control modes segment into K-frames and send
 *    as credits allow, suspending the channel when credits run out;
 *  - basic mode sends one unsegmented PDU;
 *  - ERTM/streaming segment first, then hand off to the TX state
 *    machine (ERTM) or transmit directly (streaming).
 *
 * Returns the number of bytes sent (== @len) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (allocation can
		 * block); discard the PDUs in that case.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop accepting data until more arrive. */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2652 
2653 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2654 {
2655 	struct l2cap_ctrl control;
2656 	u16 seq;
2657 
2658 	BT_DBG("chan %p, txseq %u", chan, txseq);
2659 
2660 	memset(&control, 0, sizeof(control));
2661 	control.sframe = 1;
2662 	control.super = L2CAP_SUPER_SREJ;
2663 
2664 	for (seq = chan->expected_tx_seq; seq != txseq;
2665 	     seq = __next_seq(chan, seq)) {
2666 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2667 			control.reqseq = seq;
2668 			l2cap_send_sframe(chan, &control);
2669 			l2cap_seq_list_append(&chan->srej_list, seq);
2670 		}
2671 	}
2672 
2673 	chan->expected_tx_seq = __next_seq(chan, txseq);
2674 }
2675 
2676 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2677 {
2678 	struct l2cap_ctrl control;
2679 
2680 	BT_DBG("chan %p", chan);
2681 
2682 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2683 		return;
2684 
2685 	memset(&control, 0, sizeof(control));
2686 	control.sframe = 1;
2687 	control.super = L2CAP_SUPER_SREJ;
2688 	control.reqseq = chan->srej_list.tail;
2689 	l2cap_send_sframe(chan, &control);
2690 }
2691 
/* Re-send SREJ frames for every sequence number still on the SREJ
 * list except @txseq (which has just been received).  Each popped
 * entry other than @txseq is appended back, so the list is rotated
 * through exactly one full pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still missing: keep it on the list for future passes. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2717 
2718 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2719 {
2720 	struct sk_buff *acked_skb;
2721 	u16 ackseq;
2722 
2723 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2724 
2725 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2726 		return;
2727 
2728 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2729 	       chan->expected_ack_seq, chan->unacked_frames);
2730 
2731 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2732 	     ackseq = __next_seq(chan, ackseq)) {
2733 
2734 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2735 		if (acked_skb) {
2736 			skb_unlink(acked_skb, &chan->tx_q);
2737 			kfree_skb(acked_skb);
2738 			chan->unacked_frames--;
2739 		}
2740 	}
2741 
2742 	chan->expected_ack_seq = reqseq;
2743 
2744 	if (chan->unacked_frames == 0)
2745 		__clear_retrans_timer(chan);
2746 
2747 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2748 }
2749 
/* Abandon the SREJ_SENT receive state: roll expected_tx_seq back to
 * buffer_seq, drop the SREJ bookkeeping and any out-of-order frames
 * held in srej_q, and return to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2759 
/* ERTM transmit state machine, XMIT state: normal transmission is
 * allowed (no outstanding poll awaiting a final response).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new frames and send whatever the window allows. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends the RNR while locally busy. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out while busy: poll the peer with an
			 * RR (P=1) and wait for the final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Release frames acknowledged by the received reqseq. */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer (P=1) and move to WAIT_F for the answer. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer to learn its state. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2831 
2832 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2833 				  struct l2cap_ctrl *control,
2834 				  struct sk_buff_head *skbs, u8 event)
2835 {
2836 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2837 	       event);
2838 
2839 	switch (event) {
2840 	case L2CAP_EV_DATA_REQUEST:
2841 		if (chan->tx_send_head == NULL)
2842 			chan->tx_send_head = skb_peek(skbs);
2843 		/* Queue data, but don't send. */
2844 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2845 		break;
2846 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2847 		BT_DBG("Enter LOCAL_BUSY");
2848 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2849 
2850 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2851 			/* The SREJ_SENT state must be aborted if we are to
2852 			 * enter the LOCAL_BUSY state.
2853 			 */
2854 			l2cap_abort_rx_srej_sent(chan);
2855 		}
2856 
2857 		l2cap_send_ack(chan);
2858 
2859 		break;
2860 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2861 		BT_DBG("Exit LOCAL_BUSY");
2862 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2863 
2864 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2865 			struct l2cap_ctrl local_control;
2866 			memset(&local_control, 0, sizeof(local_control));
2867 			local_control.sframe = 1;
2868 			local_control.super = L2CAP_SUPER_RR;
2869 			local_control.poll = 1;
2870 			local_control.reqseq = chan->buffer_seq;
2871 			l2cap_send_sframe(chan, &local_control);
2872 
2873 			chan->retry_count = 1;
2874 			__set_monitor_timer(chan);
2875 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2876 		}
2877 		break;
2878 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2879 		l2cap_process_reqseq(chan, control->reqseq);
2880 		fallthrough;
2881 
2882 	case L2CAP_EV_RECV_FBIT:
2883 		if (control && control->final) {
2884 			__clear_monitor_timer(chan);
2885 			if (chan->unacked_frames > 0)
2886 				__set_retrans_timer(chan);
2887 			chan->retry_count = 0;
2888 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2889 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2890 		}
2891 		break;
2892 	case L2CAP_EV_EXPLICIT_POLL:
2893 		/* Ignore */
2894 		break;
2895 	case L2CAP_EV_MONITOR_TO:
2896 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2897 			l2cap_send_rr_or_rnr(chan, 1);
2898 			__set_monitor_timer(chan);
2899 			chan->retry_count++;
2900 		} else {
2901 			l2cap_send_disconn_req(chan, ECONNABORTED);
2902 		}
2903 		break;
2904 	default:
2905 		break;
2906 	}
2907 }
2908 
2909 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2910 		     struct sk_buff_head *skbs, u8 event)
2911 {
2912 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2913 	       chan, control, skbs, event, chan->tx_state);
2914 
2915 	switch (chan->tx_state) {
2916 	case L2CAP_TX_STATE_XMIT:
2917 		l2cap_tx_state_xmit(chan, control, skbs, event);
2918 		break;
2919 	case L2CAP_TX_STATE_WAIT_F:
2920 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2921 		break;
2922 	default:
2923 		/* Ignore event */
2924 		break;
2925 	}
2926 }
2927 
/* Feed a received reqseq (and F bit) from the RX path into the TX
 * state machine so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2934 
/* Feed only a received F bit from the RX path into the TX state
 * machine (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2941 
2942 /* Copy frame to all raw sockets on that connection */
2943 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2944 {
2945 	struct sk_buff *nskb;
2946 	struct l2cap_chan *chan;
2947 
2948 	BT_DBG("conn %p", conn);
2949 
2950 	list_for_each_entry(chan, &conn->chan_l, list) {
2951 		if (chan->chan_type != L2CAP_CHAN_RAW)
2952 			continue;
2953 
2954 		/* Don't send frame to the channel it came from */
2955 		if (bt_cb(skb)->l2cap.chan == chan)
2956 			continue;
2957 
2958 		nskb = skb_clone(skb, GFP_KERNEL);
2959 		if (!nskb)
2960 			continue;
2961 		if (chan->ops->recv(chan, nskb))
2962 			kfree_skb(nskb);
2963 	}
2964 }
2965 
2966 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header +
 * @dlen bytes of @data on the (LE) signalling channel.  Data beyond
 * the connection MTU is carried in continuation fragments chained on
 * frag_list (these carry no L2CAP header).  Returns NULL on
 * allocation failure or if the MTU cannot hold even the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with command data. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	/* Bytes of command data that did not fit in the first skb. */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the already-chained fragments along with the head skb. */
	kfree_skb(skb);
	return NULL;
}
3032 
/* Parse one configuration option (type/len/value TLV) at *ptr and
 * advance *ptr past it.  The option type and length are stored in
 * *type/*olen.  For 1/2/4-byte options *val is the value itself;
 * for any other length *val is a pointer to the raw option data.
 * Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes straight from the packet; the caller
 * is expected to have bounded the option against the remaining
 * buffer — confirm at call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3066 
3067 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3068 {
3069 	struct l2cap_conf_opt *opt = *ptr;
3070 
3071 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3072 
3073 	if (size < L2CAP_CONF_OPT_SIZE + len)
3074 		return;
3075 
3076 	opt->type = type;
3077 	opt->len  = len;
3078 
3079 	switch (len) {
3080 	case 1:
3081 		*((u8 *) opt->val)  = val;
3082 		break;
3083 
3084 	case 2:
3085 		put_unaligned_le16(val, opt->val);
3086 		break;
3087 
3088 	case 4:
3089 		put_unaligned_le32(val, opt->val);
3090 		break;
3091 
3092 	default:
3093 		memcpy(opt->val, (void *) val, len);
3094 		break;
3095 	}
3096 
3097 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3098 }
3099 
3100 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3101 {
3102 	struct l2cap_conf_efs efs;
3103 
3104 	switch (chan->mode) {
3105 	case L2CAP_MODE_ERTM:
3106 		efs.id		= chan->local_id;
3107 		efs.stype	= chan->local_stype;
3108 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3109 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3110 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3111 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3112 		break;
3113 
3114 	case L2CAP_MODE_STREAMING:
3115 		efs.id		= 1;
3116 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3117 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3118 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3119 		efs.acc_lat	= 0;
3120 		efs.flush_to	= 0;
3121 		break;
3122 
3123 	default:
3124 		return;
3125 	}
3126 
3127 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3128 			   (unsigned long) &efs, size);
3129 }
3130 
/* Delayed-work handler for the ERTM ack timer: if any received
 * frames remain unacknowledged, send an RR (or RNR while locally
 * busy) acking them.  Drops the channel reference held for the
 * pending work (presumably taken when the timer was armed — see the
 * timer scheduling sites).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last ack we sent. */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3150 
/* Reset per-channel sequence/SDU state for a (re)configured channel
 * and, for ERTM mode only, initialize the receive/transmit state
 * machines plus the SREJ and retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation (in which case any partially allocated list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Everything below is ERTM-specific. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the srej list on partial failure. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3186 
3187 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3188 {
3189 	switch (mode) {
3190 	case L2CAP_MODE_STREAMING:
3191 	case L2CAP_MODE_ERTM:
3192 		if (l2cap_mode_supported(mode, remote_feat_mask))
3193 			return mode;
3194 		fallthrough;
3195 	default:
3196 		return L2CAP_MODE_BASIC;
3197 	}
3198 }
3199 
/* True if the connection's feature mask includes Extended Window
 * Size support.
 */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3204 
/* True if the connection's feature mask includes Extended Flow
 * Specification support.
 */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3209 
/* Fill the default ERTM retransmission and monitor timeouts into a
 * configuration RFC option.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3216 
3217 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3218 {
3219 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3220 	    __l2cap_ews_supported(chan->conn)) {
3221 		/* use extended control field */
3222 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3223 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3224 	} else {
3225 		chan->tx_win = min_t(u16, chan->tx_win,
3226 				     L2CAP_DEFAULT_TX_WINDOW);
3227 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3228 	}
3229 	chan->ack_win = chan->tx_win;
3230 }
3231 
3232 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3233 {
3234 	struct hci_conn *conn = chan->conn->hcon;
3235 
3236 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3237 
3238 	/* The 2-DH1 packet has between 2 and 56 information bytes
3239 	 * (including the 2-byte payload header)
3240 	 */
3241 	if (!(conn->pkt_type & HCI_2DH1))
3242 		chan->imtu = 54;
3243 
3244 	/* The 3-DH1 packet has between 2 and 85 information bytes
3245 	 * (including the 2-byte payload header)
3246 	 */
3247 	if (!(conn->pkt_type & HCI_3DH1))
3248 		chan->imtu = 83;
3249 
3250 	/* The 2-DH3 packet has between 2 and 369 information bytes
3251 	 * (including the 2-byte payload header)
3252 	 */
3253 	if (!(conn->pkt_type & HCI_2DH3))
3254 		chan->imtu = 367;
3255 
3256 	/* The 3-DH3 packet has between 2 and 554 information bytes
3257 	 * (including the 2-byte payload header)
3258 	 */
3259 	if (!(conn->pkt_type & HCI_3DH3))
3260 		chan->imtu = 552;
3261 
3262 	/* The 2-DH5 packet has between 2 and 681 information bytes
3263 	 * (including the 2-byte payload header)
3264 	 */
3265 	if (!(conn->pkt_type & HCI_2DH5))
3266 		chan->imtu = 679;
3267 
3268 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3269 	 * (including the 2-byte payload header)
3270 	 */
3271 	if (!(conn->pkt_type & HCI_3DH5))
3272 		chan->imtu = 1021;
3273 }
3274 
/* Build an L2CAP_CONFIGURATION_REQ payload for @chan into @data
 * (at most @data_size bytes).
 *
 * Emits an MTU option when chan->imtu differs from the spec default,
 * an RFC option describing the channel mode (Basic/ERTM/Streaming),
 * and EFS, extended window size and FCS options where the negotiated
 * features call for them.  Returns the number of bytes written
 * (request header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Only (re)select the channel mode on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device keeps its configured mode as is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Pick the best mode the remote's feature mask allows */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			/* No MTU configured yet: derive one from the ACL
			 * packet types supported on this connection.
			 */
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit Basic mode RFC option when the
		 * remote advertises ERTM or streaming support.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a maximal frame (extended header,
		 * SDU length field and FCS included) fits the conn MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Windows beyond the RFC field's range go in a separate
		 * extended window size option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		/* Request "no FCS" when allowed and either side has
		 * already opted out of checksums.
		 */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3400 
/* Parse the remote's complete L2CAP_CONFIGURATION_REQ (accumulated in
 * chan->conf_req / chan->conf_len) and build our response into @data
 * (at most @data_size bytes).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the request cannot be honoured and the channel must be disconnected.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the options the remote sent.  Options with
	 * an unexpected length are silently ignored.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not accepted here */
			return -ECONNREFUSED;

		default:
			if (hint)
				break;
			/* Echo unknown non-hint options back as unknown */
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode can only change during the first request/response pair */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A "state 2" device insists on its configured mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Refuse outright once mode negotiation already failed */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits in the
			 * connection MTU with all headers present.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3643 
/* Parse an L2CAP_CONFIGURATION_RSP (@rsp, @len bytes of options) and
 * build a follow-up L2CAP_CONFIGURATION_REQ into @data (@size bytes),
 * adopting the adjustments the remote proposed.
 *
 * @result may be updated (e.g. to L2CAP_CONF_UNACCEPT on a too-small
 * MTU).  Returns the number of request bytes written, or -ECONNREFUSED
 * when the response is incompatible with the channel's configuration.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the remote's options; adopt acceptable values and echo
	 * each one back in the new request.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A "state 2" device cannot accept a mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service type must match ours unless either side
			 * declares "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode channels cannot be switched to another mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3761 
3762 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3763 				u16 result, u16 flags)
3764 {
3765 	struct l2cap_conf_rsp *rsp = data;
3766 	void *ptr = rsp->data;
3767 
3768 	BT_DBG("chan %p", chan);
3769 
3770 	rsp->scid   = cpu_to_le16(chan->dcid);
3771 	rsp->result = cpu_to_le16(result);
3772 	rsp->flags  = cpu_to_le16(flags);
3773 
3774 	return ptr - data;
3775 }
3776 
3777 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3778 {
3779 	struct l2cap_le_conn_rsp rsp;
3780 	struct l2cap_conn *conn = chan->conn;
3781 
3782 	BT_DBG("chan %p", chan);
3783 
3784 	rsp.dcid    = cpu_to_le16(chan->scid);
3785 	rsp.mtu     = cpu_to_le16(chan->imtu);
3786 	rsp.mps     = cpu_to_le16(chan->mps);
3787 	rsp.credits = cpu_to_le16(chan->rx_credits);
3788 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3789 
3790 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3791 		       &rsp);
3792 }
3793 
/* Per-channel callback used to classify channels sharing an ECRED
 * ident: counts channels still pending accept in *data, or records
 * -ECONNREFUSED if any channel ended up neither connected nor pending.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Once refused, stop evaluating; locally initiated channels
	 * (connection request sent) are not part of this response.
	 */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: add to result */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Not connected or pending accept: it has been refused */
		*result = -ECONNREFUSED;
	}
}
3814 
/* Scratch area for aggregating an ECRED connection response: the
 * fixed response header followed by up to L2CAP_ECRED_MAX_CID CIDs,
 * with @count tracking how many CID slots have been filled so far.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;
};
3822 
/* Per-channel callback that folds a deferred channel into the shared
 * ECRED connection response being built in @data: appends its CID on
 * success, or deletes the channel when the aggregate result is a
 * failure.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3845 
/* Send the deferred L2CAP_CREDIT_BASED_CONNECTION_RSP covering every
 * channel that shares @chan's command ident.  Does nothing while any
 * sibling channel is still pending accept; sends an authorization
 * failure if any sibling was refused.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident 0 means no response is owed */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: at least one channel still pending accept */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3881 
3882 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3883 {
3884 	struct l2cap_conn_rsp rsp;
3885 	struct l2cap_conn *conn = chan->conn;
3886 	u8 buf[128];
3887 	u8 rsp_code;
3888 
3889 	rsp.scid   = cpu_to_le16(chan->dcid);
3890 	rsp.dcid   = cpu_to_le16(chan->scid);
3891 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3892 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3893 	rsp_code = L2CAP_CONN_RSP;
3894 
3895 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3896 
3897 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3898 
3899 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3900 		return;
3901 
3902 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3903 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3904 	chan->num_conf_req++;
3905 }
3906 
/* Extract RFC and extended window size options from a successful
 * L2CAP_CONFIGURATION_RSP (@rsp, @len bytes of options) and apply the
 * resulting timeouts, PDU size and ack window to @chan.  No-op for
 * channels that are neither ERTM nor streaming mode.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick the RFC and EWS options out of the response, ignoring
	 * options with an unexpected length.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The extended option overrides the RFC window field when
		 * extended control is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3962 
3963 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3964 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3965 				    u8 *data)
3966 {
3967 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3968 
3969 	if (cmd_len < sizeof(*rej))
3970 		return -EPROTO;
3971 
3972 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3973 		return 0;
3974 
3975 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3976 	    cmd->ident == conn->info_ident) {
3977 		cancel_delayed_work(&conn->info_timer);
3978 
3979 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3980 		conn->info_ident = 0;
3981 
3982 		l2cap_conn_start(conn);
3983 	}
3984 
3985 	return 0;
3986 }
3987 
/* Handle an incoming L2CAP_CONNECTION_REQ: validate the PSM, security
 * level and requested source CID, create a new channel from the
 * matching listening channel, and always send a connection response
 * (with @rsp_code) carrying the outcome.  May additionally trigger an
 * information request and/or the first configuration request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace will accept/reject later */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security upgrade still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No listening channel found: nothing left to clean up */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Successful connect: immediately start configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4118 
/* Handle an incoming L2CAP_CONNECTION_REQ PDU: validate its length
 * and delegate to l2cap_connect(), which always sends a response.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
	return 0;
}
4128 
/* Handle an incoming L2CAP_CONNECTION_RSP: look up the channel by its
 * source CID (or by command ident when no CID was assigned yet) and,
 * on success, store the remote's CID and start configuration; on
 * pending, mark the channel; on any other result, delete it.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dynamic-range DCID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference, bailing out if the channel is going away */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* The remote's DCID must not collide with another chan */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial config request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4206 
4207 static inline void set_default_fcs(struct l2cap_chan *chan)
4208 {
4209 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4210 	 * sides request it.
4211 	 */
4212 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4213 		chan->fcs = L2CAP_FCS_NONE;
4214 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4215 		chan->fcs = L2CAP_FCS_CRC16;
4216 }
4217 
4218 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4219 				    u8 ident, u16 flags)
4220 {
4221 	struct l2cap_conn *conn = chan->conn;
4222 
4223 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4224 	       flags);
4225 
4226 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4227 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4228 
4229 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4230 		       l2cap_build_conf_rsp(chan, data,
4231 					    L2CAP_CONF_SUCCESS, flags), data);
4232 }
4233 
4234 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4235 				   u16 scid, u16 dcid)
4236 {
4237 	struct l2cap_cmd_rej_cid rej;
4238 
4239 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4240 	rej.scid = __cpu_to_le16(scid);
4241 	rej.dcid = __cpu_to_le16(dcid);
4242 
4243 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4244 }
4245 
/* Handle an incoming L2CAP_CONFIGURATION_REQ: accumulate (possibly
 * continued) option data in chan->conf_req, and once the request is
 * complete, parse it, send a response, and advance the channel to
 * ready state when both input and output configuration are done.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Config is only valid while connecting/configured */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own config request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4354 
/* Handle an incoming L2CAP Configure Response (BR/EDR signaling).
 *
 * Parses the response addressed to our source CID and drives the
 * configuration state machine: SUCCESS records the remote's RFC
 * options, PENDING may trigger an EFS response, UNKNOWN/UNACCEPT
 * rebuilds and resends a Configure Request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP), and any other result tears the channel
 * down.
 *
 * Returns 0 in most cases (an unknown CID is silently ignored),
 * -EPROTO on a truncated command, or a negative error from
 * l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	/* Must at least carry the fixed-size response header */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Remote accepted our options; record its RFC parameters */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Make sure the rebuilt request will fit in req[] */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable result: fail the channel and disconnect */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments will follow; wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions are configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4461 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Looks up the local channel addressed by the request's @dcid, echoes a
 * Disconnection Response with the CID pair seen from our side, and
 * tears the channel down.  An unknown CID is answered with a Command
 * Reject (invalid CID) instead.
 *
 * Returns 0, or -EPROTO if the payload size is wrong.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	/* Disconnection Request has a fixed-size payload */
	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The remote's dcid is our scid; returns the channel locked and
	 * with a reference held.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Respond with the CID pair from our perspective (swapped) */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4500 
4501 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4502 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4503 				       u8 *data)
4504 {
4505 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4506 	u16 dcid, scid;
4507 	struct l2cap_chan *chan;
4508 
4509 	if (cmd_len != sizeof(*rsp))
4510 		return -EPROTO;
4511 
4512 	scid = __le16_to_cpu(rsp->scid);
4513 	dcid = __le16_to_cpu(rsp->dcid);
4514 
4515 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4516 
4517 	chan = l2cap_get_chan_by_scid(conn, scid);
4518 	if (!chan) {
4519 		return 0;
4520 	}
4521 
4522 	if (chan->state != BT_DISCONN) {
4523 		l2cap_chan_unlock(chan);
4524 		l2cap_chan_put(chan);
4525 		return 0;
4526 	}
4527 
4528 	l2cap_chan_del(chan, 0);
4529 
4530 	chan->ops->close(chan);
4531 
4532 	l2cap_chan_unlock(chan);
4533 	l2cap_chan_put(chan);
4534 
4535 	return 0;
4536 }
4537 
4538 static inline int l2cap_information_req(struct l2cap_conn *conn,
4539 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4540 					u8 *data)
4541 {
4542 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4543 	u16 type;
4544 
4545 	if (cmd_len != sizeof(*req))
4546 		return -EPROTO;
4547 
4548 	type = __le16_to_cpu(req->type);
4549 
4550 	BT_DBG("type 0x%4.4x", type);
4551 
4552 	if (type == L2CAP_IT_FEAT_MASK) {
4553 		u8 buf[8];
4554 		u32 feat_mask = l2cap_feat_mask;
4555 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4556 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4557 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4558 		if (!disable_ertm)
4559 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4560 				| L2CAP_FEAT_FCS;
4561 
4562 		put_unaligned_le32(feat_mask, rsp->data);
4563 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4564 			       buf);
4565 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4566 		u8 buf[12];
4567 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4568 
4569 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4570 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4571 		rsp->data[0] = conn->local_fixed_chan;
4572 		memset(rsp->data + 1, 0, 7);
4573 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4574 			       buf);
4575 	} else {
4576 		struct l2cap_info_rsp rsp;
4577 		rsp.type   = cpu_to_le16(type);
4578 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4579 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4580 			       &rsp);
4581 	}
4582 
4583 	return 0;
4584 }
4585 
/* Handle an incoming L2CAP Information Response.
 *
 * Part of the two-step information exchange started at connection
 * setup: first the feature mask is queried and, if the remote supports
 * fixed channels, the fixed channel map is queried next.  Once the
 * exchange finishes (or fails) pending channels are started via
 * l2cap_conn_start().
 *
 * Returns 0, or -EPROTO if the payload is too short.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A valid response arrived; stop the info request timeout */
	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused the query; give up and start channels */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed channel map query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4648 
4649 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4650 					      struct l2cap_cmd_hdr *cmd,
4651 					      u16 cmd_len, u8 *data)
4652 {
4653 	struct hci_conn *hcon = conn->hcon;
4654 	struct l2cap_conn_param_update_req *req;
4655 	struct l2cap_conn_param_update_rsp rsp;
4656 	u16 min, max, latency, to_multiplier;
4657 	int err;
4658 
4659 	if (hcon->role != HCI_ROLE_MASTER)
4660 		return -EINVAL;
4661 
4662 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4663 		return -EPROTO;
4664 
4665 	req = (struct l2cap_conn_param_update_req *) data;
4666 	min		= __le16_to_cpu(req->min);
4667 	max		= __le16_to_cpu(req->max);
4668 	latency		= __le16_to_cpu(req->latency);
4669 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
4670 
4671 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4672 	       min, max, latency, to_multiplier);
4673 
4674 	memset(&rsp, 0, sizeof(rsp));
4675 
4676 	err = hci_check_conn_params(min, max, latency, to_multiplier);
4677 	if (err)
4678 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4679 	else
4680 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4681 
4682 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4683 		       sizeof(rsp), &rsp);
4684 
4685 	if (!err) {
4686 		u8 store_hint;
4687 
4688 		store_hint = hci_le_conn_update(hcon, min, max, latency,
4689 						to_multiplier);
4690 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
4691 				    store_hint, min, max, latency,
4692 				    to_multiplier);
4693 
4694 	}
4695 
4696 	return 0;
4697 }
4698 
/* Handle an LE Credit Based Connection Response.
 *
 * Matches the response to the pending channel by the command identifier
 * we used in the request.  On success the channel parameters (dcid,
 * MTU, MPS, initial credits) are recorded and the channel made ready;
 * on a security-related refusal the security level is raised and a new
 * request will be sent after pairing; anything else kills the channel.
 *
 * Returns 0 on success, -EPROTO on a malformed response, or -EBADSLT
 * when no matching channel / a dcid collision is found.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* On success the spec minimums (MTU/MPS >= 23) and the LE
	 * dynamic CID range must be respected.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* NOTE(review): lookup does not take an explicit channel
	 * reference here — presumably protected by the caller's conn
	 * locking; verify against l2cap_le_sig_cmd's context.
	 */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a dcid that is already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the security level and retry after pairing */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4778 
4779 static void l2cap_put_ident(struct l2cap_conn *conn, u8 code, u8 id)
4780 {
4781 	switch (code) {
4782 	case L2CAP_COMMAND_REJ:
4783 	case L2CAP_CONN_RSP:
4784 	case L2CAP_CONF_RSP:
4785 	case L2CAP_DISCONN_RSP:
4786 	case L2CAP_ECHO_RSP:
4787 	case L2CAP_INFO_RSP:
4788 	case L2CAP_CONN_PARAM_UPDATE_RSP:
4789 	case L2CAP_ECRED_CONN_RSP:
4790 	case L2CAP_ECRED_RECONF_RSP:
4791 		/* First do a lookup since the remote may send bogus ids that
4792 		 * would make ida_free to generate warnings.
4793 		 */
4794 		if (ida_find_first_range(&conn->tx_ida, id, id) >= 0)
4795 			ida_free(&conn->tx_ida, id);
4796 	}
4797 }
4798 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Frees the command's tx identifier first if it is a response we were
 * waiting for, then routes by opcode.  Returning a negative error makes
 * the caller send a Command Reject; handlers whose errors must not
 * trigger a reject have their return value deliberately ignored.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo simply mirrors the request payload back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4859 
/* Handle an LE Credit Based Connection Request.
 *
 * Validates the request (PSM range, MTU/MPS minimums, dynamic CID
 * range, no dcid reuse), finds a listening channel for the PSM with
 * sufficient security, creates the new channel and either defers it to
 * userspace (FLAG_DEFER_SETUP) or answers immediately with a
 * connection response.
 *
 * Returns 0 (errors are reported to the remote via the response's
 * result field), or -EPROTO on a malformed request.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE CoC is 23 for both MTU and MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The listener's security requirements must already be met */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our freshly-allocated scid/credits go into the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4997 
/* Handle an LE Flow Control Credit packet.
 *
 * Adds the indicated credits to the channel's tx budget and resumes
 * transmission.  A remote that would push the total beyond
 * LE_FLOWCTL_MAX_CREDITS is disconnected as required by the spec.
 *
 * Returns 0 (including on credit overflow, to avoid sending an
 * unnecessary Command Reject), -EPROTO on bad size, or -EBADSLT for an
 * unknown CID.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Cap so that tx_credits can never exceed LE_FLOWCTL_MAX_CREDITS */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5044 
/* Handle an Enhanced Credit Based Connection Request.
 *
 * A single request may ask for up to L2CAP_ECRED_MAX_CID channels on
 * one PSM.  Each scid is validated individually; per-channel failures
 * leave a 0x0000 dcid in the response while the shared result field
 * carries the last error seen.  With FLAG_DEFER_SETUP the response is
 * postponed until userspace accepts the channels.
 *
 * Returns 0 (protocol-level errors are reported via the response) or
 * -EINVAL when ECRED support is disabled.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must be the fixed header plus a whole number of scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* 0x0000 in the response marks this channel as refused */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup answers later, once userspace accepts */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5195 
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks every pending EXT_FLOWCTL channel matching the request ident
 * and consumes one dcid from the response per channel.  Channels with
 * no dcid left, a zero dcid, or a colliding dcid are deleted; security
 * refusals raise the security level for a retry; otherwise the channel
 * is completed with the shared MTU/MPS/credit values.
 *
 * Returns 0 on success or -EPROTO if the response header is truncated.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now tracks how many dcid bytes remain unconsumed */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still waiting on this request ident */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5305 
5306 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5307 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5308 					 u8 *data)
5309 {
5310 	struct l2cap_ecred_reconf_req *req = (void *) data;
5311 	struct l2cap_ecred_reconf_rsp rsp;
5312 	u16 mtu, mps, result;
5313 	struct l2cap_chan *chan;
5314 	int i, num_scid;
5315 
5316 	if (!enable_ecred)
5317 		return -EINVAL;
5318 
5319 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
5320 		result = L2CAP_CR_LE_INVALID_PARAMS;
5321 		goto respond;
5322 	}
5323 
5324 	mtu = __le16_to_cpu(req->mtu);
5325 	mps = __le16_to_cpu(req->mps);
5326 
5327 	BT_DBG("mtu %u mps %u", mtu, mps);
5328 
5329 	if (mtu < L2CAP_ECRED_MIN_MTU) {
5330 		result = L2CAP_RECONF_INVALID_MTU;
5331 		goto respond;
5332 	}
5333 
5334 	if (mps < L2CAP_ECRED_MIN_MPS) {
5335 		result = L2CAP_RECONF_INVALID_MPS;
5336 		goto respond;
5337 	}
5338 
5339 	cmd_len -= sizeof(*req);
5340 	num_scid = cmd_len / sizeof(u16);
5341 	result = L2CAP_RECONF_SUCCESS;
5342 
5343 	for (i = 0; i < num_scid; i++) {
5344 		u16 scid;
5345 
5346 		scid = __le16_to_cpu(req->scid[i]);
5347 		if (!scid)
5348 			return -EPROTO;
5349 
5350 		chan = __l2cap_get_chan_by_dcid(conn, scid);
5351 		if (!chan)
5352 			continue;
5353 
5354 		/* If the MTU value is decreased for any of the included
5355 		 * channels, then the receiver shall disconnect all
5356 		 * included channels.
5357 		 */
5358 		if (chan->omtu > mtu) {
5359 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
5360 			       chan->omtu, mtu);
5361 			result = L2CAP_RECONF_INVALID_MTU;
5362 		}
5363 
5364 		chan->omtu = mtu;
5365 		chan->remote_mps = mps;
5366 	}
5367 
5368 respond:
5369 	rsp.result = cpu_to_le16(result);
5370 
5371 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
5372 		       &rsp);
5373 
5374 	return 0;
5375 }
5376 
5377 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5378 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5379 					 u8 *data)
5380 {
5381 	struct l2cap_chan *chan, *tmp;
5382 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5383 	u16 result;
5384 
5385 	if (cmd_len < sizeof(*rsp))
5386 		return -EPROTO;
5387 
5388 	result = __le16_to_cpu(rsp->result);
5389 
5390 	BT_DBG("result 0x%4.4x", rsp->result);
5391 
5392 	if (!result)
5393 		return 0;
5394 
5395 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5396 		if (chan->ident != cmd->ident)
5397 			continue;
5398 
5399 		l2cap_chan_del(chan, ECONNRESET);
5400 	}
5401 
5402 	return 0;
5403 }
5404 
/* Handle an incoming LE Command Reject.
 *
 * The remote rejected one of our signaling requests; find the channel
 * still waiting on that request ident and delete it (refused).  Always
 * returns 0 except for a truncated payload (-EPROTO).
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	/* Take a reference only if the channel isn't already dying */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

done:
	return 0;
}
5431 
/* Dispatch a single LE signaling command to its handler.
 *
 * Frees the command's tx identifier first if it is a response we were
 * waiting for, then routes by opcode.  A negative return makes the
 * caller send a Command Reject; handlers whose failures must not
 * trigger a reject have their return value deliberately ignored.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5496 
/* Process an skb received on the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command, so
 * the declared length must match the remaining skb exactly.  Malformed
 * PDUs are dropped; a handler error is answered with a Command Reject.
 * Consumes the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per PDU; ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is historic — err here is a
		 * handler failure from l2cap_le_sig_cmd(), not only a
		 * link type problem.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5537 
5538 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5539 {
5540 	struct l2cap_cmd_rej_unk rej;
5541 
5542 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5543 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5544 }
5545 
5546 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5547 				     struct sk_buff *skb)
5548 {
5549 	struct hci_conn *hcon = conn->hcon;
5550 	struct l2cap_cmd_hdr *cmd;
5551 	int err;
5552 
5553 	l2cap_raw_recv(conn, skb);
5554 
5555 	if (hcon->type != ACL_LINK)
5556 		goto drop;
5557 
5558 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
5559 		u16 len;
5560 
5561 		cmd = (void *) skb->data;
5562 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5563 
5564 		len = le16_to_cpu(cmd->len);
5565 
5566 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
5567 		       cmd->ident);
5568 
5569 		if (len > skb->len || !cmd->ident) {
5570 			BT_DBG("corrupted command");
5571 			l2cap_sig_send_rej(conn, cmd->ident);
5572 			skb_pull(skb, len > skb->len ? skb->len : len);
5573 			continue;
5574 		}
5575 
5576 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
5577 		if (err) {
5578 			BT_ERR("Wrong link type (%d)", err);
5579 			l2cap_sig_send_rej(conn, cmd->ident);
5580 		}
5581 
5582 		skb_pull(skb, len);
5583 	}
5584 
5585 	if (skb->len > 0) {
5586 		BT_DBG("corrupted command");
5587 		l2cap_sig_send_rej(conn, 0);
5588 	}
5589 
5590 drop:
5591 	kfree_skb(skb);
5592 }
5593 
5594 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5595 {
5596 	u16 our_fcs, rcv_fcs;
5597 	int hdr_size;
5598 
5599 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5600 		hdr_size = L2CAP_EXT_HDR_SIZE;
5601 	else
5602 		hdr_size = L2CAP_ENH_HDR_SIZE;
5603 
5604 	if (chan->fcs == L2CAP_FCS_CRC16) {
5605 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5606 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5607 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5608 
5609 		if (our_fcs != rcv_fcs)
5610 			return -EBADMSG;
5611 	}
5612 	return 0;
5613 }
5614 
/* Answer a poll from the remote with the F-bit set, carried on an RNR
 * (if locally busy), a pending I-frame, or a plain RR as last resort.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Build an S-frame template acking everything up to buffer_seq */
	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* Local busy: report it via RNR (carries the F-bit) */
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just cleared its busy condition; restart the
	 * retransmission timer if frames are still outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5648 
/* Append @new_frag to @skb's frag_list and update @skb's accounting.
 * @last_frag caches the current list tail so appending is O(1); it is
 * advanced to point at the new tail.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5667 
/* Reassemble received PDUs into SDUs according to the SAR bits in
 * @control.  Takes ownership of @skb.
 *
 * Unsegmented SDUs are delivered directly to chan->ops->recv().  A
 * START fragment opens a partial SDU in chan->sdu; CONTINUE/END
 * fragments are chained onto it via the frag_list, and the completed
 * SDU is delivered when its accumulated length matches chan->sdu_len.
 *
 * Returns 0 on success.  On any protocol violation (fragment in the
 * wrong SAR state, SDU larger than imtu, length mismatch) err stays
 * non-zero and both the current skb and any partial SDU are freed.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		/* The START fragment carries a 2-byte SDU length prefix */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START fragment must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb ownership moved to chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* CONTINUE is only valid while a reassembly is in progress */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is a violation
		 * (err stays -EINVAL and triggers cleanup below).
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* END is only valid while a reassembly is in progress */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The END fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending fragment and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5752 
/* Re-segment queued outbound data after the link MTU changed (called
 * from the channel-move completion paths).  Not implemented yet;
 * returning 0 means "nothing to do".
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5758 
5759 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5760 {
5761 	u8 event;
5762 
5763 	if (chan->mode != L2CAP_MODE_ERTM)
5764 		return;
5765 
5766 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5767 	l2cap_tx(chan, NULL, NULL, event);
5768 }
5769 
/* Drain frames from the SREJ queue that have become in-sequence (after
 * retransmitted frames filled the gaps).  Stops at the first remaining
 * gap or while locally busy.  Once the queue is empty, return to the
 * normal RECV state and ack the remote.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap still present: wait for more retransmissions */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled: resume normal reception */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5803 
/* Handle a received SREJ S-frame: the remote is requesting selective
 * retransmission of the single I-frame with txseq == control->reqseq.
 * Validates the request, honours max_tx, and tracks the P/F-bit
 * handshake via CONN_SREJ_ACT to avoid retransmitting twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for a frame we never sent is a protocol violation */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1: retransmit with F=1 and remember the reqseq so a
		 * later duplicate SREJ with F=1 is not re-served.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F=1 SREJ answers a
			 * retransmission we already performed above.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5861 
/* Handle a received REJ S-frame: the remote requests retransmission of
 * all I-frames starting at control->reqseq.  Honours max_tx and uses
 * CONN_REJ_ACT to avoid double retransmission during a P/F handshake.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for a frame we never sent is a protocol violation */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F=1 doesn't answer a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5898 
/* Classify the txseq of a received I-frame relative to the receive
 * window: expected, duplicate, unexpected (a gap that needs SREJs),
 * invalid, or one of the SREJ-specific classes used while in the
 * SREJ_SENT state.  The returned L2CAP_TXSEQ_* value drives the
 * receive state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5984 
/* ERTM receive state machine handler for the default RECV state.
 *
 * Takes ownership of @skb (may be NULL for S-frame events): any skb
 * that is not delivered upstream or queued is freed before returning.
 * Returns 0 or a negative error from SDU reassembly/delivery.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, in-sequence frames are
			 * dropped; they'll be retransmitted later.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; still process the ack info */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit unless this F=1 answers a REJ we
			 * already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote cleared busy: restart the retransmission
			 * timer if frames are still unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Remote receiver is busy: stop retransmissions */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6136 
/* ERTM receive state machine handler for the SREJ_SENT state, entered
 * after a sequence gap was detected and SREJ frames were sent.
 * Received I-frames are buffered on srej_q until the missing frames
 * arrive; in-sequence runs are then drained by l2cap_rx_queued_iframes().
 *
 * Takes ownership of @skb (may be NULL for S-frame events): any skb
 * that is not queued is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we were waiting for arrived */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit unless this F=1 answers a REJ we
			 * already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll and re-request the oldest gap */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6279 
/* Finalize a channel move: resume normal reception, adopt the MTU of
 * the underlying HCI link, and re-segment queued outbound data.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->conn->mtu = chan->conn->hcon->mtu;

	return l2cap_resegment(chan);
}
6289 
/* Receive handler for the WAIT_P state: waiting for a poll (P=1) from
 * the remote.  The poll's reqseq reveals what the remote actually
 * received, so the transmit state is rewound to that point before
 * answering with the F-bit and processing the event as in RECV state.
 * I-frames are not valid while waiting for a poll.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll frame is acceptable in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6327 
/* Receive handler for the WAIT_F state: waiting for the final (F=1)
 * response to our poll.  The response's reqseq shows what the remote
 * actually received, so the transmit state is rewound accordingly, the
 * link MTU is refreshed, queued data is re-segmented, and the event is
 * then processed as in RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a final frame is acceptable in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6361 
6362 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6363 {
6364 	/* Make sure reqseq is for a packet that has been sent but not acked */
6365 	u16 unacked;
6366 
6367 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6368 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6369 }
6370 
6371 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6372 		    struct sk_buff *skb, u8 event)
6373 {
6374 	int err = 0;
6375 
6376 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6377 	       control, skb, event, chan->rx_state);
6378 
6379 	if (__valid_reqseq(chan, control->reqseq)) {
6380 		switch (chan->rx_state) {
6381 		case L2CAP_RX_STATE_RECV:
6382 			err = l2cap_rx_state_recv(chan, control, skb, event);
6383 			break;
6384 		case L2CAP_RX_STATE_SREJ_SENT:
6385 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6386 						       event);
6387 			break;
6388 		case L2CAP_RX_STATE_WAIT_P:
6389 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6390 			break;
6391 		case L2CAP_RX_STATE_WAIT_F:
6392 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6393 			break;
6394 		default:
6395 			/* shut it down */
6396 			break;
6397 		}
6398 	} else {
6399 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6400 		       control->reqseq, chan->next_tx_seq,
6401 		       chan->expected_ack_seq);
6402 		l2cap_send_disconn_req(chan, ECONNRESET);
6403 	}
6404 
6405 	return err;
6406 }
6407 
/* Streaming-mode receive: there are no retransmissions, so an expected
 * txseq is reassembled/delivered, while any other txseq discards the
 * partially assembled SDU and resynchronizes the receive sequence
 * state to the newly received frame.  Always consumes @skb; returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: abandon the partial SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame just received */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6459 
/* Entry point for data received on an ERTM or streaming channel.
 *
 * Verifies FCS and PDU size, applies the channel's optional filter,
 * then routes I-frames into the receive state machine (or streaming
 * receive) and S-frames into the state machine as events.  Always
 * consumes @skb; returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU length prefix and FCS trailer from the
	 * payload length checked against MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame super field to a state machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6552 
/* Grant the remote additional LE flow-control credits when our receive
 * capacity allows it: if the target credit count (l2cap_le_rx_credits)
 * exceeds what the remote currently holds, send the difference in an
 * LE Flow Control Credit packet.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits = l2cap_le_rx_credits(chan);

	/* Remote already holds at least the target amount: nothing to do */
	if (chan->rx_credits >= return_credits)
		return;

	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6575 
6576 void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
6577 {
6578 	if (chan->rx_avail == rx_avail)
6579 		return;
6580 
6581 	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
6582 
6583 	chan->rx_avail = rx_avail;
6584 
6585 	if (chan->state == BT_CONNECTED)
6586 		l2cap_chan_le_send_credits(chan);
6587 }
6588 
/* Deliver a fully reassembled SDU on an LE/enhanced-credit channel and
 * replenish the remote's credits.  If the upper layer cannot queue the
 * SDU and the channel does not advertise unlimited receive space
 * (rx_avail == -1), the connection is torn down.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	if (err < 0 && chan->rx_avail != -1) {
		BT_ERR("Queueing received LE L2CAP data failed");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
6609 
/* Handle one incoming K-frame on an LE/enhanced credit based channel:
 * charge a credit, then either start a new SDU (the first fragment
 * carries the total SDU length), append to the SDU under reassembly, or
 * deliver a completed SDU via l2cap_ecred_recv().
 *
 * Once this function has consumed or freed the skb it returns 0; a
 * negative return means the caller still owns the skb and must free it.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* The peer may only transmit while it holds credits we granted */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	/* A single PDU can never exceed the configured MTU */
	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First fragment of an SDU starts with the SDU length field */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver it right away */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		/* Segmented SDU: keep the skb and wait for more fragments */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	/* Continuation must not overflow the announced SDU length */
	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* chan->sdu takes over the fragment's data; clear skb so the error
	 * path below does not free it a second time.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	/* Deliver once the full SDU has been reassembled */
	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* kfree_skb(NULL) is a no-op, so this is safe even after the
		 * fragment was appended above.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6714 
/* Dispatch an skb received on a data CID to the matching channel
 * according to its operating mode.  Consumes the skb on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked and referenced; the
	 * unlock/put at the end undoes both.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* A negative return means the skb was not consumed */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it took ownership of the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	/* Release the lock and reference from l2cap_get_chan_by_scid() */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6780 
/* Deliver a connectionless (G-frame) packet to the channel bound to the
 * given PSM.  Consumes the skb on all paths.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only defined for BR/EDR links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Takes a reference on the returned channel, dropped below */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6821 
/* Entry point for a fully reassembled L2CAP frame: validate the basic
 * header and dispatch by CID.  Consumes the skb, possibly by queueing it
 * until the HCI link finishes coming up.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	/* Frames arriving before the link is fully established are parked
	 * on pending_rx and replayed by process_pending_rx().
	 */
	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* G-frames carry the PSM right after the basic header */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6876 
6877 static void process_pending_rx(struct work_struct *work)
6878 {
6879 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6880 					       pending_rx_work);
6881 	struct sk_buff *skb;
6882 
6883 	BT_DBG("");
6884 
6885 	mutex_lock(&conn->lock);
6886 
6887 	while ((skb = skb_dequeue(&conn->pending_rx)))
6888 		l2cap_recv_frame(conn, skb);
6889 
6890 	mutex_unlock(&conn->lock);
6891 }
6892 
/* Return the existing l2cap_conn for @hcon or create a new one holding a
 * single initial reference.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* The conn keeps a reference on the underlying hci_conn */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when secure connections (or the
	 * debug force flag) permit it.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
	ida_init(&conn->tx_ida);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	/* Default disconnect reason until the peer tells us otherwise */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6944 
6945 static bool is_valid_psm(u16 psm, u8 dst_type)
6946 {
6947 	if (!psm)
6948 		return false;
6949 
6950 	if (bdaddr_type_is_le(dst_type))
6951 		return (psm <= 0x00ff);
6952 
6953 	/* PSM must be odd and lsb of upper byte must be 0 */
6954 	return ((psm & 0x0101) == 0x0001);
6955 }
6956 
/* Search context used with l2cap_chan_list() when counting channels
 * related to a connection attempt (see l2cap_chan_by_pid()).
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel that started the search */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* matching channels found so far */
};
6962 
6963 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6964 {
6965 	struct l2cap_chan_data *d = data;
6966 	struct pid *pid;
6967 
6968 	if (chan == d->chan)
6969 		return;
6970 
6971 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6972 		return;
6973 
6974 	pid = chan->ops->get_peer_pid(chan);
6975 
6976 	/* Only count deferred channels with the same PID/PSM */
6977 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6978 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6979 		return;
6980 
6981 	d->count++;
6982 }
6983 
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 *
 * Validates PSM/CID against the channel type and mode, locates or creates
 * the underlying HCI connection (ACL or LE), attaches the channel to the
 * l2cap_conn and kicks off the channel state machine.  The channel's
 * interface (psm, dcid, dst) is filled in here.
 *
 * Returns 0 on success (including "already connecting"), -EISCONN when
 * already connected, or a negative errno on failure.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	/* Takes a reference on hdev, released in done: */
	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection oriented channels are addressed by PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... while fixed channels are addressed by CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are compiled out or disabled via module param */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we can only accept a connection, so a
		 * direct connect is used; otherwise go through the scan
		 * based connection establishment.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick off the L2CAP level setup now
	 * instead of waiting for the connect complete event.
	 */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7168 
7169 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7170 {
7171 	struct l2cap_conn *conn = chan->conn;
7172 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7173 
7174 	pdu->mtu = cpu_to_le16(chan->imtu);
7175 	pdu->mps = cpu_to_le16(chan->mps);
7176 	pdu->scid[0] = cpu_to_le16(chan->scid);
7177 
7178 	chan->ident = l2cap_get_ident(conn);
7179 
7180 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7181 		       sizeof(pdu), &pdu);
7182 }
7183 
7184 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7185 {
7186 	if (chan->imtu > mtu)
7187 		return -EINVAL;
7188 
7189 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
7190 
7191 	chan->imtu = mtu;
7192 
7193 	l2cap_ecred_reconfigure(chan);
7194 
7195 	return 0;
7196 }
7197 
7198 /* ---- L2CAP interface with lower layer (HCI) ---- */
7199 
7200 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7201 {
7202 	int exact = 0, lm1 = 0, lm2 = 0;
7203 	struct l2cap_chan *c;
7204 
7205 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7206 
7207 	/* Find listening sockets and check their link_mode */
7208 	read_lock(&chan_list_lock);
7209 	list_for_each_entry(c, &chan_list, global_l) {
7210 		if (c->state != BT_LISTEN)
7211 			continue;
7212 
7213 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7214 			lm1 |= HCI_LM_ACCEPT;
7215 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7216 				lm1 |= HCI_LM_MASTER;
7217 			exact++;
7218 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7219 			lm2 |= HCI_LM_ACCEPT;
7220 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7221 				lm2 |= HCI_LM_MASTER;
7222 		}
7223 	}
7224 	read_unlock(&chan_list_lock);
7225 
7226 	return exact ? lm1 : lm2;
7227 }
7228 
7229 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7230  * from an existing channel in the list or from the beginning of the
7231  * global list (by passing NULL as first parameter).
7232  */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume just after @c, or start from the list head when @c is NULL */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must be bound to this adapter's address or to wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* May yield NULL when the channel is already being freed;
		 * the caller owns the returned reference.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7264 
/* HCI callback: a new ACL/LE link came up (or failed).  On success, set
 * up the l2cap_conn and spawn channels for all matching fixed-channel
 * listeners before declaring the connection ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* Non-zero status means the link setup failed */
	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7325 
7326 int l2cap_disconn_ind(struct hci_conn *hcon)
7327 {
7328 	struct l2cap_conn *conn = hcon->l2cap_data;
7329 
7330 	BT_DBG("hcon %p", hcon);
7331 
7332 	if (!conn)
7333 		return HCI_ERROR_REMOTE_USER_TERM;
7334 	return conn->disc_reason;
7335 }
7336 
7337 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7338 {
7339 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7340 		return;
7341 
7342 	BT_DBG("hcon %p reason %d", hcon, reason);
7343 
7344 	l2cap_conn_del(hcon, bt_to_errno(reason));
7345 }
7346 
7347 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7348 {
7349 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7350 		return;
7351 
7352 	if (encrypt == 0x00) {
7353 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7354 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7355 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7356 			   chan->sec_level == BT_SECURITY_FIPS)
7357 			l2cap_chan_close(chan, ECONNREFUSED);
7358 	} else {
7359 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7360 			__clear_chan_timer(chan);
7361 	}
7362 }
7363 
/* HCI callback: authentication/encryption on the link changed.  Walk all
 * channels of the connection and advance, block or tear down each one
 * according to its current state and the outcome.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Track the security level actually reached on the link */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume and (re)check encryption */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing attempt: proceed only when security (and
			 * the encryption key size) checks out.
			 */
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR attempt waiting for security */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success start configuration right away */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7450 
7451 /* Append fragment into frame respecting the maximum len of rx_skb */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;

		/* Carry over the delivery time of the first fragment */
		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
				      skb->tstamp_type);
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	/* Number of bytes consumed from @skb (>= 0), or -ENOMEM above */
	return len;
}
7475 
/* Complete the 2-byte L2CAP length field of a frame under reassembly,
 * then size conn->rx_skb for the full frame — reallocating it when the
 * initial allocation (based on conn->mtu) turns out to be too small.
 * Returns the number of bytes consumed from @skb or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7510 
7511 static void l2cap_recv_reset(struct l2cap_conn *conn)
7512 {
7513 	kfree_skb(conn->rx_skb);
7514 	conn->rx_skb = NULL;
7515 	conn->rx_len = 0;
7516 }
7517 
7518 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7519 {
7520 	if (!c)
7521 		return NULL;
7522 
7523 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7524 
7525 	if (!kref_get_unless_zero(&c->ref))
7526 		return NULL;
7527 
7528 	return c;
7529 }
7530 
/* HCI RX entry point: reassemble ACL fragments into complete L2CAP frames
 * and feed them to l2cap_recv_frame().  Consumes @skb on every path.
 */
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
		       struct sk_buff *skb, u16 flags)
{
	struct hci_conn *hcon;
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		kfree_skb(skb);
		return -ENOENT;
	}

	hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Hold our own reference; hcon must not be touched past this point
	 * as it is only valid under the hdev lock.
	 */
	conn = l2cap_conn_hold_unless_zero(conn);
	hcon = NULL;

	hci_dev_unlock(hdev);

	if (!conn) {
		kfree_skb(skb);
		return -EINVAL;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while one is pending means the previous
		 * frame was truncated somewhere.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
	return 0;
}
7678 
/* Callbacks registered with the HCI core so L2CAP learns about link
 * establishment, teardown and security changes.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7685 
7686 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7687 {
7688 	struct l2cap_chan *c;
7689 
7690 	read_lock(&chan_list_lock);
7691 
7692 	list_for_each_entry(c, &chan_list, global_l) {
7693 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7694 			   &c->src, c->src_type, &c->dst, c->dst_type,
7695 			   c->state, __le16_to_cpu(c->psm),
7696 			   c->scid, c->dcid, c->imtu, c->omtu,
7697 			   c->sec_level, c->mode);
7698 	}
7699 
7700 	read_unlock(&chan_list_lock);
7701 
7702 	return 0;
7703 }
7704 
/* Generates l2cap_debugfs_fops from l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Dentry of the "l2cap" debugfs file, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7708 
7709 int __init l2cap_init(void)
7710 {
7711 	int err;
7712 
7713 	err = l2cap_init_sockets();
7714 	if (err < 0)
7715 		return err;
7716 
7717 	hci_register_cb(&l2cap_cb);
7718 
7719 	if (IS_ERR_OR_NULL(bt_debugfs))
7720 		return 0;
7721 
7722 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7723 					    NULL, &l2cap_debugfs_fops);
7724 
7725 	return 0;
7726 }
7727 
/* Tear down the L2CAP layer in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7734 
/* Both parameters are writable at runtime via
 * /sys/module/bluetooth/parameters/
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7740