xref: /linux/net/bluetooth/l2cap_core.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 
/* Module parameter: when true, Enhanced Retransmission Mode and
 * Streaming Mode are not advertised in the feature mask (see
 * l2cap_mode_supported()). */
bool disable_ertm;

/* Locally supported L2CAP features / fixed channels advertised in
 * information responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of every L2CAP channel in the system, guarded by
 * chan_list_lock (readers/writer lock). */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers used before their
 * definitions further down in this file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				   struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff_head *skbs, u8 event);
60 
61 /* ---- L2CAP channels ---- */
62 
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 	struct l2cap_chan *c;
66 
67 	list_for_each_entry(c, &conn->chan_l, list) {
68 		if (c->dcid == cid)
69 			return c;
70 	}
71 	return NULL;
72 }
73 
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 	struct l2cap_chan *c;
77 
78 	list_for_each_entry(c, &conn->chan_l, list) {
79 		if (c->scid == cid)
80 			return c;
81 	}
82 	return NULL;
83 }
84 
/* Find channel with given SCID.
 * Returns a channel with its channel lock held, or NULL.  The
 * connection's chan_lock is taken only for the duration of the lookup,
 * so the channel lock is acquired while the channel is still guaranteed
 * to be on the list. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
99 
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 	struct l2cap_chan *c;
103 
104 	list_for_each_entry(c, &conn->chan_l, list) {
105 		if (c->ident == ident)
106 			return c;
107 	}
108 	return NULL;
109 }
110 
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 {
113 	struct l2cap_chan *c;
114 
115 	list_for_each_entry(c, &chan_list, global_l) {
116 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
117 			return c;
118 	}
119 	return NULL;
120 }
121 
/* Bind @chan to PSM @psm on local address @src.
 *
 * When @psm is non-zero it is used directly and -EADDRINUSE is returned
 * if another channel already owns it on @src.  When @psm is zero, the
 * first free PSM in the dynamic range is picked automatically;
 * -EINVAL is returned if the range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Dynamic PSMs are odd values; stepping by 2 from 0x1001
		 * keeps the least significant octet odd as the spec
		 * requires. */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
154 
155 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
156 {
157 	write_lock(&chan_list_lock);
158 
159 	chan->scid = scid;
160 
161 	write_unlock(&chan_list_lock);
162 
163 	return 0;
164 }
165 
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 	u16 cid = L2CAP_CID_DYN_START;
169 
170 	for (; cid < L2CAP_CID_DYN_END; cid++) {
171 		if (!__l2cap_get_chan_by_scid(conn, cid))
172 			return cid;
173 	}
174 
175 	return 0;
176 }
177 
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
179 {
180 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 						state_to_string(state));
182 
183 	chan->state = state;
184 	chan->ops->state_change(chan, state);
185 }
186 
/* Locked wrapper: take the socket lock around the raw state change so
 * the owner's state_change callback runs with the sock held. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
195 
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
197 {
198 	struct sock *sk = chan->sk;
199 
200 	sk->sk_err = err;
201 }
202 
/* Locked wrapper: set sk_err on the channel's socket under the socket
 * lock. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
211 
/* (Re)arm the ERTM retransmission timer, but only when a retransmission
 * timeout has been negotiated and the monitor timer is not already
 * pending.  NOTE(review): the monitor-timer check presumably avoids
 * running both timers while a poll is outstanding — confirm against
 * the ERTM state machine. */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
220 
/* Arm the ERTM monitor timer (if a timeout was negotiated), cancelling
 * the retransmission timer first — only one of the two runs at a
 * time. */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
229 
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 					       u16 seq)
232 {
233 	struct sk_buff *skb;
234 
235 	skb_queue_walk(head, skb) {
236 		if (bt_cb(skb)->control.txseq == seq)
237 			return skb;
238 	}
239 
240 	return NULL;
241 }
242 
243 /* ---- L2CAP sequence number lists ---- */
244 
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246  * SREJ requests that are received and for frames that are to be
247  * retransmitted. These seq_list functions implement a singly-linked
248  * list in an array, where membership in the list can also be checked
249  * in constant time. Items can also be added to the tail of the list
250  * and removed from the head in constant time, without further memory
251  * allocs or frees.
252  */
253 
/* Allocate and reset a sequence list sized for @size entries.
 * Returns 0 on success or -ENOMEM. */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size makes "& mask" equivalent to "% alloc_size". */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	/* Every slot starts out empty (CLEAR is the "not a member"
	 * sentinel). */
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
276 
/* Release the backing array; kfree(NULL) is a no-op, so this is safe
 * even if init failed or never ran. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281 
/* Constant-time membership test: a slot holding anything other than the
 * CLEAR sentinel means @seq is on the list.  NOTE(review): relies on
 * callers only using sequence numbers within one window so "seq & mask"
 * cannot alias two live entries — confirm. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288 
/* Remove @seq from the list and return it, or return the CLEAR sentinel
 * when the list is empty or @seq is not found.  Removing the head is
 * O(1); removing an interior element walks the singly-linked chain.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* The TAIL sentinel in the successor slot means @seq was
		 * the only element: the list is now empty. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			/* Reached the tail without finding @seq. */
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322 
/* Pop and return the head of the list (CLEAR sentinel when empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328 
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 	u16 i;
332 
333 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 		return;
335 
336 	for (i = 0; i <= seq_list->mask; i++)
337 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338 
339 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342 
/* Append @seq to the tail of the list in O(1).  Appending a sequence
 * number that is already a member is a silent no-op. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already on the list — nothing to do. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	/* New tail's slot carries the TAIL sentinel as its "next". */
	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360 
/* Delayed-work handler for chan_timer: close the channel with a reason
 * derived from the state it timed out in.  Lock order is
 * conn->chan_lock then the channel lock, matching the rest of this
 * file.  The final l2cap_chan_put() drops the reference held by the
 * timer (NOTE(review): taken when the timer was armed — confirm in
 * l2cap_set_timer). */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* Owner close callback runs outside the channel lock. */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
390 
/* Allocate and initialise a new channel and add it to the global
 * channel list.  The caller receives the initial reference (refcnt 1).
 * Returns NULL on allocation failure. */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
418 
/* Remove @chan from the global list and drop the creation reference
 * taken in l2cap_chan_create().  The channel is freed once the last
 * reference is gone. */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
427 
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
429 {
430 	chan->fcs  = L2CAP_FCS_CRC16;
431 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
435 	chan->sec_level = BT_SECURITY_LOW;
436 
437 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
438 }
439 
/* Attach @chan to @conn: assign source/destination CIDs and default
 * MTUs according to the channel type, initialise the local extended
 * flow-spec fields, take a channel reference for the list, and link the
 * channel onto the connection.  Caller holds conn->chan_lock (see
 * l2cap_chan_add()). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific is
	 * known. */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Local side of the extended flow specification: best-effort
	 * service with spec defaults. */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;

	/* Reference held while the channel sits on conn->chan_l;
	 * released in l2cap_chan_del(). */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
495 
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
502 
503 void l2cap_chan_del(struct l2cap_chan *chan, int err)
504 {
505 	struct l2cap_conn *conn = chan->conn;
506 
507 	__clear_chan_timer(chan);
508 
509 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
510 
511 	if (conn) {
512 		/* Delete from channel list */
513 		list_del(&chan->list);
514 
515 		l2cap_chan_put(chan);
516 
517 		chan->conn = NULL;
518 
519 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
520 			hci_conn_put(conn->hcon);
521 	}
522 
523 	if (chan->ops->teardown)
524 		chan->ops->teardown(chan, err);
525 
526 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
527 		return;
528 
529 	switch(chan->mode) {
530 	case L2CAP_MODE_BASIC:
531 		break;
532 
533 	case L2CAP_MODE_ERTM:
534 		__clear_retrans_timer(chan);
535 		__clear_monitor_timer(chan);
536 		__clear_ack_timer(chan);
537 
538 		skb_queue_purge(&chan->srej_q);
539 
540 		l2cap_seq_list_free(&chan->srej_list);
541 		l2cap_seq_list_free(&chan->retrans_list);
542 
543 		/* fall through */
544 
545 	case L2CAP_MODE_STREAMING:
546 		skb_queue_purge(&chan->tx_q);
547 		break;
548 	}
549 
550 	return;
551 }
552 
/* Close @chan with @reason, driving the appropriate shutdown path for
 * its current state: send a disconnect request for established ACL
 * channels, reject a half-open incoming connection (BT_CONNECT2), or
 * simply tear down the channel.  Caller holds the channel lock. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Arm a timer so a lost disconnect response
			 * still terminates the channel. */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Deferred setup means security blocked the
			 * connection; otherwise report a bad PSM. */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
611 
/* Map the channel type, PSM and requested security level onto an HCI
 * authentication requirement.  Side effect: an SDP channel asking for
 * BT_SECURITY_LOW is upgraded to BT_SECURITY_SDP. */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		/* Raw (signalling-only) channels use dedicated bonding. */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		/* SDP never bonds. */
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		/* Everything else uses general bonding. */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
642 
643 /* Service level security */
644 int l2cap_chan_check_security(struct l2cap_chan *chan)
645 {
646 	struct l2cap_conn *conn = chan->conn;
647 	__u8 auth_type;
648 
649 	auth_type = l2cap_get_auth_type(chan);
650 
651 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
652 }
653 
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved range under conn->lock. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
675 
/* Build and transmit a signalling command on @conn.  Silently drops the
 * command if the skb cannot be allocated.  Signalling always goes out
 * at maximum priority and forces the link active. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports
	 * them. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
696 
/* Transmit a data skb on @chan's HCI channel, choosing flushable vs.
 * non-flushable framing from the channel flag and controller
 * capability. */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
714 
715 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
716 {
717 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
718 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
719 
720 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
721 		/* S-Frame */
722 		control->sframe = 1;
723 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
724 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
725 
726 		control->sar = 0;
727 		control->txseq = 0;
728 	} else {
729 		/* I-Frame */
730 		control->sframe = 0;
731 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
732 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
733 
734 		control->poll = 0;
735 		control->super = 0;
736 	}
737 }
738 
739 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
740 {
741 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
742 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
743 
744 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
745 		/* S-Frame */
746 		control->sframe = 1;
747 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
748 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
749 
750 		control->sar = 0;
751 		control->txseq = 0;
752 	} else {
753 		/* I-Frame */
754 		control->sframe = 0;
755 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
756 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
757 
758 		control->poll = 0;
759 		control->super = 0;
760 	}
761 }
762 
763 static inline void __unpack_control(struct l2cap_chan *chan,
764 				    struct sk_buff *skb)
765 {
766 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
767 		__unpack_extended_control(get_unaligned_le32(skb->data),
768 					  &bt_cb(skb)->control);
769 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
770 	} else {
771 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
772 					  &bt_cb(skb)->control);
773 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
774 	}
775 }
776 
777 static u32 __pack_extended_control(struct l2cap_ctrl *control)
778 {
779 	u32 packed;
780 
781 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
782 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
783 
784 	if (control->sframe) {
785 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
786 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
787 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
788 	} else {
789 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
790 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
791 	}
792 
793 	return packed;
794 }
795 
796 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
797 {
798 	u16 packed;
799 
800 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
801 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
802 
803 	if (control->sframe) {
804 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
805 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
806 		packed |= L2CAP_CTRL_FRAME_TYPE;
807 	} else {
808 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
809 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
810 	}
811 
812 	return packed;
813 }
814 
815 static inline void __pack_control(struct l2cap_chan *chan,
816 				  struct l2cap_ctrl *control,
817 				  struct sk_buff *skb)
818 {
819 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
820 		put_unaligned_le32(__pack_extended_control(control),
821 				   skb->data + L2CAP_HDR_SIZE);
822 	} else {
823 		put_unaligned_le16(__pack_enhanced_control(control),
824 				   skb->data + L2CAP_HDR_SIZE);
825 	}
826 }
827 
828 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
829 {
830 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
831 		return L2CAP_EXT_HDR_SIZE;
832 	else
833 		return L2CAP_ENH_HDR_SIZE;
834 }
835 
/* Build an S-frame PDU carrying the already-packed @control field.
 * Layout: basic header, control (enhanced or extended width), optional
 * CRC16 FCS over everything built so far.  Returns the skb or an
 * ERR_PTR on allocation failure. */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* The length field excludes the basic header itself. */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
868 
/* Send a supervisory frame described by @control, maintaining the
 * related per-channel state: pending F-bit, RNR-sent tracking, and the
 * acknowledgement bookkeeping for non-SREJ frames. */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A queued F-bit rides out on the next non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the peer last saw us busy (RNR) or ready (RR). */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR/REJ acknowledge up to reqseq, so the ack timer can be
	 * cancelled; SREJ does not acknowledge. */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
906 
907 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
908 {
909 	struct l2cap_ctrl control;
910 
911 	BT_DBG("chan %p, poll %d", chan, poll);
912 
913 	memset(&control, 0, sizeof(control));
914 	control.sframe = 1;
915 	control.poll = poll;
916 
917 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
918 		control.super = L2CAP_SUPER_RNR;
919 	else
920 		control.super = L2CAP_SUPER_RR;
921 
922 	control.reqseq = chan->buffer_seq;
923 	l2cap_send_sframe(chan, &control);
924 }
925 
/* True when no connect request is outstanding on this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
930 
/* Send an L2CAP connect request for @chan, recording the command ident
 * so the response can be matched back, and marking the connect as
 * pending. */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
945 
/* Transition @chan to BT_CONNECTED once configuration is done and
 * notify the owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
956 
/* Kick off connection establishment for @chan.  LE links have no
 * configuration stage and go straight to ready.  On BR/EDR, the remote
 * feature mask must be known first: either use it (if the info exchange
 * completed) to send the connect request once security allows, or start
 * the information request exchange. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in progress; the connect request is sent from
		 * the info response path instead. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Guard against a peer that never answers. */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
986 
987 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
988 {
989 	u32 local_feat_mask = l2cap_feat_mask;
990 	if (!disable_ertm)
991 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
992 
993 	switch (mode) {
994 	case L2CAP_MODE_ERTM:
995 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
996 	case L2CAP_MODE_STREAMING:
997 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
998 	default:
999 		return 0x00;
1000 	}
1001 }
1002 
/* Send a disconnect request for @chan with reason @err, stopping any
 * ERTM timers first.  A2MP fixed channels have no disconnect exchange
 * and only change state.  State and sk_err are updated together under
 * the socket lock. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1032 
1033 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and advance its state
 * machine: send connect requests for BT_CONNECT channels that pass the
 * security check, and answer half-open incoming connections
 * (BT_CONNECT2) with success, pending, or authorization-pending
 * responses.  Called once the connection is usable (e.g. after the
 * feature-mask exchange).  Takes conn->chan_lock, then each channel
 * lock in turn; the safe iterator allows l2cap_chan_close() to unlink
 * the current entry. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security; a pending connect request
			 * means nothing more to do here. */
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Required mode unsupported by the peer and no
			 * fallback allowed: abort the channel. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize: answer
					 * "pending" and wake the listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Still authenticating. */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Accepted: start configuration immediately. */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1116 
/* Find channel with given scid and source/destination bdaddr.
 * Returns closest match (channel is not locked).
 */
1120 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1121 						    bdaddr_t *src,
1122 						    bdaddr_t *dst)
1123 {
1124 	struct l2cap_chan *c, *c1 = NULL;
1125 
1126 	read_lock(&chan_list_lock);
1127 
1128 	list_for_each_entry(c, &chan_list, global_l) {
1129 		struct sock *sk = c->sk;
1130 
1131 		if (state && c->state != state)
1132 			continue;
1133 
1134 		if (c->scid == cid) {
1135 			int src_match, dst_match;
1136 			int src_any, dst_any;
1137 
1138 			/* Exact match. */
1139 			src_match = !bacmp(&bt_sk(sk)->src, src);
1140 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1141 			if (src_match && dst_match) {
1142 				read_unlock(&chan_list_lock);
1143 				return c;
1144 			}
1145 
1146 			/* Closest match */
1147 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1148 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1149 			if ((src_match && dst_any) || (src_any && dst_match) ||
1150 			    (src_any && dst_any))
1151 				c1 = c;
1152 		}
1153 	}
1154 
1155 	read_unlock(&chan_list_lock);
1156 
1157 	return c1;
1158 }
1159 
/* Handle an incoming LE link: create a child channel on the LE data
 * CID listener (if any), enqueue it for accept() and mark it ready.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	/* Lock the parent so accept() cannot race with the setup below. */
	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the ACL alive for the lifetime of this channel. */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	/* LE data channels need no connect/config signalling. */
	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1198 
/* The underlying HCI link is established: kick off security and state
 * transitions for every channel attached to this connection.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: offer it to a listening LE data channel. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start SMP at the requested security level. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* Skip the A2MP fixed channel here. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/raw channels are connected as soon as the
			 * link exists; no L2CAP signalling is performed.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1242 
/* Notify sockets that we cannot guarantee reliability anymore */
1244 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1245 {
1246 	struct l2cap_chan *chan;
1247 
1248 	BT_DBG("conn %p", conn);
1249 
1250 	mutex_lock(&conn->chan_lock);
1251 
1252 	list_for_each_entry(chan, &conn->chan_l, list) {
1253 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1254 			__l2cap_chan_set_err(chan, err);
1255 	}
1256 
1257 	mutex_unlock(&conn->chan_lock);
1258 }
1259 
1260 static void l2cap_info_timeout(struct work_struct *work)
1261 {
1262 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1263 							info_timer.work);
1264 
1265 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1266 	conn->info_ident = 0;
1267 
1268 	l2cap_conn_start(conn);
1269 }
1270 
/* Tear down an L2CAP connection: kill every channel, cancel pending
 * timers and free the connection object.  A NULL l2cap_data is a no-op.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel object stays valid for
		 * the ops->close() call below.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() is invoked after dropping the channel lock. */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* If SMP pairing was still in flight, stop its timer and free the
	 * SMP context too.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1313 
/* SMP security procedure timed out: destroy the SMP context and tear
 * the connection down.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
						security_timer.work);

	BT_DBG("conn %p", conn);

	/* test_and_clear avoids racing with a pairing that completed (or
	 * was cancelled) while this work was already queued.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1326 
/* Attach (or return the existing) l2cap_conn for an HCI connection.
 * Returns the existing conn when one is present, NULL when @status is
 * non-zero with no conn attached, and NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Reuse an existing conn; a failed HCI connect gets nothing new. */
	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise their own, smaller MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE uses the SMP security timer, BR/EDR the info-request timer;
	 * the two share storage and are distinguished by link type.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1375 
1376 /* ---- Socket interface ---- */
1377 
/* Find channel with psm and source / destination bdaddr.
 * Returns closest match.
 */
1381 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1382 						   bdaddr_t *src,
1383 						   bdaddr_t *dst)
1384 {
1385 	struct l2cap_chan *c, *c1 = NULL;
1386 
1387 	read_lock(&chan_list_lock);
1388 
1389 	list_for_each_entry(c, &chan_list, global_l) {
1390 		struct sock *sk = c->sk;
1391 
1392 		if (state && c->state != state)
1393 			continue;
1394 
1395 		if (c->psm == psm) {
1396 			int src_match, dst_match;
1397 			int src_any, dst_any;
1398 
1399 			/* Exact match. */
1400 			src_match = !bacmp(&bt_sk(sk)->src, src);
1401 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1402 			if (src_match && dst_match) {
1403 				read_unlock(&chan_list_lock);
1404 				return c;
1405 			}
1406 
1407 			/* Closest match */
1408 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1409 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1410 			if ((src_match && dst_any) || (src_any && dst_match) ||
1411 			    (src_any && dst_any))
1412 				c1 = c;
1413 		}
1414 	}
1415 
1416 	read_unlock(&chan_list_lock);
1417 
1418 	return c1;
1419 }
1420 
/* Initiate an outgoing L2CAP connection on @chan.
 *
 * Looks up a route to @dst, validates psm/cid against the channel type,
 * checks the channel mode and state, creates (or reuses) the ACL or LE
 * link and attaches the channel to it.  Returns 0 on success or a
 * negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
					chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Only one data channel is allowed on an LE link. */
	if (hcon->type == LE_LINK) {
		err = 0;

		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* NOTE(review): the channel lock is dropped around l2cap_chan_add,
	 * presumably to respect the conn->chan_lock -> chan lock ordering
	 * used elsewhere — confirm before reworking.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* If the link is already up, skip straight to channel setup. */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1559 
/* Sleep until every transmitted ERTM frame has been acknowledged, the
 * channel loses its connection, a signal arrives, or a socket error is
 * set.  Called with the socket locked; the lock is released around each
 * schedule_timeout().  Returns 0 or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval after each wakeup. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so acks can be
		 * processed.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1591 
1592 static void l2cap_monitor_timeout(struct work_struct *work)
1593 {
1594 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1595 					       monitor_timer.work);
1596 
1597 	BT_DBG("chan %p", chan);
1598 
1599 	l2cap_chan_lock(chan);
1600 
1601 	if (!chan->conn) {
1602 		l2cap_chan_unlock(chan);
1603 		l2cap_chan_put(chan);
1604 		return;
1605 	}
1606 
1607 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1608 
1609 	l2cap_chan_unlock(chan);
1610 	l2cap_chan_put(chan);
1611 }
1612 
1613 static void l2cap_retrans_timeout(struct work_struct *work)
1614 {
1615 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1616 					       retrans_timer.work);
1617 
1618 	BT_DBG("chan %p", chan);
1619 
1620 	l2cap_chan_lock(chan);
1621 
1622 	if (!chan->conn) {
1623 		l2cap_chan_unlock(chan);
1624 		l2cap_chan_put(chan);
1625 		return;
1626 	}
1627 
1628 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1629 	l2cap_chan_unlock(chan);
1630 	l2cap_chan_put(chan);
1631 }
1632 
1633 static void l2cap_streaming_send(struct l2cap_chan *chan,
1634 				 struct sk_buff_head *skbs)
1635 {
1636 	struct sk_buff *skb;
1637 	struct l2cap_ctrl *control;
1638 
1639 	BT_DBG("chan %p, skbs %p", chan, skbs);
1640 
1641 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1642 
1643 	while (!skb_queue_empty(&chan->tx_q)) {
1644 
1645 		skb = skb_dequeue(&chan->tx_q);
1646 
1647 		bt_cb(skb)->control.retries = 1;
1648 		control = &bt_cb(skb)->control;
1649 
1650 		control->reqseq = 0;
1651 		control->txseq = chan->next_tx_seq;
1652 
1653 		__pack_control(chan, control, skb);
1654 
1655 		if (chan->fcs == L2CAP_FCS_CRC16) {
1656 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1657 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1658 		}
1659 
1660 		l2cap_do_send(chan, skb);
1661 
1662 		BT_DBG("Sent txseq %u", control->txseq);
1663 
1664 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1665 		chan->frames_sent++;
1666 	}
1667 }
1668 
/* Send as many new I-frames as the tx state, remote-busy condition and
 * transmit window permit.  Returns the number of frames handed to the
 * controller, 0 if the peer is busy, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* A pending F-bit is carried by the first frame sent. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback the current ack state on this I-frame. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		/* (Re)arm the retransmission timer for this frame. */
		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original stays queued in tx_q
		 * until it is acknowledged.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1735 
1736 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1737 {
1738 	struct l2cap_ctrl control;
1739 	struct sk_buff *skb;
1740 	struct sk_buff *tx_skb;
1741 	u16 seq;
1742 
1743 	BT_DBG("chan %p", chan);
1744 
1745 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1746 		return;
1747 
1748 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1749 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1750 
1751 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1752 		if (!skb) {
1753 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1754 				seq);
1755 			continue;
1756 		}
1757 
1758 		bt_cb(skb)->control.retries++;
1759 		control = bt_cb(skb)->control;
1760 
1761 		if (chan->max_tx != 0 &&
1762 		    bt_cb(skb)->control.retries > chan->max_tx) {
1763 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1764 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1765 			l2cap_seq_list_clear(&chan->retrans_list);
1766 			break;
1767 		}
1768 
1769 		control.reqseq = chan->buffer_seq;
1770 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1771 			control.final = 1;
1772 		else
1773 			control.final = 0;
1774 
1775 		if (skb_cloned(skb)) {
1776 			/* Cloned sk_buffs are read-only, so we need a
1777 			 * writeable copy
1778 			 */
1779 			tx_skb = skb_copy(skb, GFP_ATOMIC);
1780 		} else {
1781 			tx_skb = skb_clone(skb, GFP_ATOMIC);
1782 		}
1783 
1784 		if (!tx_skb) {
1785 			l2cap_seq_list_clear(&chan->retrans_list);
1786 			break;
1787 		}
1788 
1789 		/* Update skb contents */
1790 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1791 			put_unaligned_le32(__pack_extended_control(&control),
1792 					   tx_skb->data + L2CAP_HDR_SIZE);
1793 		} else {
1794 			put_unaligned_le16(__pack_enhanced_control(&control),
1795 					   tx_skb->data + L2CAP_HDR_SIZE);
1796 		}
1797 
1798 		if (chan->fcs == L2CAP_FCS_CRC16) {
1799 			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1800 			put_unaligned_le16(fcs, skb_put(tx_skb,
1801 							L2CAP_FCS_SIZE));
1802 		}
1803 
1804 		l2cap_do_send(chan, tx_skb);
1805 
1806 		BT_DBG("Resent txseq %d", control.txseq);
1807 
1808 		chan->last_acked_seq = chan->buffer_seq;
1809 	}
1810 }
1811 
1812 static void l2cap_retransmit(struct l2cap_chan *chan,
1813 			     struct l2cap_ctrl *control)
1814 {
1815 	BT_DBG("chan %p, control %p", chan, control);
1816 
1817 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1818 	l2cap_ertm_resend(chan);
1819 }
1820 
/* Queue every unacked frame from control->reqseq up to (but excluding)
 * tx_send_head for retransmission and resend them.  A poll (P=1)
 * request makes the next outgoing frame carry the F bit.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the list from scratch; anything queued before is
	 * superseded by this request.
	 */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* First walk: skip ahead to the frame the peer asks for,
		 * or stop at tx_send_head (frames from there on were never
		 * transmitted).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
				skb == chan->tx_send_head)
				break;
		}

		/* Second walk: queue everything up to tx_send_head. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1854 
/* Acknowledge received I-frames.  Sends an RNR while locally busy;
 * otherwise tries to piggyback the ack on pending I-frames, emits an
 * explicit RR once ~3/4 of the ack window is consumed, and defers the
 * ack via the ack timer below that threshold.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: the RNR itself carries the ack. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below the threshold, defer: the ack timer will fire if no
		 * outgoing I-frame carries the ack first.
		 */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1904 
/* Copy @len bytes of the user iovec in @msg into @skb: the first
 * @count bytes go into the skb's linear area, the remainder is split
 * into frag_list continuation skbs of at most conn->mtu bytes each.
 * Returns the number of bytes copied or a negative errno; on error,
 * fragments already attached are freed by the caller's kfree_skb(skb).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

		/* Keep the parent skb's accounting in sync with the
		 * attached fragment.
		 */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1949 
1950 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1951 						 struct msghdr *msg, size_t len,
1952 						 u32 priority)
1953 {
1954 	struct l2cap_conn *conn = chan->conn;
1955 	struct sk_buff *skb;
1956 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1957 	struct l2cap_hdr *lh;
1958 
1959 	BT_DBG("chan %p len %zu priority %u", chan, len, priority);
1960 
1961 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1962 
1963 	skb = chan->ops->alloc_skb(chan, count + hlen,
1964 				   msg->msg_flags & MSG_DONTWAIT);
1965 	if (IS_ERR(skb))
1966 		return skb;
1967 
1968 	skb->priority = priority;
1969 
1970 	/* Create L2CAP header */
1971 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1972 	lh->cid = cpu_to_le16(chan->dcid);
1973 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1974 	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1975 
1976 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1977 	if (unlikely(err < 0)) {
1978 		kfree_skb(skb);
1979 		return ERR_PTR(err);
1980 	}
1981 	return skb;
1982 }
1983 
1984 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1985 					      struct msghdr *msg, size_t len,
1986 					      u32 priority)
1987 {
1988 	struct l2cap_conn *conn = chan->conn;
1989 	struct sk_buff *skb;
1990 	int err, count;
1991 	struct l2cap_hdr *lh;
1992 
1993 	BT_DBG("chan %p len %zu", chan, len);
1994 
1995 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1996 
1997 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1998 				   msg->msg_flags & MSG_DONTWAIT);
1999 	if (IS_ERR(skb))
2000 		return skb;
2001 
2002 	skb->priority = priority;
2003 
2004 	/* Create L2CAP header */
2005 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2006 	lh->cid = cpu_to_le16(chan->dcid);
2007 	lh->len = cpu_to_le16(len);
2008 
2009 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2010 	if (unlikely(err < 0)) {
2011 		kfree_skb(skb);
2012 		return ERR_PTR(err);
2013 	}
2014 	return skb;
2015 }
2016 
/* Allocate and initialize a single ERTM/streaming I-frame PDU: basic
 * L2CAP header, zeroed control field (filled in at transmit time),
 * optional SDU length for SAR start fragments, and @len payload bytes
 * copied from @msg.  Room for a CRC16 FCS trailer is reserved in the
 * allocation; the FCS itself is appended at transmit time.  Returns
 * the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control fields. */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2070 
/* Split an outgoing SDU into I-frame PDUs appended to @seg_queue.
 * PDU size is bounded by the HCI MTU, the maximum BR/EDR payload and
 * the remote MPS; SAR fields progress START -> CONTINUE -> END, or a
 * single UNSEGMENTED frame when the SDU fits in one PDU.  Returns 0 or
 * a negative errno (the queue is purged on error).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		/* Only the START fragment carries the total SDU length. */
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* After the START fragment, reclaim the SDU-length
			 * bytes for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2138 
/* Entry point for sending user data on a channel.  Dispatches on the
 * channel type and mode: connectionless PSM-prefixed PDU, basic-mode
 * single PDU, or ERTM/streaming segmentation.  Returns the number of
 * bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
								u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2218 
2219 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2220 {
2221 	struct l2cap_ctrl control;
2222 	u16 seq;
2223 
2224 	BT_DBG("chan %p, txseq %u", chan, txseq);
2225 
2226 	memset(&control, 0, sizeof(control));
2227 	control.sframe = 1;
2228 	control.super = L2CAP_SUPER_SREJ;
2229 
2230 	for (seq = chan->expected_tx_seq; seq != txseq;
2231 	     seq = __next_seq(chan, seq)) {
2232 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2233 			control.reqseq = seq;
2234 			l2cap_send_sframe(chan, &control);
2235 			l2cap_seq_list_append(&chan->srej_list, seq);
2236 		}
2237 	}
2238 
2239 	chan->expected_tx_seq = __next_seq(chan, txseq);
2240 }
2241 
2242 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2243 {
2244 	struct l2cap_ctrl control;
2245 
2246 	BT_DBG("chan %p", chan);
2247 
2248 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2249 		return;
2250 
2251 	memset(&control, 0, sizeof(control));
2252 	control.sframe = 1;
2253 	control.super = L2CAP_SUPER_SREJ;
2254 	control.reqseq = chan->srej_list.tail;
2255 	l2cap_send_sframe(chan, &control);
2256 }
2257 
/* Re-send an SREJ for every sequence number still on srej_list.  Each
 * entry is popped, SREJ'd and re-appended, so the list rotates; the
 * walk stops after one full pass, or when @txseq is reached (@txseq
 * itself is dropped from the list and not resent).
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2283 
/* Process the ReqSeq (acknowledgement) field of a received frame:
 * every I-frame with a sequence number strictly before @reqseq has
 * been acknowledged by the peer, so drop those frames from the
 * transmit queue and stop the retransmission timer when nothing
 * remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack carries no new information. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	/* Walk the newly acknowledged range, freeing each acked frame
	 * still sitting on the tx queue.
	 */
	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acked: no retransmission pending any more. */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2315 
2316 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2317 {
2318 	BT_DBG("chan %p", chan);
2319 
2320 	chan->expected_tx_seq = chan->buffer_seq;
2321 	l2cap_seq_list_clear(&chan->srej_list);
2322 	skb_queue_purge(&chan->srej_q);
2323 	chan->rx_state = L2CAP_RX_STATE_RECV;
2324 }
2325 
/* ERTM transmit state machine handler for the XMIT state (normal
 * transmission, no poll outstanding).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* New outbound data: queue it and try to send at once. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* With CONN_LOCAL_BUSY set, this tells the peer we are
		 * busy (presumably an RNR goes out -- verify in
		 * l2cap_send_ack).
		 */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We previously told the peer we were busy:
			 * poll with RR and await the final bit in WAIT_F.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Incoming ack: free acknowledged frames from tx_q. */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer; stay quiet in WAIT_F until the final
		 * bit comes back.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll for the peer's
		 * receive state.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2397 
2398 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2399 				  struct l2cap_ctrl *control,
2400 				  struct sk_buff_head *skbs, u8 event)
2401 {
2402 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2403 	       event);
2404 
2405 	switch (event) {
2406 	case L2CAP_EV_DATA_REQUEST:
2407 		if (chan->tx_send_head == NULL)
2408 			chan->tx_send_head = skb_peek(skbs);
2409 		/* Queue data, but don't send. */
2410 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2411 		break;
2412 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2413 		BT_DBG("Enter LOCAL_BUSY");
2414 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2415 
2416 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2417 			/* The SREJ_SENT state must be aborted if we are to
2418 			 * enter the LOCAL_BUSY state.
2419 			 */
2420 			l2cap_abort_rx_srej_sent(chan);
2421 		}
2422 
2423 		l2cap_send_ack(chan);
2424 
2425 		break;
2426 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2427 		BT_DBG("Exit LOCAL_BUSY");
2428 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2429 
2430 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2431 			struct l2cap_ctrl local_control;
2432 			memset(&local_control, 0, sizeof(local_control));
2433 			local_control.sframe = 1;
2434 			local_control.super = L2CAP_SUPER_RR;
2435 			local_control.poll = 1;
2436 			local_control.reqseq = chan->buffer_seq;
2437 			l2cap_send_sframe(chan, &local_control);
2438 
2439 			chan->retry_count = 1;
2440 			__set_monitor_timer(chan);
2441 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2442 		}
2443 		break;
2444 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2445 		l2cap_process_reqseq(chan, control->reqseq);
2446 
2447 		/* Fall through */
2448 
2449 	case L2CAP_EV_RECV_FBIT:
2450 		if (control && control->final) {
2451 			__clear_monitor_timer(chan);
2452 			if (chan->unacked_frames > 0)
2453 				__set_retrans_timer(chan);
2454 			chan->retry_count = 0;
2455 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2456 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2457 		}
2458 		break;
2459 	case L2CAP_EV_EXPLICIT_POLL:
2460 		/* Ignore */
2461 		break;
2462 	case L2CAP_EV_MONITOR_TO:
2463 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2464 			l2cap_send_rr_or_rnr(chan, 1);
2465 			__set_monitor_timer(chan);
2466 			chan->retry_count++;
2467 		} else {
2468 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2469 		}
2470 		break;
2471 	default:
2472 		break;
2473 	}
2474 }
2475 
2476 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2477 		     struct sk_buff_head *skbs, u8 event)
2478 {
2479 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2480 	       chan, control, skbs, event, chan->tx_state);
2481 
2482 	switch (chan->tx_state) {
2483 	case L2CAP_TX_STATE_XMIT:
2484 		l2cap_tx_state_xmit(chan, control, skbs, event);
2485 		break;
2486 	case L2CAP_TX_STATE_WAIT_F:
2487 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2488 		break;
2489 	default:
2490 		/* Ignore event */
2491 		break;
2492 	}
2493 }
2494 
/* Feed a received frame's ReqSeq and final bit into the transmit
 * state machine (no payload to send).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2501 
/* Feed only a received frame's final bit into the transmit state
 * machine (used when the ReqSeq field must not be processed).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2508 
2509 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Each raw channel gets its own clone; on allocation
		 * failure just skip this channel (best-effort delivery).
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv() consumes nskb on success; free it on error. */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2537 
2538 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command PDU: L2CAP header + command
 * header + @dlen bytes of payload copied from @data.  Payload that
 * does not fit in the first skb (limited by conn->mtu) continues in
 * a frag_list of bare continuation skbs carrying no headers.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment carries whatever payload fits after
		 * the two headers.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb also frees the attached frag_list chain. */
	kfree_skb(skb);
	return NULL;
}
2601 
/* Parse one configuration option at *ptr and advance *ptr past it.
 * The option type and length are returned through @type/@olen; the
 * value is returned in @val either directly (1/2/4-byte options,
 * converted from little endian) or as a pointer into the buffer for
 * any other length.  Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes off the wire unvalidated against the
 * remaining buffer -- callers only bound their loop with
 * "len >= L2CAP_CONF_OPT_SIZE", so a crafted length can step *ptr
 * past the end of the request.  Compare with later upstream
 * hardening of the option parsers.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the
		 * raw bytes instead of a decoded value.
		 */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2634 
/* Append one configuration option (type/len/value) at *ptr and
 * advance *ptr past it.  Values of 1/2/4 bytes are stored little
 * endian; any other length treats @val as a pointer to raw bytes.
 *
 * NOTE(review): no bounds checking is done here -- each caller must
 * guarantee its output buffer has room for L2CAP_CONF_OPT_SIZE + len
 * more bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2664 
/* Append an Extended Flow Specification option describing this
 * channel's local service parameters.  Only ERTM and streaming modes
 * carry an EFS; any other mode adds nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		/* ERTM uses the channel's negotiated local parameters
		 * plus default access latency / flush timeout.
		 */
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode: fixed best-effort service with no
		 * latency/flush constraints.
		 */
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
							(unsigned long) &efs);
}
2695 
2696 static void l2cap_ack_timeout(struct work_struct *work)
2697 {
2698 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2699 					       ack_timer.work);
2700 	u16 frames_to_ack;
2701 
2702 	BT_DBG("chan %p", chan);
2703 
2704 	l2cap_chan_lock(chan);
2705 
2706 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2707 				     chan->last_acked_seq);
2708 
2709 	if (frames_to_ack)
2710 		l2cap_send_rr_or_rnr(chan, 0);
2711 
2712 	l2cap_chan_unlock(chan);
2713 	l2cap_chan_put(chan);
2714 }
2715 
/* Initialise per-channel transmit/receive state for data transfer.
 * Sequence counters, SAR reassembly state and the tx queue are reset
 * for every mode; the ERTM-specific timers, SREJ queue and sequence
 * lists are only set up when the channel actually runs in ERTM mode.
 * Returns 0 on success or a negative errno from list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	/* Reset SAR reassembly state. */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Everything below is ERTM-only. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* On failure, undo the srej_list allocation above. */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2755 
2756 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2757 {
2758 	switch (mode) {
2759 	case L2CAP_MODE_STREAMING:
2760 	case L2CAP_MODE_ERTM:
2761 		if (l2cap_mode_supported(mode, remote_feat_mask))
2762 			return mode;
2763 		/* fall through */
2764 	default:
2765 		return L2CAP_MODE_BASIC;
2766 	}
2767 }
2768 
2769 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2770 {
2771 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2772 }
2773 
2774 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2775 {
2776 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2777 }
2778 
2779 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2780 {
2781 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2782 						__l2cap_ews_supported(chan)) {
2783 		/* use extended control field */
2784 		set_bit(FLAG_EXT_CTRL, &chan->flags);
2785 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2786 	} else {
2787 		chan->tx_win = min_t(u16, chan->tx_win,
2788 						L2CAP_DEFAULT_TX_WINDOW);
2789 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2790 	}
2791 	chan->ack_win = chan->tx_win;
2792 }
2793 
/* Build the payload of an outgoing Configure Request for @chan into
 * @data, choosing/locking the channel mode on the first request and
 * emitting MTU, RFC, FCS, EFS and EWS options as appropriate.
 * Returns the number of bytes written.
 *
 * NOTE(review): options are appended without checking the size of the
 * caller-supplied buffer; callers size it by convention (typically a
 * stack buffer) -- verify against later upstream bounds hardening.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device keeps its configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU is only sent when it differs from the default. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Peers that know neither ERTM nor streaming don't need
		 * an explicit RFC option for basic mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are dictated by the receiving side; leave 0. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU size is capped by the link MTU minus worst-case
		 * per-PDU overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
						L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" when we don't want it or the peer
		 * already offered to drop it.
		 */
		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Extended window sizes travel in their own option. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
								chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2915 
/* Parse the Configure Request stored in chan->conf_req and build the
 * Configure Response payload into @data.  Options we accept are
 * applied to the channel; options we reject are echoed back with
 * corrected values and an UNACCEPT result.  Returns the response
 * length, or -ECONNREFUSED when the configuration cannot possibly
 * succeed.
 *
 * NOTE(review): the response is written through @ptr with no bound on
 * the caller's buffer size; a request with many unknown option types
 * can grow the response one byte per option.  Verify callers size
 * their buffers accordingly (this area was hardened upstream later).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option in the request. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be understood. */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support. */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: list its type in the
			 * response and reject.
			 */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode can only be (re)negotiated early in the exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state 2 device cannot switch modes. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		/* Propose our mode instead; refuse outright if the peer
		 * already had its chance to agree.
		 */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* EFS service type must match ours unless one
			 * side declared "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With EWS the window came in its own option. */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits the
			 * link MTU after worst-case overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			/* Timeouts are set by the receiver: us. */
			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				/* Record the peer's EFS and echo it back. */
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3135 
/* Parse a Configure Response from the peer and build the follow-up
 * Configure Request into @data, adopting or re-proposing each option.
 * On success/pending results the negotiated ERTM/streaming parameters
 * are stored on the channel.  Returns the new request length, or
 * -ECONNREFUSED when the peer's counter-proposal is unacceptable.
 *
 * NOTE(review): like the request parser, options are appended through
 * @ptr with no explicit bound on the caller's buffer.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Adopt the peer's MTU, but never below the
			 * minimum; flag the response as unacceptable if
			 * it was too small.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state 2 device cannot accept a mode change. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must match unless "no traffic". */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* Basic-mode channels cannot be talked into another mode. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			/* Adopt the peer-selected timeouts and PDU size. */
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window bounds
			 * the ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3236 
/* Build a bare Configure Response (result/flags only, no options)
 * into @data.  Returns the length of the payload written.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	/* scid in the response is the peer's source CID = our dcid. */
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
3250 
/* Send the deferred success response for a connection that was held
 * in BT_CONNECT2 (deferred setup), then kick off configuration by
 * sending our first Configure Request (at most once per channel).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only ever send the initial Configure Request once. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3271 
/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response and commit them to the channel.
 * Only meaningful for ERTM/streaming channels; basic mode returns
 * immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick out just the RFC and EWS options from the response. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* Window source depends on whether extended control is on. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3322 
3323 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3324 {
3325 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3326 
3327 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3328 		return 0;
3329 
3330 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3331 					cmd->ident == conn->info_ident) {
3332 		cancel_delayed_work(&conn->info_timer);
3333 
3334 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3335 		conn->info_ident = 0;
3336 
3337 		l2cap_conn_start(conn);
3338 	}
3339 
3340 	return 0;
3341 }
3342 
/* Handle an incoming Connection Request (BR/EDR).
 *
 * Finds a channel listening on the requested PSM, runs security checks,
 * creates a child channel for the new connection and replies with a
 * Connection Response.  If the remote's feature mask is still unknown,
 * an Information Request is sent and the connect is answered pending.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	/* The remote's source CID becomes our destination CID. */
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our freshly allocated source CID is reported back as dcid. */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must authorize; wake listener. */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress. */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not yet known; answer pending for now. */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off feature-mask discovery so the pending connect
		 * can be completed once the Information Response arrives.
		 */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		/* Connection accepted: start configuration right away. */
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3463 
/* Handle a Connection Response to an earlier Connection Request of ours.
 *
 * A non-zero scid identifies our channel directly; scid == 0 means no
 * CID was assigned by the peer, so match on the signaling identifier
 * instead.  Success moves the channel to BT_CONFIG and sends the first
 * Configure Request; pending is recorded; anything else tears the
 * channel down with ECONNREFUSED.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Refused (or unknown result): drop the channel. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3531 
3532 static inline void set_default_fcs(struct l2cap_chan *chan)
3533 {
3534 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3535 	 * sides request it.
3536 	 */
3537 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3538 		chan->fcs = L2CAP_FCS_NONE;
3539 	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3540 		chan->fcs = L2CAP_FCS_CRC16;
3541 }
3542 
/* Handle a Configure Request for one of our channels.
 *
 * Option payloads may be split across several requests using the
 * continuation flag; fragments accumulate in chan->conf_req and are
 * parsed only once the final fragment arrives.  When both input and
 * output configuration complete, the channel is initialized and made
 * ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		/* ERTM and streaming mode need their state machines set
		 * up before the channel can carry data.
		 */
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3650 
3651 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3652 {
3653 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3654 	u16 scid, flags, result;
3655 	struct l2cap_chan *chan;
3656 	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3657 	int err = 0;
3658 
3659 	scid   = __le16_to_cpu(rsp->scid);
3660 	flags  = __le16_to_cpu(rsp->flags);
3661 	result = __le16_to_cpu(rsp->result);
3662 
3663 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3664 	       result, len);
3665 
3666 	chan = l2cap_get_chan_by_scid(conn, scid);
3667 	if (!chan)
3668 		return 0;
3669 
3670 	switch (result) {
3671 	case L2CAP_CONF_SUCCESS:
3672 		l2cap_conf_rfc_get(chan, rsp->data, len);
3673 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3674 		break;
3675 
3676 	case L2CAP_CONF_PENDING:
3677 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3678 
3679 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3680 			char buf[64];
3681 
3682 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3683 								buf, &result);
3684 			if (len < 0) {
3685 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3686 				goto done;
3687 			}
3688 
3689 			/* check compatibility */
3690 
3691 			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3692 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3693 
3694 			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3695 						l2cap_build_conf_rsp(chan, buf,
3696 						L2CAP_CONF_SUCCESS, 0x0000), buf);
3697 		}
3698 		goto done;
3699 
3700 	case L2CAP_CONF_UNACCEPT:
3701 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3702 			char req[64];
3703 
3704 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3705 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3706 				goto done;
3707 			}
3708 
3709 			/* throw out any old stored conf requests */
3710 			result = L2CAP_CONF_SUCCESS;
3711 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3712 								req, &result);
3713 			if (len < 0) {
3714 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3715 				goto done;
3716 			}
3717 
3718 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
3719 						L2CAP_CONF_REQ, len, req);
3720 			chan->num_conf_req++;
3721 			if (result != L2CAP_CONF_SUCCESS)
3722 				goto done;
3723 			break;
3724 		}
3725 
3726 	default:
3727 		l2cap_chan_set_err(chan, ECONNRESET);
3728 
3729 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3730 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
3731 		goto done;
3732 	}
3733 
3734 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3735 		goto done;
3736 
3737 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
3738 
3739 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3740 		set_default_fcs(chan);
3741 
3742 		if (chan->mode == L2CAP_MODE_ERTM ||
3743 		    chan->mode == L2CAP_MODE_STREAMING)
3744 			err = l2cap_ertm_init(chan);
3745 
3746 		if (err < 0)
3747 			l2cap_send_disconn_req(chan->conn, chan, -err);
3748 		else
3749 			l2cap_chan_ready(chan);
3750 	}
3751 
3752 done:
3753 	l2cap_chan_unlock(chan);
3754 	return err;
3755 }
3756 
/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response, shut the socket down and delete the channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's destination CID is our source CID. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep a reference so the channel survives l2cap_chan_del() and
	 * can still be passed to ops->close() after its lock is dropped.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3802 
/* Handle a Disconnection Response to a request of ours: the peer has
 * acknowledged the teardown, so delete the channel without an error.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so ops->close() can run after the channel is
	 * removed and its lock released.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3836 
/* Answer an Information Request with our feature mask, our fixed
 * channel map, or L2CAP_IR_NOTSUPP for unknown types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* The A2MP fixed channel is only advertised when high
		 * speed support is enabled.
		 */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3886 
/* Handle an Information Response during feature discovery.
 *
 * After the feature mask arrives, the fixed channel map is additionally
 * requested if the peer supports it; once discovery finishes (or
 * fails), pending connection setup is resumed via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: give up on discovery and move on. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chase the fixed channel map next. */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3944 
3945 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3946 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3947 					void *data)
3948 {
3949 	struct l2cap_create_chan_req *req = data;
3950 	struct l2cap_create_chan_rsp rsp;
3951 	u16 psm, scid;
3952 
3953 	if (cmd_len != sizeof(*req))
3954 		return -EPROTO;
3955 
3956 	if (!enable_hs)
3957 		return -EINVAL;
3958 
3959 	psm = le16_to_cpu(req->psm);
3960 	scid = le16_to_cpu(req->scid);
3961 
3962 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3963 
3964 	/* Placeholder: Always reject */
3965 	rsp.dcid = 0;
3966 	rsp.scid = cpu_to_le16(scid);
3967 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3968 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3969 
3970 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3971 		       sizeof(rsp), &rsp);
3972 
3973 	return 0;
3974 }
3975 
/* A Create Channel Response is processed exactly like a Connection
 * Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3983 
3984 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3985 				     u16 icid, u16 result)
3986 {
3987 	struct l2cap_move_chan_rsp rsp;
3988 
3989 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3990 
3991 	rsp.icid = cpu_to_le16(icid);
3992 	rsp.result = cpu_to_le16(result);
3993 
3994 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3995 }
3996 
3997 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3998 				     struct l2cap_chan *chan,
3999 				     u16 icid, u16 result)
4000 {
4001 	struct l2cap_move_chan_cfm cfm;
4002 	u8 ident;
4003 
4004 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4005 
4006 	ident = l2cap_get_ident(conn);
4007 	if (chan)
4008 		chan->ident = ident;
4009 
4010 	cfm.icid = cpu_to_le16(icid);
4011 	cfm.result = cpu_to_le16(result);
4012 
4013 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4014 }
4015 
4016 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4017 					 u16 icid)
4018 {
4019 	struct l2cap_move_chan_cfm_rsp rsp;
4020 
4021 	BT_DBG("icid 0x%4.4x", icid);
4022 
4023 	rsp.icid = cpu_to_le16(icid);
4024 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4025 }
4026 
4027 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4028 					 struct l2cap_cmd_hdr *cmd,
4029 					 u16 cmd_len, void *data)
4030 {
4031 	struct l2cap_move_chan_req *req = data;
4032 	u16 icid = 0;
4033 	u16 result = L2CAP_MR_NOT_ALLOWED;
4034 
4035 	if (cmd_len != sizeof(*req))
4036 		return -EPROTO;
4037 
4038 	icid = le16_to_cpu(req->icid);
4039 
4040 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4041 
4042 	if (!enable_hs)
4043 		return -EINVAL;
4044 
4045 	/* Placeholder: Always refuse */
4046 	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4047 
4048 	return 0;
4049 }
4050 
4051 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4052 					 struct l2cap_cmd_hdr *cmd,
4053 					 u16 cmd_len, void *data)
4054 {
4055 	struct l2cap_move_chan_rsp *rsp = data;
4056 	u16 icid, result;
4057 
4058 	if (cmd_len != sizeof(*rsp))
4059 		return -EPROTO;
4060 
4061 	icid = le16_to_cpu(rsp->icid);
4062 	result = le16_to_cpu(rsp->result);
4063 
4064 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4065 
4066 	/* Placeholder: Always unconfirmed */
4067 	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4068 
4069 	return 0;
4070 }
4071 
4072 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4073 					     struct l2cap_cmd_hdr *cmd,
4074 					     u16 cmd_len, void *data)
4075 {
4076 	struct l2cap_move_chan_cfm *cfm = data;
4077 	u16 icid, result;
4078 
4079 	if (cmd_len != sizeof(*cfm))
4080 		return -EPROTO;
4081 
4082 	icid = le16_to_cpu(cfm->icid);
4083 	result = le16_to_cpu(cfm->result);
4084 
4085 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4086 
4087 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4088 
4089 	return 0;
4090 }
4091 
4092 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4093 						 struct l2cap_cmd_hdr *cmd,
4094 						 u16 cmd_len, void *data)
4095 {
4096 	struct l2cap_move_chan_cfm_rsp *rsp = data;
4097 	u16 icid;
4098 
4099 	if (cmd_len != sizeof(*rsp))
4100 		return -EPROTO;
4101 
4102 	icid = le16_to_cpu(rsp->icid);
4103 
4104 	BT_DBG("icid 0x%4.4x", icid);
4105 
4106 	return 0;
4107 }
4108 
/* Validate a requested set of LE connection parameters.
 *
 * Checks the connection interval (6..3200 units, min <= max), the
 * supervision timeout multiplier (10..3200), that the timeout exceeds
 * the maximum interval, and that the slave latency is within both the
 * absolute cap of 499 and what the timeout leaves room for.
 *
 * Returns 0 when all parameters are acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 supervision;

	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* Timeout in 1.25ms units (to_multiplier * 10ms / 1.25ms). */
	supervision = to_multiplier * 8;
	if (supervision <= max)
		return -EINVAL;

	if (latency > 499)
		return -EINVAL;

	if (latency > (u16)(supervision / max - 1))
		return -EINVAL;

	return 0;
}
4129 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only legal when we are master of the link.  The requested parameters
 * are range-checked, an accept/reject response is sent back, and on
 * acceptance the controller is asked to apply the new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* Reject truncated or oversized payloads outright. */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Parameters accepted: push them down to the controller. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4171 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Echo Requests are answered inline by mirroring the payload; unknown
 * opcodes return -EINVAL, which makes the caller (l2cap_sig_channel)
 * send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the request payload as-is. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4253 
4254 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4255 					struct l2cap_cmd_hdr *cmd, u8 *data)
4256 {
4257 	switch (cmd->code) {
4258 	case L2CAP_COMMAND_REJ:
4259 		return 0;
4260 
4261 	case L2CAP_CONN_PARAM_UPDATE_REQ:
4262 		return l2cap_conn_param_update_req(conn, cmd, data);
4263 
4264 	case L2CAP_CONN_PARAM_UPDATE_RSP:
4265 		return 0;
4266 
4267 	default:
4268 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4269 		return -EINVAL;
4270 	}
4271 }
4272 
/* Process the signaling channel payload of one skb, which may carry
 * several concatenated commands.  Each command is dispatched according
 * to the link type; a handler error is answered with a Command Reject.
 * The skb is consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
							struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Stop parsing on a truncated payload or an ident of 0. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
4319 
/* Verify and strip the per-frame CRC16 FCS when the channel uses it.
 *
 * Returns 0 on success (or when FCS is disabled), -EBADMSG on a
 * checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The FCS also covers the L2CAP header, whose size depends on
	 * whether extended control fields are in use.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off the payload first; the bytes are still
		 * present in the underlying buffer, so the received value
		 * can be read back at skb->data + skb->len.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4340 
/* Report our local busy state to the remote with the F-bit set.
 *
 * Sends RNR when locally busy, restarts the retransmit timer if the
 * remote just cleared its busy condition, pushes out pending I-frames,
 * and falls back to a final RR if nothing else carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4374 
/* Append new_frag to skb's fragment list and update the aggregate
 * length/size accounting.  *last_frag tracks the current tail of the
 * list so appends stay O(1); it is advanced to new_frag.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4393 
/* Feed one received I-frame into SDU reassembly based on its SAR bits.
 *
 * The skb is consumed on success: either delivered via ops->recv() or
 * linked into the partial SDU in chan->sdu.  On error (unexpected SAR
 * state, oversized SDU, or recv failure) the frame and any partial
 * SDU are both discarded.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented SDU while reassembly is in progress is
		 * a protocol error (err stays -EINVAL).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first fragment carries the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership moved into chan->sdu; don't free it below. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete or exceed the SDU. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The final fragment must make the length match exactly. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partially assembled SDU. */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4475 
4476 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4477 {
4478 	u8 event;
4479 
4480 	if (chan->mode != L2CAP_MODE_ERTM)
4481 		return;
4482 
4483 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4484 	l2cap_tx(chan, NULL, NULL, event);
4485 }
4486 
/* Flush in-order frames out of the SREJ hold queue up the stack. */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All missing frames recovered: leave SREJ recovery and
		 * acknowledge the remote.
		 */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4520 
/* Handle a received SREJ (Selective Reject) S-frame: the peer asks for
 * retransmission of the single I-frame with txseq == reqseq.  Invalid
 * requests or exceeding the retry limit disconnect the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would request a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Record this SREJ so a later F-bit copy of the same
		 * request does not trigger a second retransmission.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this exact SREJ was
			 * already acted on (SREJ_ACT set above).
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4578 
/* Handle a received REJ S-frame: the peer rejects all I-frames from
 * reqseq onward, so every unacked frame is retransmitted.  Invalid
 * requests or exceeding the retry limit disconnect the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only once per REJ: REJ_ACT is set when the
		 * poll response already triggered the retransmission.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4615 
/* Classify an incoming I-frame's txseq relative to the receive window:
 * expected, duplicate, unexpected (sequence gap), or invalid.  While
 * SREJs are outstanding (SREJ_SENT state), additional classifications
 * distinguish retransmitted frames from new ones.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* A txseq before expected_tx_seq (mod window) was already seen */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
			     chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4702 
/* RX state machine for ERTM's normal RECV state: delivers in-sequence
 * I-frames, starts SREJ recovery when a sequence gap appears, and
 * reacts to RR/RNR/REJ/SREJ S-frames.  The skb is always consumed:
 * reassembled, queued in srej_q, or freed at the end.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;	/* set once skb ownership has moved on */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F-bit answers our poll; retransmit
				 * unless a REJ already took care of it.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote busy just cleared: restart the
			 * retransmission timer if frames are pending.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Peer is busy; stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Anything not consumed above is no longer needed */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4837 
/* RX state machine while SREJs are outstanding (SREJ_SENT state):
 * arriving I-frames are queued in srej_q until the sequence can be
 * completed; S-frames update the TX side and may re-request missing
 * frames.  The skb is always consumed: queued or freed at the end.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;	/* set once skb ownership has moved on */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list has arrived; queued
			 * frames may now be deliverable in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if a REJ hasn't already been
			 * acted on (REJ_ACT).
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Answer the RNR with a plain RR so the peer
			 * learns the current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Anything not queued above is no longer needed */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4981 
4982 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4983 {
4984 	/* Make sure reqseq is for a packet that has been sent but not acked */
4985 	u16 unacked;
4986 
4987 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4988 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4989 }
4990 
4991 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4992 		    struct sk_buff *skb, u8 event)
4993 {
4994 	int err = 0;
4995 
4996 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4997 	       control, skb, event, chan->rx_state);
4998 
4999 	if (__valid_reqseq(chan, control->reqseq)) {
5000 		switch (chan->rx_state) {
5001 		case L2CAP_RX_STATE_RECV:
5002 			err = l2cap_rx_state_recv(chan, control, skb, event);
5003 			break;
5004 		case L2CAP_RX_STATE_SREJ_SENT:
5005 			err = l2cap_rx_state_srej_sent(chan, control, skb,
5006 						       event);
5007 			break;
5008 		default:
5009 			/* shut it down */
5010 			break;
5011 		}
5012 	} else {
5013 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5014 		       control->reqseq, chan->next_tx_seq,
5015 		       chan->expected_ack_seq);
5016 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5017 	}
5018 
5019 	return err;
5020 }
5021 
/* Receive path for streaming mode.  There are no retransmissions, so
 * any frame that is not the next expected txseq aborts the SDU being
 * reassembled and is dropped.  Always returns 0 (err is never set).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Reassembly errors are ignored: streaming mode cannot
		 * recover lost data anyway.
		 */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: discard any partially built SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received txseq regardless of outcome */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
5059 
5060 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5061 {
5062 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
5063 	u16 len;
5064 	u8 event;
5065 
5066 	__unpack_control(chan, skb);
5067 
5068 	len = skb->len;
5069 
5070 	/*
5071 	 * We can just drop the corrupted I-frame here.
5072 	 * Receiver will miss it and start proper recovery
5073 	 * procedures and ask for retransmission.
5074 	 */
5075 	if (l2cap_check_fcs(chan, skb))
5076 		goto drop;
5077 
5078 	if (!control->sframe && control->sar == L2CAP_SAR_START)
5079 		len -= L2CAP_SDULEN_SIZE;
5080 
5081 	if (chan->fcs == L2CAP_FCS_CRC16)
5082 		len -= L2CAP_FCS_SIZE;
5083 
5084 	if (len > chan->mps) {
5085 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5086 		goto drop;
5087 	}
5088 
5089 	if (!control->sframe) {
5090 		int err;
5091 
5092 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5093 		       control->sar, control->reqseq, control->final,
5094 		       control->txseq);
5095 
5096 		/* Validate F-bit - F=0 always valid, F=1 only
5097 		 * valid in TX WAIT_F
5098 		 */
5099 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5100 			goto drop;
5101 
5102 		if (chan->mode != L2CAP_MODE_STREAMING) {
5103 			event = L2CAP_EV_RECV_IFRAME;
5104 			err = l2cap_rx(chan, control, skb, event);
5105 		} else {
5106 			err = l2cap_stream_rx(chan, control, skb);
5107 		}
5108 
5109 		if (err)
5110 			l2cap_send_disconn_req(chan->conn, chan,
5111 					       ECONNRESET);
5112 	} else {
5113 		const u8 rx_func_to_event[4] = {
5114 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5115 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5116 		};
5117 
5118 		/* Only I-frames are expected in streaming mode */
5119 		if (chan->mode == L2CAP_MODE_STREAMING)
5120 			goto drop;
5121 
5122 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5123 		       control->reqseq, control->final, control->poll,
5124 		       control->super);
5125 
5126 		if (len != 0) {
5127 			BT_ERR("%d", len);
5128 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5129 			goto drop;
5130 		}
5131 
5132 		/* Validate F and P bits */
5133 		if (control->final && (control->poll ||
5134 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5135 			goto drop;
5136 
5137 		event = rx_func_to_event[control->super];
5138 		if (l2cap_rx(chan, control, skb, event))
5139 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5140 	}
5141 
5142 	return 0;
5143 
5144 drop:
5145 	kfree_skb(skb);
5146 	return 0;
5147 }
5148 
/* Dispatch an inbound frame on a connection-oriented CID to its
 * channel.  Unknown CIDs are dropped, except L2CAP_CID_A2MP which can
 * create the A2MP channel on demand.
 *
 * NOTE(review): the channel is unlocked at "done:" on all paths, so
 * l2cap_get_chan_by_scid() presumably returns it locked - matching
 * the explicit l2cap_chan_lock() on the A2MP path.  Confirm against
 * that helper's definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it took ownership of skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on every path */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5207 
5208 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5209 				  struct sk_buff *skb)
5210 {
5211 	struct l2cap_chan *chan;
5212 
5213 	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5214 	if (!chan)
5215 		goto drop;
5216 
5217 	BT_DBG("chan %p, len %d", chan, skb->len);
5218 
5219 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5220 		goto drop;
5221 
5222 	if (chan->imtu < skb->len)
5223 		goto drop;
5224 
5225 	if (!chan->ops->recv(chan, skb))
5226 		return;
5227 
5228 drop:
5229 	kfree_skb(skb);
5230 }
5231 
5232 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5233 			      struct sk_buff *skb)
5234 {
5235 	struct l2cap_chan *chan;
5236 
5237 	chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5238 	if (!chan)
5239 		goto drop;
5240 
5241 	BT_DBG("chan %p, len %d", chan, skb->len);
5242 
5243 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5244 		goto drop;
5245 
5246 	if (chan->imtu < skb->len)
5247 		goto drop;
5248 
5249 	if (!chan->ops->recv(chan, skb))
5250 		return;
5251 
5252 drop:
5253 	kfree_skb(skb);
5254 }
5255 
/* Demultiplex a complete L2CAP frame to the proper handler based on
 * its destination CID.  Every handler consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* lh remains valid after skb_pull() below: the header bytes
	 * stay in the buffer, only skb->data is advanced.
	 */
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the payload actually received */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* G-frames carry a PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* SMP errors terminate the whole link */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5299 
5300 /* ---- L2CAP interface with lower layer (HCI) ---- */
5301 
5302 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5303 {
5304 	int exact = 0, lm1 = 0, lm2 = 0;
5305 	struct l2cap_chan *c;
5306 
5307 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5308 
5309 	/* Find listening sockets and check their link_mode */
5310 	read_lock(&chan_list_lock);
5311 	list_for_each_entry(c, &chan_list, global_l) {
5312 		struct sock *sk = c->sk;
5313 
5314 		if (c->state != BT_LISTEN)
5315 			continue;
5316 
5317 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5318 			lm1 |= HCI_LM_ACCEPT;
5319 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5320 				lm1 |= HCI_LM_MASTER;
5321 			exact++;
5322 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5323 			lm2 |= HCI_LM_ACCEPT;
5324 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5325 				lm2 |= HCI_LM_MASTER;
5326 		}
5327 	}
5328 	read_unlock(&chan_list_lock);
5329 
5330 	return exact ? lm1 : lm2;
5331 }
5332 
5333 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5334 {
5335 	struct l2cap_conn *conn;
5336 
5337 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5338 
5339 	if (!status) {
5340 		conn = l2cap_conn_add(hcon, status);
5341 		if (conn)
5342 			l2cap_conn_ready(conn);
5343 	} else
5344 		l2cap_conn_del(hcon, bt_to_errno(status));
5345 
5346 	return 0;
5347 }
5348 
5349 int l2cap_disconn_ind(struct hci_conn *hcon)
5350 {
5351 	struct l2cap_conn *conn = hcon->l2cap_data;
5352 
5353 	BT_DBG("hcon %p", hcon);
5354 
5355 	if (!conn)
5356 		return HCI_ERROR_REMOTE_USER_TERM;
5357 	return conn->disc_reason;
5358 }
5359 
/* HCI callback: the ACL link is gone; tear down the whole l2cap_conn */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5367 
5368 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5369 {
5370 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5371 		return;
5372 
5373 	if (encrypt == 0x00) {
5374 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
5375 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5376 		} else if (chan->sec_level == BT_SECURITY_HIGH)
5377 			l2cap_chan_close(chan, ECONNREFUSED);
5378 	} else {
5379 		if (chan->sec_level == BT_SECURITY_MEDIUM)
5380 			__clear_chan_timer(chan);
5381 	}
5382 }
5383 
/* HCI callback: authentication/encryption status changed on hcon.
 * Walks every channel on the connection and moves each one forward
 * (send Connect Request, answer a pending Connect Request, wake the
 * socket) or schedules its teardown, according to status/encrypt.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* SMP key distribution starts once the LE link encrypts */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channel is still waiting on its own security request */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was held pending security:
			 * answer the peer's Connect Request now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Successful accept also begins configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5496 
/* HCI callback: reassemble ACL data fragments into complete L2CAP
 * frames.  A start packet (no ACL_CONT flag) carries the Basic L2CAP
 * header giving the total frame length; continuations are appended to
 * conn->rx_skb until rx_len hits zero, then the complete frame goes to
 * l2cap_recv_frame().  The incoming skb is always consumed.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start while reassembly is in progress means the
		 * previous frame was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Reached on error paths AND after a fragment's payload has been
	 * copied into rx_skb: the original skb is always freed here.
	 */
	kfree_skb(skb);
	return 0;
}
5588 
5589 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5590 {
5591 	struct l2cap_chan *c;
5592 
5593 	read_lock(&chan_list_lock);
5594 
5595 	list_for_each_entry(c, &chan_list, global_l) {
5596 		struct sock *sk = c->sk;
5597 
5598 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5599 					batostr(&bt_sk(sk)->src),
5600 					batostr(&bt_sk(sk)->dst),
5601 					c->state, __le16_to_cpu(c->psm),
5602 					c->scid, c->dcid, c->imtu, c->omtu,
5603 					c->sec_level, c->mode);
5604 	}
5605 
5606 	read_unlock(&chan_list_lock);
5607 
5608 	return 0;
5609 }
5610 
/* debugfs open: hook the show routine up via seq_file's single_open() */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5615 
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it */
static struct dentry *l2cap_debugfs;
5624 
5625 int __init l2cap_init(void)
5626 {
5627 	int err;
5628 
5629 	err = l2cap_init_sockets();
5630 	if (err < 0)
5631 		return err;
5632 
5633 	if (bt_debugfs) {
5634 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5635 					bt_debugfs, NULL, &l2cap_debugfs_fops);
5636 		if (!l2cap_debugfs)
5637 			BT_ERR("Failed to create L2CAP debug file");
5638 	}
5639 
5640 	return 0;
5641 }
5642 
/* Module teardown: remove the debugfs file and unregister the sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5648 
/* Runtime knob (writable via sysfs): disables ERTM channel mode */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
5651