xref: /linux/net/bluetooth/l2cap_core.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6 
7    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License version 2 as
11    published by the Free Software Foundation;
12 
13    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 
22    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24    SOFTWARE IS DISCLAIMED.
25 */
26 
27 /* Bluetooth L2CAP core. */
28 
29 #include <linux/module.h>
30 
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50 
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53 
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58 
/* Set non-zero to disable ERTM and streaming modes (see
 * l2cap_mode_supported()); presumably exposed as a module parameter
 * elsewhere — TODO confirm. */
int disable_ertm;

/* Locally supported L2CAP feature mask; extended at runtime with
 * ERTM/streaming bits unless disable_ertm is set. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap (0x02 presumably marks the signalling
 * channel — verify against the core spec bit assignments). */
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Global list of every L2CAP channel, guarded by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers defined later in the file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 
77 /* ---- L2CAP channels ---- */
78 
79 static inline void chan_hold(struct l2cap_chan *c)
80 {
81 	atomic_inc(&c->refcnt);
82 }
83 
84 static inline void chan_put(struct l2cap_chan *c)
85 {
86 	if (atomic_dec_and_test(&c->refcnt))
87 		kfree(c);
88 }
89 
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
113 /* Find channel with given SCID.
114  * Returns locked socket */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 {
117 	struct l2cap_chan *c;
118 
119 	read_lock(&conn->chan_lock);
120 	c = __l2cap_get_chan_by_scid(conn, cid);
121 	if (c)
122 		bh_lock_sock(c->sk);
123 	read_unlock(&conn->chan_lock);
124 	return c;
125 }
126 
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 {
129 	struct l2cap_chan *c;
130 
131 	list_for_each_entry(c, &conn->chan_l, list) {
132 		if (c->ident == ident)
133 			return c;
134 	}
135 	return NULL;
136 }
137 
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139 {
140 	struct l2cap_chan *c;
141 
142 	read_lock(&conn->chan_lock);
143 	c = __l2cap_get_chan_by_ident(conn, ident);
144 	if (c)
145 		bh_lock_sock(c->sk);
146 	read_unlock(&conn->chan_lock);
147 	return c;
148 }
149 
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151 {
152 	struct l2cap_chan *c;
153 
154 	list_for_each_entry(c, &chan_list, global_l) {
155 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
156 			goto found;
157 	}
158 
159 	c = NULL;
160 found:
161 	return c;
162 }
163 
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
165 {
166 	int err;
167 
168 	write_lock_bh(&chan_list_lock);
169 
170 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 		err = -EADDRINUSE;
172 		goto done;
173 	}
174 
175 	if (psm) {
176 		chan->psm = psm;
177 		chan->sport = psm;
178 		err = 0;
179 	} else {
180 		u16 p;
181 
182 		err = -EINVAL;
183 		for (p = 0x1001; p < 0x1100; p += 2)
184 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 				chan->psm   = cpu_to_le16(p);
186 				chan->sport = cpu_to_le16(p);
187 				err = 0;
188 				break;
189 			}
190 	}
191 
192 done:
193 	write_unlock_bh(&chan_list_lock);
194 	return err;
195 }
196 
197 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
198 {
199 	write_lock_bh(&chan_list_lock);
200 
201 	chan->scid = scid;
202 
203 	write_unlock_bh(&chan_list_lock);
204 
205 	return 0;
206 }
207 
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209 {
210 	u16 cid = L2CAP_CID_DYN_START;
211 
212 	for (; cid < L2CAP_CID_DYN_END; cid++) {
213 		if (!__l2cap_get_chan_by_scid(conn, cid))
214 			return cid;
215 	}
216 
217 	return 0;
218 }
219 
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221 {
222 	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
223 
224 	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 		chan_hold(chan);
226 }
227 
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
229 {
230 	BT_DBG("chan %p state %d", chan, chan->state);
231 
232 	if (timer_pending(timer) && del_timer(timer))
233 		chan_put(chan);
234 }
235 
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
237 {
238 	chan->state = state;
239 	chan->ops->state_change(chan->data, state);
240 }
241 
/* Channel timer expiry handler (softirq context). Closes the channel
 * with an error derived from its current state, then drops the reference
 * the timer held on the channel. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	/* Pick the error reported to the socket layer: a timeout during
	 * setup/config reads as connection refused. */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	/* Notify the owner outside the socket lock, then release the
	 * reference that was taken when this timer was armed. */
	chan->ops->close(chan->data);
	chan_put(chan);
}
275 
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
277 {
278 	struct l2cap_chan *chan;
279 
280 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
281 	if (!chan)
282 		return NULL;
283 
284 	chan->sk = sk;
285 
286 	write_lock_bh(&chan_list_lock);
287 	list_add(&chan->global_l, &chan_list);
288 	write_unlock_bh(&chan_list_lock);
289 
290 	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
291 
292 	chan->state = BT_OPEN;
293 
294 	atomic_set(&chan->refcnt, 1);
295 
296 	return chan;
297 }
298 
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
300 {
301 	write_lock_bh(&chan_list_lock);
302 	list_del(&chan->global_l);
303 	write_unlock_bh(&chan_list_lock);
304 
305 	chan_put(chan);
306 }
307 
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
309 {
310 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 			chan->psm, chan->dcid);
312 
313 	conn->disc_reason = 0x13;
314 
315 	chan->conn = conn;
316 
317 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 		if (conn->hcon->type == LE_LINK) {
319 			/* LE connection */
320 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 			chan->scid = L2CAP_CID_LE_DATA;
322 			chan->dcid = L2CAP_CID_LE_DATA;
323 		} else {
324 			/* Alloc CID for connection-oriented socket */
325 			chan->scid = l2cap_alloc_cid(conn);
326 			chan->omtu = L2CAP_DEFAULT_MTU;
327 		}
328 	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 		/* Connectionless socket */
330 		chan->scid = L2CAP_CID_CONN_LESS;
331 		chan->dcid = L2CAP_CID_CONN_LESS;
332 		chan->omtu = L2CAP_DEFAULT_MTU;
333 	} else {
334 		/* Raw socket can send/recv signalling messages only */
335 		chan->scid = L2CAP_CID_SIGNALING;
336 		chan->dcid = L2CAP_CID_SIGNALING;
337 		chan->omtu = L2CAP_DEFAULT_MTU;
338 	}
339 
340 	chan_hold(chan);
341 
342 	list_add(&chan->list, &conn->chan_l);
343 }
344 
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the reference taken by __l2cap_chan_add(). */
		chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending child of a listener: detach it and wake the
		 * listening socket. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Only channels that completed both configuration directions have
	 * transmit/ERTM state worth tearing down below. */
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		/* Free any pending selective-reject bookkeeping entries. */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
401 
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
403 {
404 	struct sock *sk;
405 
406 	BT_DBG("parent %p", parent);
407 
408 	/* Close not yet accepted channels */
409 	while ((sk = bt_accept_dequeue(parent, NULL))) {
410 		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 		__clear_chan_timer(chan);
412 		lock_sock(sk);
413 		l2cap_chan_close(chan, ECONNRESET);
414 		release_sock(sk);
415 		chan->ops->close(chan->data);
416 	}
417 }
418 
/* Close @chan with error @reason, taking the action appropriate for its
 * current state. Caller holds the socket lock. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);

	switch (chan->state) {
	case BT_LISTEN:
		/* Listening socket: also tear down un-accepted children. */
		l2cap_chan_cleanup_listen(sk);

		l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Orderly shutdown: send a disconnect request and
			 * let the (re-armed) channel timer bound the wait
			 * for the peer's response. */
			__clear_chan_timer(chan);
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* A connect request was received but never
			 * answered; reject it now. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		/* Not attached to a connection: just mark the socket dead. */
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
478 
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
480 {
481 	if (chan->chan_type == L2CAP_CHAN_RAW) {
482 		switch (chan->sec_level) {
483 		case BT_SECURITY_HIGH:
484 			return HCI_AT_DEDICATED_BONDING_MITM;
485 		case BT_SECURITY_MEDIUM:
486 			return HCI_AT_DEDICATED_BONDING;
487 		default:
488 			return HCI_AT_NO_BONDING;
489 		}
490 	} else if (chan->psm == cpu_to_le16(0x0001)) {
491 		if (chan->sec_level == BT_SECURITY_LOW)
492 			chan->sec_level = BT_SECURITY_SDP;
493 
494 		if (chan->sec_level == BT_SECURITY_HIGH)
495 			return HCI_AT_NO_BONDING_MITM;
496 		else
497 			return HCI_AT_NO_BONDING;
498 	} else {
499 		switch (chan->sec_level) {
500 		case BT_SECURITY_HIGH:
501 			return HCI_AT_GENERAL_BONDING_MITM;
502 		case BT_SECURITY_MEDIUM:
503 			return HCI_AT_GENERAL_BONDING;
504 		default:
505 			return HCI_AT_NO_BONDING;
506 		}
507 	}
508 }
509 
510 /* Service level security */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
512 {
513 	struct l2cap_conn *conn = chan->conn;
514 	__u8 auth_type;
515 
516 	auth_type = l2cap_get_auth_type(chan);
517 
518 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
519 }
520 
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
522 {
523 	u8 id;
524 
525 	/* Get next available identificator.
526 	 *    1 - 128 are used by kernel.
527 	 *  129 - 199 are reserved.
528 	 *  200 - 254 are used by utilities like l2ping, etc.
529 	 */
530 
531 	spin_lock_bh(&conn->lock);
532 
533 	if (++conn->tx_ident > 128)
534 		conn->tx_ident = 1;
535 
536 	id = conn->tx_ident;
537 
538 	spin_unlock_bh(&conn->lock);
539 
540 	return id;
541 }
542 
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
544 {
545 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
546 	u8 flags;
547 
548 	BT_DBG("code 0x%2.2x", code);
549 
550 	if (!skb)
551 		return;
552 
553 	if (lmp_no_flush_capable(conn->hcon->hdev))
554 		flags = ACL_START_NO_FLUSH;
555 	else
556 		flags = ACL_START;
557 
558 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
559 
560 	hci_send_acl(conn->hcon, skb, flags);
561 }
562 
/* Build and transmit an ERTM S-frame carrying @control. Sets the
 * frame-type bit, folds in any pending Final/Poll bit, and appends the
 * CRC16 FCS when the channel uses it. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* basic header + control field */
	u8 flags;

	if (chan->state != BT_CONNECTED)
		return;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the frame check sequence */

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending Final bit ... */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= L2CAP_CTRL_FINAL;

	/* ... or a pending Poll bit on this frame. */
	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= L2CAP_CTRL_POLL;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything that precedes it in the frame. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;

	hci_send_acl(chan->conn->hcon, skb, flags);
}
611 
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
613 {
614 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 		control |= L2CAP_SUPER_RCV_NOT_READY;
616 		set_bit(CONN_RNR_SENT, &chan->conn_state);
617 	} else
618 		control |= L2CAP_SUPER_RCV_READY;
619 
620 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
621 
622 	l2cap_send_sframe(chan, control);
623 }
624 
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
626 {
627 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
628 }
629 
/* Push channel establishment forward: send a connect request if the
 * remote feature mask is known, otherwise query the features first. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight; l2cap_conn_start()
		 * resumes pending channels when it completes. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm  = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		/* First channel on this link: request the peer's feature
		 * mask before attempting to connect. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
664 
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
666 {
667 	u32 local_feat_mask = l2cap_feat_mask;
668 	if (!disable_ertm)
669 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
670 
671 	switch (mode) {
672 	case L2CAP_MODE_ERTM:
673 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 	case L2CAP_MODE_STREAMING:
675 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
676 	default:
677 		return 0x00;
678 	}
679 }
680 
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
682 {
683 	struct sock *sk;
684 	struct l2cap_disconn_req req;
685 
686 	if (!conn)
687 		return;
688 
689 	sk = chan->sk;
690 
691 	if (chan->mode == L2CAP_MODE_ERTM) {
692 		__clear_retrans_timer(chan);
693 		__clear_monitor_timer(chan);
694 		__clear_ack_timer(chan);
695 	}
696 
697 	req.dcid = cpu_to_le16(chan->dcid);
698 	req.scid = cpu_to_le16(chan->scid);
699 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 			L2CAP_DISCONN_REQ, sizeof(req), &req);
701 
702 	l2cap_state_change(chan, BT_DISCONN);
703 	sk->sk_err = err;
704 }
705 
/* ---- L2CAP connections ---- */

/* Walk every channel on @conn and push its connection state machine
 * forward, typically after the feature-mask exchange completes. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* Only connection-oriented channels need the handshake. */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				bh_unlock_sock(sk);
				continue;
			}

			/* Abort channels whose required mode the peer does
			 * not support. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				/* l2cap_chan_close() calls list_del(chan)
				 * so release the lock */
				read_unlock(&conn->chan_lock);
				l2cap_chan_close(chan, ECONNRESET);
				read_lock(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}

			req.scid = cpu_to_le16(chan->scid);
			req.psm  = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (chan->state == BT_CONNECT2) {
			/* We owe the peer a connect response. */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					/* Leave the final decision to
					 * userspace; wake the listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			/* Accepted: start the configuration exchange. */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
799 
800 /* Find socket with cid and source bdaddr.
801  * Returns closest match, locked.
802  */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
804 {
805 	struct l2cap_chan *c, *c1 = NULL;
806 
807 	read_lock(&chan_list_lock);
808 
809 	list_for_each_entry(c, &chan_list, global_l) {
810 		struct sock *sk = c->sk;
811 
812 		if (state && c->state != state)
813 			continue;
814 
815 		if (c->scid == cid) {
816 			/* Exact match. */
817 			if (!bacmp(&bt_sk(sk)->src, src)) {
818 				read_unlock(&chan_list_lock);
819 				return c;
820 			}
821 
822 			/* Closest match */
823 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
824 				c1 = c;
825 		}
826 	}
827 
828 	read_unlock(&chan_list_lock);
829 
830 	return c1;
831 }
832 
/* An incoming LE link came up: if a socket is listening on the LE data
 * CID, spawn a child channel for it and mark it connected. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	/* Let the channel owner allocate the child channel/socket. */
	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	/* The success path also falls through here: only the parent's
	 * socket lock needs releasing. */
	bh_unlock_sock(parent);
}
883 
884 static void l2cap_chan_ready(struct sock *sk)
885 {
886 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
887 	struct sock *parent = bt_sk(sk)->parent;
888 
889 	BT_DBG("sk %p, parent %p", sk, parent);
890 
891 	chan->conf_state = 0;
892 	__clear_chan_timer(chan);
893 
894 	l2cap_state_change(chan, BT_CONNECTED);
895 	sk->sk_state_change(sk);
896 
897 	if (parent)
898 		parent->sk_data_ready(parent, 0);
899 }
900 
/* The underlying HCI link is up: kick every channel on @conn into the
 * next phase of its setup. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: run the LE accept path. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start security at the pending level. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(sk);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no further
			 * L2CAP-level setup. */
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
937 
938 /* Notify sockets that we cannot guaranty reliability anymore */
939 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
940 {
941 	struct l2cap_chan *chan;
942 
943 	BT_DBG("conn %p", conn);
944 
945 	read_lock(&conn->chan_lock);
946 
947 	list_for_each_entry(chan, &conn->chan_l, list) {
948 		struct sock *sk = chan->sk;
949 
950 		if (chan->force_reliable)
951 			sk->sk_err = err;
952 	}
953 
954 	read_unlock(&conn->chan_lock);
955 }
956 
957 static void l2cap_info_timeout(unsigned long arg)
958 {
959 	struct l2cap_conn *conn = (void *) arg;
960 
961 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
962 	conn->info_ident = 0;
963 
964 	l2cap_conn_start(conn);
965 }
966 
/* Tear down the L2CAP connection on @hcon: close every channel with
 * error @err, stop pending timers and free the conn structure. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		/* Notify the channel owner outside the socket lock. */
		chan->ops->close(chan->data);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
		/* A pairing procedure was still pending on this LE link. */
		del_timer(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1000 
1001 static void security_timeout(unsigned long arg)
1002 {
1003 	struct l2cap_conn *conn = (void *) arg;
1004 
1005 	l2cap_conn_del(conn->hcon, ETIMEDOUT);
1006 }
1007 
1008 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1009 {
1010 	struct l2cap_conn *conn = hcon->l2cap_data;
1011 
1012 	if (conn || status)
1013 		return conn;
1014 
1015 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1016 	if (!conn)
1017 		return NULL;
1018 
1019 	hcon->l2cap_data = conn;
1020 	conn->hcon = hcon;
1021 
1022 	BT_DBG("hcon %p conn %p", hcon, conn);
1023 
1024 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1025 		conn->mtu = hcon->hdev->le_mtu;
1026 	else
1027 		conn->mtu = hcon->hdev->acl_mtu;
1028 
1029 	conn->src = &hcon->hdev->bdaddr;
1030 	conn->dst = &hcon->dst;
1031 
1032 	conn->feat_mask = 0;
1033 
1034 	spin_lock_init(&conn->lock);
1035 	rwlock_init(&conn->chan_lock);
1036 
1037 	INIT_LIST_HEAD(&conn->chan_l);
1038 
1039 	if (hcon->type == LE_LINK)
1040 		setup_timer(&conn->security_timer, security_timeout,
1041 						(unsigned long) conn);
1042 	else
1043 		setup_timer(&conn->info_timer, l2cap_info_timeout,
1044 						(unsigned long) conn);
1045 
1046 	conn->disc_reason = 0x13;
1047 
1048 	return conn;
1049 }
1050 
1051 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1052 {
1053 	write_lock_bh(&conn->chan_lock);
1054 	__l2cap_chan_add(conn, chan);
1055 	write_unlock_bh(&conn->chan_lock);
1056 }
1057 
1058 /* ---- Socket interface ---- */
1059 
1060 /* Find socket with psm and source bdaddr.
1061  * Returns closest match.
1062  */
1063 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1064 {
1065 	struct l2cap_chan *c, *c1 = NULL;
1066 
1067 	read_lock(&chan_list_lock);
1068 
1069 	list_for_each_entry(c, &chan_list, global_l) {
1070 		struct sock *sk = c->sk;
1071 
1072 		if (state && c->state != state)
1073 			continue;
1074 
1075 		if (c->psm == psm) {
1076 			/* Exact match. */
1077 			if (!bacmp(&bt_sk(sk)->src, src)) {
1078 				read_unlock(&chan_list_lock);
1079 				return c;
1080 			}
1081 
1082 			/* Closest match */
1083 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1084 				c1 = c;
1085 		}
1086 	}
1087 
1088 	read_unlock(&chan_list_lock);
1089 
1090 	return c1;
1091 }
1092 
/* Initiate an outgoing L2CAP connection for @chan using the source and
 * destination addresses stored on its socket. Returns 0 on success or a
 * negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	/* Pick the local adapter that routes to the destination. */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* LE data channels go over an LE link, everything else over ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Raw/connectionless channels skip the L2CAP
			 * connect handshake: ready once security passes. */
			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1158 
/* Sleep until all transmitted ERTM frames are acknowledged, the
 * connection goes away, a socket error occurs or a signal arrives.
 * Called with the socket locked; the lock is dropped around each sleep.
 * Returns 0 or a negative errno. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the per-iteration sleep quantum. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1190 
1191 static void l2cap_monitor_timeout(unsigned long arg)
1192 {
1193 	struct l2cap_chan *chan = (void *) arg;
1194 	struct sock *sk = chan->sk;
1195 
1196 	BT_DBG("chan %p", chan);
1197 
1198 	bh_lock_sock(sk);
1199 	if (chan->retry_count >= chan->remote_max_tx) {
1200 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1201 		bh_unlock_sock(sk);
1202 		return;
1203 	}
1204 
1205 	chan->retry_count++;
1206 	__set_monitor_timer(chan);
1207 
1208 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1209 	bh_unlock_sock(sk);
1210 }
1211 
/* ERTM retransmission timer expiry: an I-frame went unacknowledged for
 * the retransmission timeout.  Enter the monitor phase: reset the retry
 * counter, flag that we now wait for a frame with the F bit set, and
 * poll the remote with P=1.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__set_monitor_timer(chan);

	/* Transmission resumes only after the peer answers with F=1 */
	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1228 
1229 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1230 {
1231 	struct sk_buff *skb;
1232 
1233 	while ((skb = skb_peek(&chan->tx_q)) &&
1234 			chan->unacked_frames) {
1235 		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1236 			break;
1237 
1238 		skb = skb_dequeue(&chan->tx_q);
1239 		kfree_skb(skb);
1240 
1241 		chan->unacked_frames--;
1242 	}
1243 
1244 	if (!chan->unacked_frames)
1245 		__clear_retrans_timer(chan);
1246 }
1247 
1248 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1249 {
1250 	struct hci_conn *hcon = chan->conn->hcon;
1251 	u16 flags;
1252 
1253 	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1254 
1255 	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1256 		flags = ACL_START_NO_FLUSH;
1257 	else
1258 		flags = ACL_START;
1259 
1260 	bt_cb(skb)->force_active = chan->force_active;
1261 	hci_send_acl(hcon, skb, flags);
1262 }
1263 
/* Streaming-mode transmit: drain the tx queue, stamping each frame with
 * the next TxSeq (modulo 64) and, when CRC16 FCS was negotiated, the
 * frame check sequence trailer.  Streaming mode never retransmits, so
 * frames are dequeued for good rather than cloned.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		/* Patch TxSeq into the control field reserved at PDU build */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the whole frame except its own 2 bytes */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
1284 
1285 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1286 {
1287 	struct sk_buff *skb, *tx_skb;
1288 	u16 control, fcs;
1289 
1290 	skb = skb_peek(&chan->tx_q);
1291 	if (!skb)
1292 		return;
1293 
1294 	do {
1295 		if (bt_cb(skb)->tx_seq == tx_seq)
1296 			break;
1297 
1298 		if (skb_queue_is_last(&chan->tx_q, skb))
1299 			return;
1300 
1301 	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
1302 
1303 	if (chan->remote_max_tx &&
1304 			bt_cb(skb)->retries == chan->remote_max_tx) {
1305 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1306 		return;
1307 	}
1308 
1309 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1310 	bt_cb(skb)->retries++;
1311 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1312 	control &= L2CAP_CTRL_SAR;
1313 
1314 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1315 		control |= L2CAP_CTRL_FINAL;
1316 
1317 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1318 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1319 
1320 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1321 
1322 	if (chan->fcs == L2CAP_FCS_CRC16) {
1323 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1324 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1325 	}
1326 
1327 	l2cap_do_send(chan, tx_skb);
1328 }
1329 
1330 static int l2cap_ertm_send(struct l2cap_chan *chan)
1331 {
1332 	struct sk_buff *skb, *tx_skb;
1333 	u16 control, fcs;
1334 	int nsent = 0;
1335 
1336 	if (chan->state != BT_CONNECTED)
1337 		return -ENOTCONN;
1338 
1339 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1340 
1341 		if (chan->remote_max_tx &&
1342 				bt_cb(skb)->retries == chan->remote_max_tx) {
1343 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1344 			break;
1345 		}
1346 
1347 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1348 
1349 		bt_cb(skb)->retries++;
1350 
1351 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1352 		control &= L2CAP_CTRL_SAR;
1353 
1354 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1355 			control |= L2CAP_CTRL_FINAL;
1356 
1357 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1358 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1359 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1360 
1361 
1362 		if (chan->fcs == L2CAP_FCS_CRC16) {
1363 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1364 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1365 		}
1366 
1367 		l2cap_do_send(chan, tx_skb);
1368 
1369 		__set_retrans_timer(chan);
1370 
1371 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
1372 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1373 
1374 		if (bt_cb(skb)->retries == 1)
1375 			chan->unacked_frames++;
1376 
1377 		chan->frames_sent++;
1378 
1379 		if (skb_queue_is_last(&chan->tx_q, skb))
1380 			chan->tx_send_head = NULL;
1381 		else
1382 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1383 
1384 		nsent++;
1385 	}
1386 
1387 	return nsent;
1388 }
1389 
1390 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1391 {
1392 	int ret;
1393 
1394 	if (!skb_queue_empty(&chan->tx_q))
1395 		chan->tx_send_head = chan->tx_q.next;
1396 
1397 	chan->next_tx_seq = chan->expected_ack_seq;
1398 	ret = l2cap_ertm_send(chan);
1399 	return ret;
1400 }
1401 
1402 static void l2cap_send_ack(struct l2cap_chan *chan)
1403 {
1404 	u16 control = 0;
1405 
1406 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1407 
1408 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1409 		control |= L2CAP_SUPER_RCV_NOT_READY;
1410 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1411 		l2cap_send_sframe(chan, control);
1412 		return;
1413 	}
1414 
1415 	if (l2cap_ertm_send(chan) > 0)
1416 		return;
1417 
1418 	control |= L2CAP_SUPER_RCV_READY;
1419 	l2cap_send_sframe(chan, control);
1420 }
1421 
1422 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1423 {
1424 	struct srej_list *tail;
1425 	u16 control;
1426 
1427 	control = L2CAP_SUPER_SELECT_REJECT;
1428 	control |= L2CAP_CTRL_FINAL;
1429 
1430 	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1431 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1432 
1433 	l2cap_send_sframe(chan, control);
1434 }
1435 
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the head skb (whose headers the caller already built),
 * the remainder is spread over ACL-MTU-sized continuation fragments
 * chained on skb's frag_list.
 *
 * Returns the number of bytes copied, or a negative errno.  On error
 * the caller frees @skb, which also releases any fragments already
 * attached to it.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1467 
/* Build a connectionless (G-frame) PDU: L2CAP header plus a 2-byte PSM
 * field, followed by the payload copied from the caller's iovec.
 *
 * Returns the skb on success or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* The head skb carries the header; cap it at the ACL MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1497 
1498 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1499 {
1500 	struct sock *sk = chan->sk;
1501 	struct l2cap_conn *conn = chan->conn;
1502 	struct sk_buff *skb;
1503 	int err, count, hlen = L2CAP_HDR_SIZE;
1504 	struct l2cap_hdr *lh;
1505 
1506 	BT_DBG("sk %p len %d", sk, (int)len);
1507 
1508 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1509 	skb = bt_skb_send_alloc(sk, count + hlen,
1510 			msg->msg_flags & MSG_DONTWAIT, &err);
1511 	if (!skb)
1512 		return ERR_PTR(err);
1513 
1514 	/* Create L2CAP header */
1515 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1516 	lh->cid = cpu_to_le16(chan->dcid);
1517 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1518 
1519 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1520 	if (unlikely(err < 0)) {
1521 		kfree_skb(skb);
1522 		return ERR_PTR(err);
1523 	}
1524 	return skb;
1525 }
1526 
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field (@control, sequence numbers patched in later at send time), an
 * optional 2-byte SDU length (@sdulen, present only on SAR start
 * frames) and an optional 2-byte FCS placeholder (filled at send time).
 *
 * Returns the skb on success or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start frames carry the total SDU length */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the FCS trailer when negotiated */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real CRC is computed just before sending */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1574 
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), zero or more CONTINUE frames, and a
 * final END frame.  Segments are built on a temporary queue so a
 * mid-way allocation failure leaves chan->tx_q untouched; on success
 * they are spliced onto tx_q in one go.
 *
 * Returns the total number of payload bytes queued, or a negative
 * errno.
 */
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop all segments built so far */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	/* Start sending from the first new segment if nothing is pending */
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1619 
/* Queue and transmit @len bytes of user data on @chan, dispatching on
 * channel type and mode:
 *  - connectionless channels send a single G-frame immediately;
 *  - basic mode sends a single B-frame (bounded by the outgoing MTU);
 *  - ERTM/streaming build one I-frame (or a SAR-segmented series),
 *    queue it, then transmit per the mode's rules.
 *
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* In ERTM, hold transmission while the remote is busy and
		 * we are still waiting for an F-bit response to a poll;
		 * the data stays queued and is reported as accepted. */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
1698 
1699 /* Copy frame to all raw sockets on that connection */
1700 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1701 {
1702 	struct sk_buff *nskb;
1703 	struct l2cap_chan *chan;
1704 
1705 	BT_DBG("conn %p", conn);
1706 
1707 	read_lock(&conn->chan_lock);
1708 	list_for_each_entry(chan, &conn->chan_l, list) {
1709 		struct sock *sk = chan->sk;
1710 		if (chan->chan_type != L2CAP_CHAN_RAW)
1711 			continue;
1712 
1713 		/* Don't send frame to the socket it came from */
1714 		if (skb->sk == sk)
1715 			continue;
1716 		nskb = skb_clone(skb, GFP_ATOMIC);
1717 		if (!nskb)
1718 			continue;
1719 
1720 		if (chan->ops->recv(chan->data, nskb))
1721 			kfree_skb(nskb);
1722 	}
1723 	read_unlock(&conn->chan_lock);
1724 }
1725 
1726 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID chosen per
 * link type), command header (@code, @ident, @dlen) and @dlen bytes of
 * @data, spilling anything beyond the ACL MTU into continuation
 * fragments on the frag_list.
 *
 * Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling CID */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the head skb with as much payload as fits */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb along with any fragments already chained */
	kfree_skb(skb);
	return NULL;
}
1789 
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * The option's type and length are returned through @type/@olen; its
 * value through @val — decoded for the standard 1/2/4-byte sizes, or
 * returned as a pointer (cast into the unsigned long) for anything
 * else, e.g. an RFC option struct.
 *
 * Returns the total number of bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Larger options: hand back a pointer into the buffer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1822 
/* Append one configuration option (@type, @len, @val) at *ptr and
 * advance *ptr past it.  Mirrors l2cap_get_conf_opt(): 1/2/4-byte
 * values are encoded inline (little-endian), anything else treats @val
 * as a pointer to @len bytes to copy (e.g. an RFC option struct).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* @val is a pointer to the option payload */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1852 
1853 static void l2cap_ack_timeout(unsigned long arg)
1854 {
1855 	struct l2cap_chan *chan = (void *) arg;
1856 
1857 	bh_lock_sock(chan->sk);
1858 	l2cap_send_ack(chan);
1859 	bh_unlock_sock(chan->sk);
1860 }
1861 
/* Reset ERTM per-channel state: zero the sequence/accounting counters,
 * arm the retransmission, monitor and ack timers, initialize the SREJ
 * receive queue and list, and route socket backlog processing through
 * the ERTM receive handler.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);


	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1885 
1886 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1887 {
1888 	switch (mode) {
1889 	case L2CAP_MODE_STREAMING:
1890 	case L2CAP_MODE_ERTM:
1891 		if (l2cap_mode_supported(mode, remote_feat_mask))
1892 			return mode;
1893 		/* fall through */
1894 	default:
1895 		return L2CAP_MODE_BASIC;
1896 	}
1897 }
1898 
/* Build our Configure Request into @data: MTU option when non-default,
 * plus an RFC option describing the selected mode (downgrading to a
 * peer-supported mode on the very first request) and, for ERTM and
 * streaming, an FCS option when we want to drop the CRC16.
 *
 * Returns the request length in bytes.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A state-2 device insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Omit the RFC option entirely for peers that know
		 * neither ERTM nor streaming */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Keep the PDU within the ACL MTU minus frame overhead */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to drop the FCS if we or the peer prefer none */
		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Keep the PDU within the ACL MTU minus frame overhead */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to drop the FCS if we or the peer prefer none */
		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1995 
1996 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1997 {
1998 	struct l2cap_conf_rsp *rsp = data;
1999 	void *ptr = rsp->data;
2000 	void *req = chan->conf_req;
2001 	int len = chan->conf_len;
2002 	int type, hint, olen;
2003 	unsigned long val;
2004 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2005 	u16 mtu = L2CAP_DEFAULT_MTU;
2006 	u16 result = L2CAP_CONF_SUCCESS;
2007 
2008 	BT_DBG("chan %p", chan);
2009 
2010 	while (len >= L2CAP_CONF_OPT_SIZE) {
2011 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2012 
2013 		hint  = type & L2CAP_CONF_HINT;
2014 		type &= L2CAP_CONF_MASK;
2015 
2016 		switch (type) {
2017 		case L2CAP_CONF_MTU:
2018 			mtu = val;
2019 			break;
2020 
2021 		case L2CAP_CONF_FLUSH_TO:
2022 			chan->flush_to = val;
2023 			break;
2024 
2025 		case L2CAP_CONF_QOS:
2026 			break;
2027 
2028 		case L2CAP_CONF_RFC:
2029 			if (olen == sizeof(rfc))
2030 				memcpy(&rfc, (void *) val, olen);
2031 			break;
2032 
2033 		case L2CAP_CONF_FCS:
2034 			if (val == L2CAP_FCS_NONE)
2035 				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2036 
2037 			break;
2038 
2039 		default:
2040 			if (hint)
2041 				break;
2042 
2043 			result = L2CAP_CONF_UNKNOWN;
2044 			*((u8 *) ptr++) = type;
2045 			break;
2046 		}
2047 	}
2048 
2049 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
2050 		goto done;
2051 
2052 	switch (chan->mode) {
2053 	case L2CAP_MODE_STREAMING:
2054 	case L2CAP_MODE_ERTM:
2055 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2056 			chan->mode = l2cap_select_mode(rfc.mode,
2057 					chan->conn->feat_mask);
2058 			break;
2059 		}
2060 
2061 		if (chan->mode != rfc.mode)
2062 			return -ECONNREFUSED;
2063 
2064 		break;
2065 	}
2066 
2067 done:
2068 	if (chan->mode != rfc.mode) {
2069 		result = L2CAP_CONF_UNACCEPT;
2070 		rfc.mode = chan->mode;
2071 
2072 		if (chan->num_conf_rsp == 1)
2073 			return -ECONNREFUSED;
2074 
2075 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2076 					sizeof(rfc), (unsigned long) &rfc);
2077 	}
2078 
2079 
2080 	if (result == L2CAP_CONF_SUCCESS) {
2081 		/* Configure output options and let the other side know
2082 		 * which ones we don't like. */
2083 
2084 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
2085 			result = L2CAP_CONF_UNACCEPT;
2086 		else {
2087 			chan->omtu = mtu;
2088 			set_bit(CONF_MTU_DONE, &chan->conf_state);
2089 		}
2090 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2091 
2092 		switch (rfc.mode) {
2093 		case L2CAP_MODE_BASIC:
2094 			chan->fcs = L2CAP_FCS_NONE;
2095 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2096 			break;
2097 
2098 		case L2CAP_MODE_ERTM:
2099 			chan->remote_tx_win = rfc.txwin_size;
2100 			chan->remote_max_tx = rfc.max_transmit;
2101 
2102 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2103 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2104 
2105 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2106 
2107 			rfc.retrans_timeout =
2108 				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2109 			rfc.monitor_timeout =
2110 				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2111 
2112 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2113 
2114 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2115 					sizeof(rfc), (unsigned long) &rfc);
2116 
2117 			break;
2118 
2119 		case L2CAP_MODE_STREAMING:
2120 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2121 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2122 
2123 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2124 
2125 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2126 
2127 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2128 					sizeof(rfc), (unsigned long) &rfc);
2129 
2130 			break;
2131 
2132 		default:
2133 			result = L2CAP_CONF_UNACCEPT;
2134 
2135 			memset(&rfc, 0, sizeof(rfc));
2136 			rfc.mode = chan->mode;
2137 		}
2138 
2139 		if (result == L2CAP_CONF_SUCCESS)
2140 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2141 	}
2142 	rsp->scid   = cpu_to_le16(chan->dcid);
2143 	rsp->result = cpu_to_le16(result);
2144 	rsp->flags  = cpu_to_le16(0x0000);
2145 
2146 	return ptr - data;
2147 }
2148 
2149 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2150 {
2151 	struct l2cap_conf_req *req = data;
2152 	void *ptr = req->data;
2153 	int type, olen;
2154 	unsigned long val;
2155 	struct l2cap_conf_rfc rfc;
2156 
2157 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2158 
2159 	while (len >= L2CAP_CONF_OPT_SIZE) {
2160 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2161 
2162 		switch (type) {
2163 		case L2CAP_CONF_MTU:
2164 			if (val < L2CAP_DEFAULT_MIN_MTU) {
2165 				*result = L2CAP_CONF_UNACCEPT;
2166 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2167 			} else
2168 				chan->imtu = val;
2169 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2170 			break;
2171 
2172 		case L2CAP_CONF_FLUSH_TO:
2173 			chan->flush_to = val;
2174 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2175 							2, chan->flush_to);
2176 			break;
2177 
2178 		case L2CAP_CONF_RFC:
2179 			if (olen == sizeof(rfc))
2180 				memcpy(&rfc, (void *)val, olen);
2181 
2182 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2183 							rfc.mode != chan->mode)
2184 				return -ECONNREFUSED;
2185 
2186 			chan->fcs = 0;
2187 
2188 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2189 					sizeof(rfc), (unsigned long) &rfc);
2190 			break;
2191 		}
2192 	}
2193 
2194 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2195 		return -ECONNREFUSED;
2196 
2197 	chan->mode = rfc.mode;
2198 
2199 	if (*result == L2CAP_CONF_SUCCESS) {
2200 		switch (rfc.mode) {
2201 		case L2CAP_MODE_ERTM:
2202 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2203 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2204 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2205 			break;
2206 		case L2CAP_MODE_STREAMING:
2207 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2208 		}
2209 	}
2210 
2211 	req->dcid   = cpu_to_le16(chan->dcid);
2212 	req->flags  = cpu_to_le16(0x0000);
2213 
2214 	return ptr - data;
2215 }
2216 
/* Fill in a bare Configure Response (header only, no options) with the
 * given @result and @flags.  Returns its length in bytes.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
2230 
/* Complete a connection that was deferred for authorization: send the
 * success Connect Response and, unless a Configure Request has already
 * been sent, kick off configuration.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller proceeds to send the Configure Request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2251 
2252 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2253 {
2254 	int type, olen;
2255 	unsigned long val;
2256 	struct l2cap_conf_rfc rfc;
2257 
2258 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2259 
2260 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2261 		return;
2262 
2263 	while (len >= L2CAP_CONF_OPT_SIZE) {
2264 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2265 
2266 		switch (type) {
2267 		case L2CAP_CONF_RFC:
2268 			if (olen == sizeof(rfc))
2269 				memcpy(&rfc, (void *)val, olen);
2270 			goto done;
2271 		}
2272 	}
2273 
2274 done:
2275 	switch (rfc.mode) {
2276 	case L2CAP_MODE_ERTM:
2277 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2278 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2279 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2280 		break;
2281 	case L2CAP_MODE_STREAMING:
2282 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2283 	}
2284 }
2285 
2286 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2287 {
2288 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2289 
2290 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2291 		return 0;
2292 
2293 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2294 					cmd->ident == conn->info_ident) {
2295 		del_timer(&conn->info_timer);
2296 
2297 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2298 		conn->info_ident = 0;
2299 
2300 		l2cap_conn_start(conn);
2301 	}
2302 
2303 	return 0;
2304 }
2305 
/* Handle an incoming L2CAP Connection Request.
 *
 * Looks up a listening channel on the requested PSM, performs security
 * and accept-backlog checks, creates a child channel and answers with a
 * Connection Response (success, pending, or an error result).  Always
 * returns 0; failures are reported to the peer in the response.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	/* PSM 0x0001 is SDP, which is exempt from the link-mode check.
	 * NOTE(review): 0x05 looks like the HCI "authentication failure"
	 * disconnect reason -- confirm against hci.h.
	 */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		/* Duplicate source CID from the peer: zap the freshly
		 * created child channel again.
		 */
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our freshly allocated source CID becomes the peer's dcid. */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides whether to accept:
				 * stay in CONNECT2 and report
				 * "authorization pending".
				 */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange still outstanding: answer
		 * "pending" and kick off an info request below.
		 */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* First connection on this link: query the peer's
		 * extended feature mask before completing setup.
		 */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		/* Connection accepted immediately: start configuration. */
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2439 
/* Handle an incoming L2CAP Connection Response for a channel we
 * initiated.  On success the channel enters BT_CONFIG and the first
 * Configure Request goes out; "pending" only flags the channel; any
 * other result tears the channel down.
 *
 * Returns 0, or -EFAULT when no matching channel is found.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A zero scid means the response carries no channel allocation
	 * (e.g. pending/refused); match by the request ident instead.
	 * NOTE(review): both lookup helpers appear to return with the
	 * channel's socket locked -- the unlock happens at the end.
	 */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only ever send the initial Configure Request once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Defer the teardown via a short channel timer. */
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			__set_chan_timer(chan, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2502 
2503 static inline void set_default_fcs(struct l2cap_chan *chan)
2504 {
2505 	/* FCS is enabled only in ERTM or streaming mode, if one or both
2506 	 * sides request it.
2507 	 */
2508 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2509 		chan->fcs = L2CAP_FCS_NONE;
2510 	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2511 		chan->fcs = L2CAP_FCS_CRC16;
2512 }
2513 
/* Handle an incoming L2CAP Configure Request for a local channel.
 *
 * Config options may be split across several requests (continuation
 * flag); they are accumulated in chan->conf_req until complete, then
 * parsed and answered.  Once both directions are configured the
 * channel becomes connected.
 *
 * Returns 0, or -ENOENT if no channel matches the dcid.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): l2cap_get_chan_by_scid() appears to return with
	 * the channel's socket locked; the unlock is at "unlock" below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Configuration is only legal in these states: answer
		 * with an "invalid CID" command reject.
		 */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparsable options: tear the channel down. */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: the channel is now open. */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		/* We have not sent our own Configure Request yet. */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2609 
2610 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2611 {
2612 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2613 	u16 scid, flags, result;
2614 	struct l2cap_chan *chan;
2615 	struct sock *sk;
2616 	int len = cmd->len - sizeof(*rsp);
2617 
2618 	scid   = __le16_to_cpu(rsp->scid);
2619 	flags  = __le16_to_cpu(rsp->flags);
2620 	result = __le16_to_cpu(rsp->result);
2621 
2622 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2623 			scid, flags, result);
2624 
2625 	chan = l2cap_get_chan_by_scid(conn, scid);
2626 	if (!chan)
2627 		return 0;
2628 
2629 	sk = chan->sk;
2630 
2631 	switch (result) {
2632 	case L2CAP_CONF_SUCCESS:
2633 		l2cap_conf_rfc_get(chan, rsp->data, len);
2634 		break;
2635 
2636 	case L2CAP_CONF_UNACCEPT:
2637 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2638 			char req[64];
2639 
2640 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2641 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2642 				goto done;
2643 			}
2644 
2645 			/* throw out any old stored conf requests */
2646 			result = L2CAP_CONF_SUCCESS;
2647 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2648 								req, &result);
2649 			if (len < 0) {
2650 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2651 				goto done;
2652 			}
2653 
2654 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
2655 						L2CAP_CONF_REQ, len, req);
2656 			chan->num_conf_req++;
2657 			if (result != L2CAP_CONF_SUCCESS)
2658 				goto done;
2659 			break;
2660 		}
2661 
2662 	default:
2663 		sk->sk_err = ECONNRESET;
2664 		__set_chan_timer(chan, HZ * 5);
2665 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2666 		goto done;
2667 	}
2668 
2669 	if (flags & 0x01)
2670 		goto done;
2671 
2672 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
2673 
2674 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2675 		set_default_fcs(chan);
2676 
2677 		l2cap_state_change(chan, BT_CONNECTED);
2678 		chan->next_tx_seq = 0;
2679 		chan->expected_tx_seq = 0;
2680 		skb_queue_head_init(&chan->tx_q);
2681 		if (chan->mode ==  L2CAP_MODE_ERTM)
2682 			l2cap_ertm_init(chan);
2683 
2684 		l2cap_chan_ready(sk);
2685 	}
2686 
2687 done:
2688 	bh_unlock_sock(sk);
2689 	return 0;
2690 }
2691 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges with a Disconnection Response (note the dcid/scid swap:
 * the response carries the CIDs from the peer's point of view), shuts
 * the socket down and deletes the channel.  If the socket is currently
 * locked by userspace, teardown is deferred via a short channel timer.
 * Always returns 0.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* NOTE(review): l2cap_get_chan_by_scid() appears to lock the
	 * channel's socket; it is unlocked on every return path below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
2732 
2733 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2734 {
2735 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2736 	u16 dcid, scid;
2737 	struct l2cap_chan *chan;
2738 	struct sock *sk;
2739 
2740 	scid = __le16_to_cpu(rsp->scid);
2741 	dcid = __le16_to_cpu(rsp->dcid);
2742 
2743 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2744 
2745 	chan = l2cap_get_chan_by_scid(conn, scid);
2746 	if (!chan)
2747 		return 0;
2748 
2749 	sk = chan->sk;
2750 
2751 	/* don't delete l2cap channel if sk is owned by user */
2752 	if (sock_owned_by_user(sk)) {
2753 		l2cap_state_change(chan,BT_DISCONN);
2754 		__clear_chan_timer(chan);
2755 		__set_chan_timer(chan, HZ / 5);
2756 		bh_unlock_sock(sk);
2757 		return 0;
2758 	}
2759 
2760 	l2cap_chan_del(chan, 0);
2761 	bh_unlock_sock(sk);
2762 
2763 	chan->ops->close(chan->data);
2764 	return 0;
2765 }
2766 
/* Handle an incoming L2CAP Information Request.
 *
 * Supported queries are the extended feature mask and the fixed
 * channel map; anything else is answered with "not supported".
 * Response buffers are the 4-byte l2cap_info_rsp header plus payload:
 * a 4-byte feature mask (buf[8]) or the 8-byte fixed channel map
 * copied in at offset 4 (buf[12]).  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS only when ERTM is enabled. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Fixed channel map sits right after the 4-byte header. */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
2806 
/* Handle an incoming L2CAP Information Response during the initial
 * feature exchange.
 *
 * Stores the peer's feature mask and, if fixed channels are supported,
 * issues a follow-up query for the fixed channel map.  When the
 * exchange completes (or fails) the DONE flag is set and pending
 * channels are started.  Responses not matching our outstanding
 * request ident are ignored.  Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer: finish the exchange anyway so the
		 * pending connections are not stalled.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel-map query. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2859 
2860 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2861 							u16 to_multiplier)
2862 {
2863 	u16 max_latency;
2864 
2865 	if (min > max || min < 6 || max > 3200)
2866 		return -EINVAL;
2867 
2868 	if (to_multiplier < 10 || to_multiplier > 3200)
2869 		return -EINVAL;
2870 
2871 	if (max >= to_multiplier * 8)
2872 		return -EINVAL;
2873 
2874 	max_latency = (to_multiplier * 8 / max) - 1;
2875 	if (latency > 499 || latency > max_latency)
2876 		return -EINVAL;
2877 
2878 	return 0;
2879 }
2880 
/* Handle an LE Connection Parameter Update Request from the slave.
 *
 * Only valid when we are master of the link.  Validates the requested
 * parameters, always answers with accept/reject, and applies the
 * update to the controller only when the parameters were acceptable.
 *
 * Returns 0 on normal handling, -EINVAL when we are not master, or
 * -EPROTO on a malformed request.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* Respond first, then push the update to the controller. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2922 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline with the received payload; echo
 * responses need no action.  Unknown opcodes return -EINVAL so the
 * caller sends a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2980 
2981 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2982 					struct l2cap_cmd_hdr *cmd, u8 *data)
2983 {
2984 	switch (cmd->code) {
2985 	case L2CAP_COMMAND_REJ:
2986 		return 0;
2987 
2988 	case L2CAP_CONN_PARAM_UPDATE_REQ:
2989 		return l2cap_conn_param_update_req(conn, cmd, data);
2990 
2991 	case L2CAP_CONN_PARAM_UPDATE_RSP:
2992 		return 0;
2993 
2994 	default:
2995 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2996 		return -EINVAL;
2997 	}
2998 }
2999 
/* Demultiplex an L2CAP signaling channel PDU.
 *
 * A single PDU may carry several signaling commands back to back; each
 * is parsed and dispatched to the BR/EDR or LE handler depending on
 * the link type.  A handler error is answered with a Command Reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
							struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a look at the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Ident 0 is invalid, and a command length past the end
		 * of the PDU means corruption: stop parsing.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
3046 
/* Verify the CRC16 FCS that trails an ERTM/streaming frame.
 *
 * The checksum covers the L2CAP basic header plus the 2-byte control
 * field (the hdr_size bytes sitting before skb->data) and the payload.
 * The skb is trimmed by two bytes first; the received FCS is then read
 * from just past the new tail -- those bytes are still present in the
 * buffer after skb_trim().
 *
 * Returns 0 on match (or when FCS is not in use), -EBADMSG otherwise.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3062 
/* Answer a poll from the peer: report local busy with an RNR if
 * needed, retransmit and/or send pending I-frames, and if nothing at
 * all was transmitted, fall back to a plain RR so the peer still gets
 * a response frame.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing sent and not busy: send an explicit RR. */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
3088 
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by each frame's distance from buffer_seq in the
 * modulo-64 sequence space.
 *
 * Returns -EINVAL when a frame with the same tx_seq is already queued
 * (duplicate), 0 on successful insertion.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Offset of the new frame from buffer_seq, wrapped to [0, 63]. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame further from buffer_seq than the
		 * new one marks the insertion point.
		 */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
3130 
/* Chain @new_frag onto @skb's frag_list and update the length
 * accounting.  @last_frag points at the current tail of the chain and
 * is advanced to @new_frag.
 *
 * NOTE(review): for the very first fragment the caller initializes
 * *last_frag to @skb itself (see l2cap_reassemble_sdu), so both
 * skb_shinfo(skb)->frag_list and skb->next end up pointing at
 * @new_frag -- confirm the skb->next assignment is intended there.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
3149 
/* Reassemble SDUs from I-frames according to the SAR bits in @control.
 *
 * Unsegmented frames are delivered directly.  A start frame carries a
 * 2-byte total SDU length; continuation and end fragments are chained
 * onto chan->sdu until the announced length has arrived, then the SDU
 * is handed to the channel's recv op.  Any protocol violation
 * (unexpected SAR state, oversized or mis-sized SDU) returns a
 * negative error and drops the partial SDU.
 *
 * NOTE(review): the error path frees @skb; this assumes ops->recv()
 * does not consume the skb when it fails -- confirm against the
 * socket-layer implementation.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame while a partial SDU is pending
		 * is a protocol error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SDU_START:
		if (chan->sdu)
			break;

		/* First two payload bytes carry the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the full SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership moved to chan->sdu: don't free below. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching the announced length before the end frame
		 * is a protocol error (err stays -EINVAL).
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SDU_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total must match the length announced in the start
		 * frame exactly.
		 */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending skb (if still ours) and any partial
		 * SDU; kfree_skb(NULL) is a no-op.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
3230 
/* Enter ERTM local-busy state: tell the peer to stop sending by
 * emitting an RNR S-frame, remember that the RNR went out, and stop
 * the pending-ack timer since we will not ack while busy.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, control);

	set_bit(CONN_RNR_SENT, &chan->conn_state);

	__clear_ack_timer(chan);
}
3247 
/* Leave ERTM local-busy state.  If an RNR was previously sent, poll
 * the peer with an RR (P-bit set) and wait for the F-bit reply under
 * the monitor timer; in any case clear the busy flags.
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	/* Switch from retransmission to monitor timing while waiting
	 * for the poll response.
	 */
	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
3271 
3272 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3273 {
3274 	if (chan->mode == L2CAP_MODE_ERTM) {
3275 		if (busy)
3276 			l2cap_ertm_enter_local_busy(chan);
3277 		else
3278 			l2cap_ertm_exit_local_busy(chan);
3279 	}
3280 }
3281 
/* After a missing frame has arrived, release the run of consecutive
 * buffered frames starting at @tx_seq from the SREJ queue up to the
 * next gap (or until local-busy kicks in), reassembling each into
 * SDUs.  A reassembly failure disconnects the channel.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		/* Queue is ordered: a mismatch means the next gap. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3308 
3309 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3310 {
3311 	struct srej_list *l, *tmp;
3312 	u16 control;
3313 
3314 	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3315 		if (l->tx_seq == tx_seq) {
3316 			list_del(&l->list);
3317 			kfree(l);
3318 			return;
3319 		}
3320 		control = L2CAP_SUPER_SELECT_REJECT;
3321 		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3322 		l2cap_send_sframe(chan, control);
3323 		list_del(&l->list);
3324 		list_add_tail(&l->list, &chan->srej_l);
3325 	}
3326 }
3327 
3328 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3329 {
3330 	struct srej_list *new;
3331 	u16 control;
3332 
3333 	while (tx_seq != chan->expected_tx_seq) {
3334 		control = L2CAP_SUPER_SELECT_REJECT;
3335 		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3336 		l2cap_send_sframe(chan, control);
3337 
3338 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3339 		new->tx_seq = chan->expected_tx_seq;
3340 		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3341 		list_add_tail(&new->list, &chan->srej_l);
3342 	}
3343 	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3344 }
3345 
/* Process a received ERTM I-frame.
 *
 * Drops frames the peer has acknowledged (req_seq), validates tx_seq
 * against the transmit window, and either delivers the frame (the
 * in-sequence case at "expected") or runs the SREJ-based recovery
 * machinery for out-of-sequence frames.  Returns 0 in most cases; a
 * reassembly error disconnects the channel and is returned.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	/* F-bit answers our poll: stop the monitor timer and resume
	 * normal retransmission timing.
	 */
	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	/* Distance of tx_seq from buffer_seq in modulo-64 space. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: buffer it
			 * and flush the consecutive frames behind the
			 * gap.
			 */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				/* All gaps filled: leave recovery. */
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			/* Frame already on the SREJ list means the peer
			 * missed our SREJ: resend it.
			 */
			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: enter SREJ recovery. */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		set_bit(CONN_SEND_PBIT, &chan->conn_state);

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* During recovery, in-sequence frames are buffered, not
		 * delivered, until the gap list is empty.
		 */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	/* Ack after roughly a sixth of the window has been received. */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3480 
/* Handle an RR (Receiver Ready) S-frame: the peer acknowledges frames and
 * may poll us (P bit) or close a poll/retransmission round (F bit). */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	/* ReqSeq acknowledges everything we sent before that number. */
	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our next response must carry the F bit. */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			/* Peer just left busy state while we still have
			 * unacked frames: re-arm the retransmission timer. */
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F bit answers an earlier poll; retransmit unless a REJ
		 * already triggered the retransmission (REJ_ACT set). */
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		/* Plain RR: clear remote-busy and resume sending. */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3520 
/* Handle a REJ S-frame: the peer asks for retransmission of every
 * I-frame starting at the rejected sequence number. */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	/* Everything before the rejected frame is implicitly acked. */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F bit set: retransmit only if no REJ action is pending. */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* While awaiting an F bit, note the REJ was acted upon so
		 * the F-bit path above will not retransmit again. */
		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Handle an SREJ S-frame: the peer requests retransmission of the single
 * I-frame with sequence number tx_seq. */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P bit also acknowledges frames up to tx_seq. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			/* Remember which frame was re-sent so a later F-bit
			 * SREJ for the same seq is not re-sent again. */
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmission if this F bit answers the SREJ
		 * we already acted on for the same sequence number. */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
3577 
/* Handle an RNR (Receiver Not Ready) S-frame: the peer is busy, so stop
 * (re)transmitting until it signals ready again. */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	/* The RNR still acknowledges frames up to tx_seq. */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* No SREJ recovery in progress: stop the retransmission
		 * timer and, if polled, answer with the F bit right away. */
		__clear_retrans_timer(chan);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress: keep requesting the missing frames. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3603 
/* Dispatch a received S-frame to the handler for its supervisory
 * function (RR/REJ/SREJ/RNR) and consume the skb. Always returns 0. */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	/* An F bit while we are waiting for one ends the monitor/poll
	 * exchange; fall back to the normal retransmission timer. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	/* NOTE(review): no default case — assumes the masked supervise
	 * field can only take these four values; confirm against the
	 * definition of L2CAP_CTRL_SUPERVISE. */
	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	/* S-frames carry no payload to deliver; free the buffer here. */
	kfree_skb(skb);
	return 0;
}
3637 
/* Validate and dispatch one ERTM frame: strip the 16-bit control field,
 * check FCS and length bounds, sanity-check ReqSeq, then hand off to the
 * I-frame or S-frame handler. The skb is consumed on every path. */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* A start-of-SDU I-frame carries a 2-byte SDU length field. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* The trailing 16-bit FCS is not payload either. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* ReqSeq must lie inside the window of frames we actually sent;
	 * offsets use modulo-64 sequence arithmetic. */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative if the frame was shorter than its
		 * mandatory header fields — protocol violation. */
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3707 
/* Deliver an incoming frame to the channel identified by cid, applying
 * the channel's receive mode (basic, ERTM or streaming). Consumes skb.
 * NOTE(review): the drop/done paths unlock sk, which implies
 * l2cap_get_chan_by_scid returns with the socket bh-locked — confirm. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it took ownership of skb. */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process in-line unless the socket is owned by user
		 * context; then defer via the backlog queue. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		/* Start-of-SDU frames carry a 2-byte SDU length field. */
		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode never carries S-frames. */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = (tx_seq + 1) % 64;

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3803 
/* Deliver a connectionless payload to the socket bound to the given PSM,
 * enforcing state and incoming-MTU checks. Consumes the skb unless the
 * channel's recv op takes it. "drop" falls through to "done" so the
 * socket lock is always released. */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means it took ownership of the skb. */
	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3836 
/* Deliver an LE fixed-channel (ATT) payload to the matching socket,
 * enforcing state and incoming-MTU checks. Consumes the skb unless the
 * channel's recv op takes it. "drop" falls through to "done" so the
 * socket lock is always released. */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means it took ownership of the skb. */
	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3869 
/* Top-level L2CAP demultiplexer: parse the basic header of a complete,
 * reassembled frame and route it by destination CID. Consumes the skb. */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* lh still points at the header after the pull; skb_pull only
	 * advances skb->data, it does not free or move the buffer. */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the reassembled payload exactly. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 16-bit PSM before data. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* A rejected SMP packet tears down the whole connection. */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3913 
3914 /* ---- L2CAP interface with lower layer (HCI) ---- */
3915 
/* HCI callback for an incoming ACL connection from bdaddr. Scan the
 * listening channels and return the link-mode bits (accept/master),
 * preferring listeners bound to this adapter's address over wildcard
 * (BDADDR_ANY) listeners. */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Listener bound to this adapter's own address. */
			lm1 |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener: used only if no exact match. */
			lm2 |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
3949 
3950 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3951 {
3952 	struct l2cap_conn *conn;
3953 
3954 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3955 
3956 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3957 		return -EINVAL;
3958 
3959 	if (!status) {
3960 		conn = l2cap_conn_add(hcon, status);
3961 		if (conn)
3962 			l2cap_conn_ready(conn);
3963 	} else
3964 		l2cap_conn_del(hcon, bt_to_errno(status));
3965 
3966 	return 0;
3967 }
3968 
3969 static int l2cap_disconn_ind(struct hci_conn *hcon)
3970 {
3971 	struct l2cap_conn *conn = hcon->l2cap_data;
3972 
3973 	BT_DBG("hcon %p", hcon);
3974 
3975 	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3976 		return 0x13;
3977 
3978 	return conn->disc_reason;
3979 }
3980 
3981 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3982 {
3983 	BT_DBG("hcon %p reason %d", hcon, reason);
3984 
3985 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3986 		return -EINVAL;
3987 
3988 	l2cap_conn_del(hcon, bt_to_errno(reason));
3989 
3990 	return 0;
3991 }
3992 
3993 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3994 {
3995 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
3996 		return;
3997 
3998 	if (encrypt == 0x00) {
3999 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
4000 			__clear_chan_timer(chan);
4001 			__set_chan_timer(chan, HZ * 5);
4002 		} else if (chan->sec_level == BT_SECURITY_HIGH)
4003 			l2cap_chan_close(chan, ECONNREFUSED);
4004 	} else {
4005 		if (chan->sec_level == BT_SECURITY_MEDIUM)
4006 			__clear_chan_timer(chan);
4007 	}
4008 }
4009 
/* HCI callback: authentication/encryption status change on hcon. Walk
 * every channel on the connection and advance its state machine. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* LE security event: hand out SMP keys, stop the timer. */
		smp_distribute_keys(conn, 0);
		del_timer(&conn->security_timer);
	}

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE data channel becomes ready once encrypted. */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(sk);
			}

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect request is already in flight; leave it be. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the pending request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm  = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				set_bit(CONF_CONNECT_PEND, &chan->conf_state);

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: expire the channel soon. */
				__clear_chan_timer(chan);
				__set_chan_timer(chan, HZ / 10);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* We owe the peer a connect response. */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					/* Wait for userspace authorization;
					 * wake the listening parent. */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4109 
/* HCI callback: one ACL fragment arrived. Reassemble fragments into a
 * complete L2CAP frame in conn->rx_skb, then hand the whole frame to
 * l2cap_recv_frame(). The fragment skb is always consumed. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the tail of the previous frame was lost; start over. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject early if the announced frame would exceed the
		 * channel's receive MTU.
		 * NOTE(review): the bh_unlock_sock calls imply that
		 * l2cap_get_chan_by_scid returns with the socket
		 * bh-locked — confirm against its definition. */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received: ownership of rx_skb
			 * passes to l2cap_recv_frame(). */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Frees the fragment; the reassembled frame lives in rx_skb. */
	kfree_skb(skb);
	return 0;
}
4220 
4221 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4222 {
4223 	struct l2cap_chan *c;
4224 
4225 	read_lock_bh(&chan_list_lock);
4226 
4227 	list_for_each_entry(c, &chan_list, global_l) {
4228 		struct sock *sk = c->sk;
4229 
4230 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4231 					batostr(&bt_sk(sk)->src),
4232 					batostr(&bt_sk(sk)->dst),
4233 					c->state, __le16_to_cpu(c->psm),
4234 					c->scid, c->dcid, c->imtu, c->omtu,
4235 					c->sec_level, c->mode);
4236 }
4237 
4238 	read_unlock_bh(&chan_list_lock);
4239 
4240 	return 0;
4241 }
4242 
/* debugfs open hook: bind the seq_file show routine to this file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4247 
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4254 
/* debugfs dentry, created in l2cap_init() and removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
4256 
/* Callbacks registered with the HCI core; the entry points into L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4267 
4268 int __init l2cap_init(void)
4269 {
4270 	int err;
4271 
4272 	err = l2cap_init_sockets();
4273 	if (err < 0)
4274 		return err;
4275 
4276 	err = hci_register_proto(&l2cap_hci_proto);
4277 	if (err < 0) {
4278 		BT_ERR("L2CAP protocol registration failed");
4279 		bt_sock_unregister(BTPROTO_L2CAP);
4280 		goto error;
4281 	}
4282 
4283 	if (bt_debugfs) {
4284 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4285 					bt_debugfs, NULL, &l2cap_debugfs_fops);
4286 		if (!l2cap_debugfs)
4287 			BT_ERR("Failed to create L2CAP debug file");
4288 	}
4289 
4290 	return 0;
4291 
4292 error:
4293 	l2cap_cleanup_sockets();
4294 	return err;
4295 }
4296 
4297 void l2cap_exit(void)
4298 {
4299 	debugfs_remove(l2cap_debugfs);
4300 
4301 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4302 		BT_ERR("L2CAP protocol unregistration failed");
4303 
4304 	l2cap_cleanup_sockets();
4305 }
4306 
/* Allow disabling ERTM at load time; writable via module sysfs (0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4309