// SPDX-License-Identifier: GPL-2.0-only
/*
   Copyright (c) 2013-2014 Intel Corp.

*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/netdev_lock.h>
#include <net/pkt_sched.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include <net/6lowpan.h> /* for the compression support */

#define VERSION "0.1"

static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

#define IFACE_NAME_TEMPLATE "bt%d"

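/* Per-packet state, stored in the skb control buffer (skb->cb) via the
 * lowpan_cb() accessor below.
 */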
struct skb_cb {
	struct in6_addr addr;
	struct in6_addr gw;
	struct l2cap_chan *chan;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains those devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There exists one
 * virtual device per BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_btle_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);

static bool enable_6lowpan;

/* We are listening for incoming connections via this channel */
static struct l2cap_chan *listen_chan;
static DEFINE_MUTEX(set_lock);

enum {
	LOWPAN_PEER_CLOSING,
	LOWPAN_PEER_MAXBITS
};

struct lowpan_peer {
	struct list_head list;
	struct rcu_head rcu;
	struct l2cap_chan *chan;

	/* peer addresses in various formats */
	unsigned char lladdr[ETH_ALEN];
	struct in6_addr peer_addr;

	DECLARE_BITMAP(flags, LOWPAN_PEER_MAXBITS);
};

struct lowpan_btle_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_btle_dev *
lowpan_btle_dev(const struct net_device *netdev)
{
	return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
}

static inline void peer_add(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_add_rcu(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_del_rcu(&peer->list);
	kfree_rcu(peer, rcu);

	module_put(THIS_MODULE);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

static inline struct lowpan_peer *
__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan == chan)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *
__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan->conn == conn)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
	int count = atomic_read(&dev->peer_count);
	const struct in6_addr *nexthop;
	struct lowpan_peer *peer;
	struct neighbour *neigh;

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	if (!rt) {
		if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
			/* There is neither route nor gateway,
			 * probably the destination is a direct peer.
			 */
			nexthop = daddr;
		} else {
			/* There is a known gateway */
			nexthop = &lowpan_cb(skb)->gw;
		}
	} else {
		nexthop = rt6_nexthop(rt, daddr);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %u ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
			rcu_read_unlock();
			return peer;
		}
	}

	/* use the neighbour cache for matching addresses assigned by SLAAC */
	neigh = __ipv6_neigh_lookup(dev->netdev, nexthop);
	if (neigh) {
		list_for_each_entry_rcu(peer, &dev->peers, list) {
			if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) {
				neigh_release(neigh);
				rcu_read_unlock();
				return peer;
			}
		}
		neigh_release(neigh);
	}

	rcu_read_unlock();

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		peer = __peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	rcu_read_unlock();

	return peer;
}

static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	rcu_read_unlock();

	return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return NET_RX_DROP;

	return netif_rx(skb_cp);
}

static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
			   struct lowpan_peer *peer)
{
	const u8 *saddr;

	saddr = peer->lladdr;

	return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
}

static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct lowpan_peer *peer)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
		goto drop;

	skb_reset_network_header(skb);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	/* check that it's our buffer */
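	/* Dispatch values: 0x41 means an uncompressed IPv6 header follows,
	 * while an IPHC-compressed header starts with bits 011
	 * (RFC 4944 / RFC 6282).
	 */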
	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(skb, 1);

		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;
		local_skb->dev = dev;

		skb_reset_mac_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
		local_skb = skb_clone(skb, GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->dev = dev;

		ret = iphc_decompress(local_skb, dev, peer);
		if (ret < 0) {
			BT_DBG("iphc_decompress failed: %d", ret);
			kfree_skb(local_skb);
			goto drop;
		}

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else {
		BT_DBG("unknown packet type");
		goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	return NET_RX_DROP;
}

/* Packet from BT LE device */
static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(chan->conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, peer);
	if (err) {
		BT_DBG("recv pkt %d", err);
		err = -EAGAIN;
	}

	return err;
}

static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct ipv6hdr *hdr;
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	u8 *daddr;
	int err, status = 0;

	hdr = ipv6_hdr(skb);

	dev = lowpan_btle_dev(netdev);

	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
		lowpan_cb(skb)->chan = NULL;
		daddr = NULL;
	} else {
		BT_DBG("dest IP %pI6c", &ipv6_daddr);

		/* The packet might be sent to the 6lowpan interface
		 * because of routing (either via default route
		 * or user set route) so get the peer according to
		 * the destination address.
		 */
		peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
		if (!peer) {
			BT_DBG("no such peer");
			return -ENOENT;
		}

		daddr = peer->lladdr;
		*peer_addr = peer->chan->dst;
		*peer_addr_type = peer->chan->dst_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}

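/* The actual 6LoWPAN header is built by setup_header() at transmit time;
 * here we only check that the upper layer is handing us IPv6.
 */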
static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	if (type != ETH_P_IPV6)
		return -EINVAL;

	return 0;
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	memset(&msg, 0, sizeof(msg));
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len);

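	/* l2cap_chan_send() returns the number of bytes queued on success,
	 * or a negative error code.
	 */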
	err = l2cap_chan_send(chan, &msg, skb->len, NULL);
	if (err > 0) {
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	if (err < 0)
		netdev->stats.tx_errors++;

	return err;
}

static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_btle_dev *entry;
	int err = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry;
		struct lowpan_btle_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_btle_dev(entry->netdev);

		list_for_each_entry_rcu(pentry, &dev->peers, list) {
			int ret;

			local_skb = skb_clone(skb, GFP_ATOMIC);

			BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
			       netdev->name,
			       &pentry->chan->dst, pentry->chan->dst_type,
			       &pentry->peer_addr, pentry->chan);
			ret = send_pkt(pentry->chan, local_skb, netdev);
			if (ret < 0)
				err = ret;

			kfree_skb(local_skb);
		}
	}

	rcu_read_unlock();

	return err;
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	/* Return values from setup_header()
	 * <0 - error, packet is dropped
	 *  0 - this is a multicast packet
	 *  1 - this is a unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}

static int bt_dev_init(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_init		= bt_dev_init,
	.ndo_start_xmit		= bt_xmit,
};

static const struct header_ops header_ops = {
	.create			= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->flags		= IFF_RUNNING | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;
	dev->tx_queue_len	= DEFAULT_TX_QUEUE_LEN;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->needs_free_netdev	= true;
}

static const struct device_type bt_type = {
	.name	= "bluetooth",
};

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev, NULL);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void ifdown(struct net_device *netdev)
{
	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
						   notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	if (!enable_6lowpan)
		return false;

	return true;
}

static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
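	/* RFC 7668 (IPv6 over BT LE) requires support for the IPv6 minimum
	 * link MTU of 1280 bytes.
	 */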
	chan->imtu = 1280;

	return chan;
}

static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_btle_dev *dev,
					bool new_netdev)
{
	struct lowpan_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;

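	/* bdaddr_t is stored in little-endian order; baswap() reverses the
	 * bytes to yield the big-endian EUI-48 link-layer address.
	 */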
	baswap((void *)peer->lladdr, &chan->dst);

	lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	spin_unlock(&devices_lock);

	/* Notifying peers about us needs to be done without locks held */
	if (new_netdev)
		INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}

static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
	struct net_device *netdev;
	bdaddr_t addr;
	int err;

	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
			      netdev_setup);
	if (!netdev)
		return -ENOMEM;

	netdev->addr_assign_type = NET_ADDR_PERM;
	baswap(&addr, &chan->src);
	__dev_addr_set(netdev, &addr, sizeof(addr));

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	*dev = lowpan_btle_dev(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
	spin_unlock(&devices_lock);

	err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		spin_lock(&devices_lock);
		list_del_rcu(&(*dev)->list);
		spin_unlock(&devices_lock);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	return 0;

out:
	return err;
}

static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;
	bool new_netdev = false;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
		new_netdev = true;
	}

	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev, new_netdev);
	ifup(dev->netdev);
}

static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
	struct l2cap_chan *chan;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = pchan->ops;

	BT_DBG("chan %p pchan %p", chan, pchan);

	return chan;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_btle_dev *entry = container_of(work,
						     struct lowpan_btle_dev,
						     delete_netdev);

	lowpan_unregister_netdev(entry->netdev);

	/* The entry pointer is deleted by the netdev destructor. */
}

static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	bool last = false, remove = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		remove = false;
	}

	spin_lock(&devices_lock);

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		dev = lowpan_btle_dev(entry->netdev);
		peer = __peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %u", chan,
			       kref_read(&chan->kref));

			l2cap_chan_put(chan);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		spin_unlock(&devices_lock);

		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (remove) {
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		spin_unlock(&devices_lock);
	}
}

static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}

static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					 unsigned long hdr_len,
					 unsigned long len, int nb)
{
	struct sk_buff *skb;

	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is called originally from netdev hard xmit
	 * function in atomic context.
	 */
	skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	return skb;
}

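/* The L2CAP core suspends the channel when it cannot accept more data
 * (e.g. the LE flow-control credits are exhausted); pause the netdev
 * queue until chan_resume_cb() is called.
 */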
static void chan_suspend_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p suspend", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_stop_queue(dev->netdev);
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p resume", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_wake_queue(dev->netdev);
}

static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};

static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

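	/* Connect to the peer's Internet Protocol Support Profile (IPSP)
	 * service; L2CAP_PSM_IPSP is 0x0023.
	 */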
	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type, L2CAP_CONN_TIMEOUT);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *peer;

	BT_DBG("conn %p dst type %u", conn, dst_type);

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	BT_DBG("peer %p chan %p", peer, peer->chan);

	l2cap_chan_lock(peer->chan);
	l2cap_chan_close(peer->chan, ENOENT);
	l2cap_chan_unlock(peer->chan);

	return 0;
}

static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *chan;
	int err;

	if (!enable_6lowpan)
		return NULL;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = &bt_6lowpan_chan_ops;
	chan->state = BT_LISTEN;
	chan->src_type = BDADDR_LE_PUBLIC;

	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

	BT_DBG("chan %p src type %u", chan, chan->src_type);

	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
	if (err) {
		l2cap_chan_put(chan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return chan;
}

static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn, bool disconnect)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int le_addr_type;
	int n;

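	/* The address is written most-significant byte first, while bdaddr_t
	 * stores the bytes in little-endian order, hence b[5]..b[0].
	 */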
	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	if (disconnect) {
		/* The "disconnect" debugfs command has used different address
		 * type constants than "connect" since 2015. Let's retain that
		 * for now even though it's obviously buggy...
		 */
		*addr_type += 1;
	}

	switch (*addr_type) {
	case BDADDR_LE_PUBLIC:
		le_addr_type = ADDR_LE_DEV_PUBLIC;
		break;
	case BDADDR_LE_RANDOM:
		le_addr_type = ADDR_LE_DEV_RANDOM;
		break;
	default:
		return -EINVAL;
	}

	/* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
	hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_le(hdev, addr, le_addr_type);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	if (!hcon)
		return -ENOENT;

	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %u", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}

static void disconnect_all_peers(void)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer;
	int nchans;

	/* l2cap_chan_close() cannot be called from RCU, and lock ordering
	 * chan->lock > devices_lock prevents taking write side lock, so copy
	 * then close.
	 */

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list)
		list_for_each_entry_rcu(peer, &entry->peers, list)
			clear_bit(LOWPAN_PEER_CLOSING, peer->flags);
	rcu_read_unlock();

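	/* Collect up to 32 channel references per pass while holding the
	 * lock, then close them with the lock dropped; repeat until no
	 * unclosed peers remain.
	 */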
	do {
		struct l2cap_chan *chans[32];
		int i;

		nchans = 0;

		spin_lock(&devices_lock);

		list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
			list_for_each_entry_rcu(peer, &entry->peers, list) {
				if (test_and_set_bit(LOWPAN_PEER_CLOSING,
						     peer->flags))
					continue;

				l2cap_chan_hold(peer->chan);
				chans[nchans++] = peer->chan;

				if (nchans >= ARRAY_SIZE(chans))
					goto done;
			}
		}

done:
		spin_unlock(&devices_lock);

		for (i = 0; i < nchans; ++i) {
			l2cap_chan_lock(chans[i]);
			l2cap_chan_close(chans[i], ENOENT);
			l2cap_chan_unlock(chans[i]);
			l2cap_chan_put(chans[i]);
		}
	} while (nchans);
}

struct set_enable {
	struct work_struct work;
	bool flag;
};

static void do_enable_set(struct work_struct *work)
{
	struct set_enable *set_enable = container_of(work,
						     struct set_enable, work);

	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
		/* Disconnect existing connections if 6lowpan is
		 * disabled
		 */
		disconnect_all_peers();

	enable_6lowpan = set_enable->flag;

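	/* Recreate the listening channel so that its presence matches the
	 * new enable setting (bt_6lowpan_listen() returns NULL when
	 * 6lowpan is disabled).
	 */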
	mutex_lock(&set_lock);
	if (listen_chan) {
		l2cap_chan_lock(listen_chan);
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_unlock(listen_chan);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();
	mutex_unlock(&set_lock);

	kfree(set_enable);
}

static int lowpan_enable_set(void *data, u64 val)
{
	struct set_enable *set_enable;

	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
	if (!set_enable)
		return -ENOMEM;

	set_enable->flag = !!val;
	INIT_WORK(&set_enable->work, do_enable_set);

	schedule_work(&set_enable->work);

	return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			 lowpan_enable_set, "%llu\n");

static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

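	/* Accepted commands:
	 *   connect <bdaddr> <type>
	 *   disconnect <bdaddr> <type>
	 */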
	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn, false);
		if (ret == -EINVAL)
			return ret;

		mutex_lock(&set_lock);
		if (listen_chan) {
			l2cap_chan_lock(listen_chan);
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_unlock(listen_chan);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}
		mutex_unlock(&set_lock);

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %u", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn, true);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}

static int lowpan_control_show(struct seq_file *f, void *ptr)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer;

	spin_lock(&devices_lock);

	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry(peer, &entry->peers, list)
			seq_printf(f, "%pMR (type %u)\n",
				   &peer->chan->dst, peer->chan->dst_type);
	}

	spin_unlock(&devices_lock);

	return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void disconnect_devices(void)
{
	struct lowpan_btle_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		lowpan_unregister_netdev(entry->netdev);
		kfree(entry);
	}
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_btle_dev *entry;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable",
							   0644, bt_debugfs,
							   NULL,
							   &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_lock(listen_chan);
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_unlock(listen_chan);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");