xref: /linux/net/can/af_can.c (revision 4e0385dd7469d933c4adf84a617f872ca547aa07)
1 /*
2  * af_can.c - Protocol family CAN core module
3  *            (used by different CAN protocol modules)
4  *
5  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of Volkswagen nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * Alternatively, provided that this notice is retained in full, this
21  * software may be distributed under the terms of the GNU General
22  * Public License ("GPL") version 2, in which case the provisions of the
23  * GPL apply INSTEAD OF those given above.
24  *
25  * The provided data structures and external interfaces from this code
26  * are not restricted to be used by modules with a GPL compatible license.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39  * DAMAGE.
40  *
41  */
42 
43 #include <linux/module.h>
44 #include <linux/stddef.h>
45 #include <linux/init.h>
46 #include <linux/kmod.h>
47 #include <linux/slab.h>
48 #include <linux/list.h>
49 #include <linux/spinlock.h>
50 #include <linux/rcupdate.h>
51 #include <linux/uaccess.h>
52 #include <linux/net.h>
53 #include <linux/netdevice.h>
54 #include <linux/socket.h>
55 #include <linux/if_ether.h>
56 #include <linux/if_arp.h>
57 #include <linux/skbuff.h>
58 #include <linux/can.h>
59 #include <linux/can/core.h>
60 #include <linux/can/skb.h>
61 #include <linux/ratelimit.h>
62 #include <net/net_namespace.h>
63 #include <net/sock.h>
64 
65 #include "af_can.h"
66 
67 MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
68 MODULE_LICENSE("Dual BSD/GPL");
69 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
70 	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
71 
72 MODULE_ALIAS_NETPROTO(PF_CAN);
73 
74 static int stats_timer __read_mostly = 1;
75 module_param(stats_timer, int, S_IRUGO);
76 MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
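
/*
 * Editorial usage note (not part of the original source): the statistics
 * timer can be disabled at module load time, e.g. "modprobe can stats_timer=0".
 * The parameter is read-only at runtime (S_IRUGO).
 */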
77 
78 /* receive filters subscribed for 'all' CAN devices */
79 struct dev_rcv_lists can_rx_alldev_list;
80 static DEFINE_SPINLOCK(can_rcvlists_lock);
81 
82 static struct kmem_cache *rcv_cache __read_mostly;
83 
84 /* table of registered CAN protocols */
85 static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
86 static DEFINE_MUTEX(proto_tab_lock);
87 
88 struct timer_list can_stattimer;   /* timer for statistics update */
89 struct s_stats    can_stats;       /* packet statistics */
90 struct s_pstats   can_pstats;      /* receive list statistics */
91 
92 static atomic_t skbcounter = ATOMIC_INIT(0);
93 
94 /*
95  * af_can socket functions
96  */
97 
98 int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
99 {
100 	struct sock *sk = sock->sk;
101 
102 	switch (cmd) {
103 
104 	case SIOCGSTAMP:
105 		return sock_get_timestamp(sk, (struct timeval __user *)arg);
106 
107 	default:
108 		return -ENOIOCTLCMD;
109 	}
110 }
111 EXPORT_SYMBOL(can_ioctl);
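
/*
 * Illustrative sketch (editorial, not part of the original file): a CAN
 * protocol module typically reuses can_ioctl() in its proto_ops so that
 * SIOCGSTAMP behaves the same for every PF_CAN socket type. The struct
 * below is a hypothetical, abridged example; all names are assumptions.
 */
#if 0	/* example only, never compiled */
static const struct proto_ops myproto_ops = {
	.family = PF_CAN,
	.owner  = THIS_MODULE,
	.ioctl  = can_ioctl,	/* generic PF_CAN ioctl handling from above */
	/* .release, .bind, .sendmsg, .recvmsg, ... provided by the module */
};
#endif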
112 
113 static void can_sock_destruct(struct sock *sk)
114 {
115 	skb_queue_purge(&sk->sk_receive_queue);
116 }
117 
118 static const struct can_proto *can_get_proto(int protocol)
119 {
120 	const struct can_proto *cp;
121 
122 	rcu_read_lock();
123 	cp = rcu_dereference(proto_tab[protocol]);
124 	if (cp && !try_module_get(cp->prot->owner))
125 		cp = NULL;
126 	rcu_read_unlock();
127 
128 	return cp;
129 }
130 
131 static inline void can_put_proto(const struct can_proto *cp)
132 {
133 	module_put(cp->prot->owner);
134 }
135 
136 static int can_create(struct net *net, struct socket *sock, int protocol,
137 		      int kern)
138 {
139 	struct sock *sk;
140 	const struct can_proto *cp;
141 	int err = 0;
142 
143 	sock->state = SS_UNCONNECTED;
144 
145 	if (protocol < 0 || protocol >= CAN_NPROTO)
146 		return -EINVAL;
147 
148 	if (!net_eq(net, &init_net))
149 		return -EAFNOSUPPORT;
150 
151 	cp = can_get_proto(protocol);
152 
153 #ifdef CONFIG_MODULES
154 	if (!cp) {
155 		/* try to load protocol module if kernel is modular */
156 
157 		err = request_module("can-proto-%d", protocol);
158 
159 		/*
160 		 * In case of error we only print a message but don't
161 		 * return the error code immediately.  Below we will
162 		 * return -EPROTONOSUPPORT
163 		 */
164 		if (err)
165 			printk_ratelimited(KERN_ERR "can: request_module "
166 			       "(can-proto-%d) failed.\n", protocol);
167 
168 		cp = can_get_proto(protocol);
169 	}
170 #endif
171 
172 	/* check for available protocol and correct usage */
173 
174 	if (!cp)
175 		return -EPROTONOSUPPORT;
176 
177 	if (cp->type != sock->type) {
178 		err = -EPROTOTYPE;
179 		goto errout;
180 	}
181 
182 	sock->ops = cp->ops;
183 
184 	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
185 	if (!sk) {
186 		err = -ENOMEM;
187 		goto errout;
188 	}
189 
190 	sock_init_data(sock, sk);
191 	sk->sk_destruct = can_sock_destruct;
192 
193 	if (sk->sk_prot->init)
194 		err = sk->sk_prot->init(sk);
195 
196 	if (err) {
197 		/* release sk on errors */
198 		sock_orphan(sk);
199 		sock_put(sk);
200 	}
201 
202  errout:
203 	can_put_proto(cp);
204 	return err;
205 }
206 
207 /*
208  * af_can tx path
209  */
210 
211 /**
212  * can_send - transmit a CAN frame (optionally with local loopback)
213  * @skb: pointer to socket buffer with CAN frame in data section
214  * @loop: loopback for listeners on local CAN sockets (recommended default!)
215  *
216  * Due to the loopback this routine must not be called from hardirq context.
217  *
218  * Return:
219  *  0 on success
220  *  -ENETDOWN when the selected interface is down
221  *  -ENOBUFS on full driver queue (see net_xmit_errno())
222  *  -ENOMEM when cloning the skb for the local loopback failed
223  *  -EPERM when trying to send on a non-CAN interface
224  *  -EMSGSIZE when the CAN frame size exceeds the CAN interface MTU
225  *  -EINVAL when the skb->data does not contain a valid CAN frame
226  */
227 int can_send(struct sk_buff *skb, int loop)
228 {
229 	struct sk_buff *newskb = NULL;
230 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
231 	int err = -EINVAL;
232 
233 	if (skb->len == CAN_MTU) {
234 		skb->protocol = htons(ETH_P_CAN);
235 		if (unlikely(cfd->len > CAN_MAX_DLEN))
236 			goto inval_skb;
237 	} else if (skb->len == CANFD_MTU) {
238 		skb->protocol = htons(ETH_P_CANFD);
239 		if (unlikely(cfd->len > CANFD_MAX_DLEN))
240 			goto inval_skb;
241 	} else
242 		goto inval_skb;
243 
244 	/*
245 	 * Make sure the CAN frame can pass the selected CAN netdevice.
246 	 * As structs can_frame and canfd_frame are similar, we can provide
247 	 * CAN FD frames to legacy CAN drivers as long as the length is <= 8
248 	 */
249 	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
250 		err = -EMSGSIZE;
251 		goto inval_skb;
252 	}
253 
254 	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
255 		err = -EPERM;
256 		goto inval_skb;
257 	}
258 
259 	if (unlikely(!(skb->dev->flags & IFF_UP))) {
260 		err = -ENETDOWN;
261 		goto inval_skb;
262 	}
263 
264 	skb->ip_summed = CHECKSUM_UNNECESSARY;
265 
266 	skb_reset_mac_header(skb);
267 	skb_reset_network_header(skb);
268 	skb_reset_transport_header(skb);
269 
270 	if (loop) {
271 		/* local loopback of sent CAN frames */
272 
273 		/* indication for the CAN driver: do loopback */
274 		skb->pkt_type = PACKET_LOOPBACK;
275 
276 		/*
277 		 * The reference to the originating sock may be required
278 		 * by the receiving socket to check whether the frame is
279 		 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS.
280 		 * Therefore we have to ensure that skb->sk remains the
281 		 * reference to the originating sock by restoring skb->sk
282 		 * after each skb_clone() or skb_orphan() usage.
283 		 */
284 
285 		if (!(skb->dev->flags & IFF_ECHO)) {
286 			/*
287 			 * If the interface is not capable of doing the
288 			 * loopback itself, we do it here.
289 			 */
290 			newskb = skb_clone(skb, GFP_ATOMIC);
291 			if (!newskb) {
292 				kfree_skb(skb);
293 				return -ENOMEM;
294 			}
295 
296 			can_skb_set_owner(newskb, skb->sk);
297 			newskb->ip_summed = CHECKSUM_UNNECESSARY;
298 			newskb->pkt_type = PACKET_BROADCAST;
299 		}
300 	} else {
301 		/* indication for the CAN driver: no loopback required */
302 		skb->pkt_type = PACKET_HOST;
303 	}
304 
305 	/* send to netdevice */
306 	err = dev_queue_xmit(skb);
307 	if (err > 0)
308 		err = net_xmit_errno(err);
309 
310 	if (err) {
311 		kfree_skb(newskb);
312 		return err;
313 	}
314 
315 	if (newskb)
316 		netif_rx_ni(newskb);
317 
318 	/* update statistics */
319 	can_stats.tx_frames++;
320 	can_stats.tx_frames_delta++;
321 
322 	return 0;
323 
324 inval_skb:
325 	kfree_skb(skb);
326 	return err;
327 }
328 EXPORT_SYMBOL(can_send);
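
/*
 * Illustrative sketch (editorial, not part of the original file): how a
 * protocol module might build a classic CAN frame and hand it to
 * can_send(). Names and the simplified error handling are assumptions;
 * see can_raw/can_bcm for complete implementations.
 */
#if 0	/* example only, never compiled */
static int my_send_frame(struct sock *sk, int ifindex, canid_t id)
{
	struct net_device *dev = dev_get_by_index(&init_net, ifindex);
	struct sk_buff *skb;
	struct can_frame *cf;
	int err;

	if (!dev)
		return -ENODEV;

	skb = alloc_skb(CAN_MTU + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb) {
		dev_put(dev);
		return -ENOMEM;
	}

	can_skb_reserve(skb);			/* headroom for can_skb_priv */
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	cf = (struct can_frame *)skb_put(skb, CAN_MTU);	/* skb->len == CAN_MTU */
	memset(cf, 0, CAN_MTU);
	cf->can_id = id;
	cf->can_dlc = 0;			/* no payload in this sketch */

	skb->dev = dev;
	skb->sk = sk;

	err = can_send(skb, 1);			/* with local loopback */
	dev_put(dev);
	return err;
}
#endif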
329 
330 /*
331  * af_can rx path
332  */
333 
334 static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
335 {
336 	if (!dev)
337 		return &can_rx_alldev_list;
338 	else
339 		return (struct dev_rcv_lists *)dev->ml_priv;
340 }
341 
342 /**
343  * effhash - hash function for 29 bit CAN identifier reduction
344  * @can_id: 29 bit CAN identifier
345  *
346  * Description:
347  *  To reduce the linear traversal in one linked list of _single_ EFF CAN
348  *  frame subscriptions, the 29 bit identifier is mapped to 10 bits.
349  *  (see CAN_EFF_RCV_HASH_BITS definition)
350  *
351  * Return:
352  *  Hash value from 0x000 to 0x3FF (enforced by the CAN_EFF_RCV_HASH_BITS mask)
353  */
354 static unsigned int effhash(canid_t can_id)
355 {
356 	unsigned int hash;
357 
358 	hash = can_id;
359 	hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
360 	hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
361 
362 	return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
363 }
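
/*
 * Editorial worked example (assuming CAN_EFF_RCV_HASH_BITS == 10, as the
 * 0x000-0x3FF range above implies): for can_id 0x1ABCDE12 the fold is
 * 0x1ABCDE12 ^ (0x1ABCDE12 >> 10) ^ (0x1ABCDE12 >> 20); masking the result
 * with 0x3FF yields 0x08E, so such a subscription lands in rx_eff[0x08E].
 */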
364 
365 /**
366  * find_rcv_list - determine optimal filterlist inside device filter struct
367  * @can_id: pointer to CAN identifier of a given can_filter
368  * @mask: pointer to CAN mask of a given can_filter
369  * @d: pointer to the device filter struct
370  *
371  * Description:
372  *  Returns the optimal filterlist to reduce the filter handling in the
373  *  receive path. This function is called by service functions that need
374  *  to register or unregister a can_filter in the filter lists.
375  *
376  *  A filter matches in general, when
377  *
378  *          <received_can_id> & mask == can_id & mask
379  *
380  *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
381  *  relevant bits for the filter.
382  *
383  *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
384  *  filter for error message frames (CAN_ERR_FLAG bit set in mask). For error
385  *  message frames there is a special filterlist and special rx path filter handling.
386  *
387  * Return:
388  *  Pointer to optimal filterlist for the given can_id/mask pair.
389  *  Consistency-checked mask.
390  *  Reduced can_id to have a preprocessed filter compare value.
391  */
392 static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
393 					struct dev_rcv_lists *d)
394 {
395 	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
396 
397 	/* filter for error message frames in extra filterlist */
398 	if (*mask & CAN_ERR_FLAG) {
399 		/* clear CAN_ERR_FLAG in filter entry */
400 		*mask &= CAN_ERR_MASK;
401 		return &d->rx[RX_ERR];
402 	}
403 
404 	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
405 
406 #define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
407 
408 	/* ensure valid values in can_mask for 'SFF only' frame filtering */
409 	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
410 		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
411 
412 	/* reduce condition testing at receive time */
413 	*can_id &= *mask;
414 
415 	/* inverse can_id/can_mask filter */
416 	if (inv)
417 		return &d->rx[RX_INV];
418 
419 	/* mask == 0 => no condition testing at receive time */
420 	if (!(*mask))
421 		return &d->rx[RX_ALL];
422 
423 	/* extra filterlists for the subscription of a single non-RTR can_id */
424 	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
425 	    !(*can_id & CAN_RTR_FLAG)) {
426 
427 		if (*can_id & CAN_EFF_FLAG) {
428 			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
429 				return &d->rx_eff[effhash(*can_id)];
430 		} else {
431 			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
432 				return &d->rx_sff[*can_id];
433 		}
434 	}
435 
436 	/* default: filter via can_id/can_mask */
437 	return &d->rx[RX_FIL];
438 }
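
/*
 * Editorial examples for the list selection above (filter values chosen
 * purely for illustration):
 *   can_id 0x123, mask (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)
 *                                    -> d->rx_sff[0x123] (single SFF id)
 *   can_id 0, mask 0                 -> d->rx[RX_ALL]    (receive everything)
 *   can_id (0x123 | CAN_INV_FILTER), mask CAN_SFF_MASK
 *                                    -> d->rx[RX_INV]    (inverted filter)
 *   can_id 0, mask CAN_ERR_FLAG      -> d->rx[RX_ERR]    (error message frames)
 */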
439 
440 /**
441  * can_rx_register - subscribe CAN frames from a specific interface
442  * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
443  * @can_id: CAN identifier (see description)
444  * @mask: CAN mask (see description)
445  * @func: callback function on filter match
446  * @data: parameter passed back to the callback function on a filter match
447  * @ident: string for calling module identification
448  * @sk: socket pointer (might be NULL)
449  *
450  * Description:
451  *  Invokes the callback function with the received sk_buff and the given
452  *  parameter 'data' on a matching receive filter. A filter matches, when
453  *
454  *          <received_can_id> & mask == can_id & mask
455  *
456  *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
457  *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
458  *
459  *  The provided pointer to the sk_buff is guaranteed to be valid as long as
460  *  the callback function is running. The callback function must *not* free
461  * the given sk_buff while processing its task. When the given sk_buff is
462  *  needed after the end of the callback function it must be cloned inside
463  *  the callback function with skb_clone().
464  *
465  * Return:
466  *  0 on success
467  *  -ENOMEM on missing cache mem to create subscription entry
468  *  -ENODEV unknown device
469  */
470 int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
471 		    void (*func)(struct sk_buff *, void *), void *data,
472 		    char *ident, struct sock *sk)
473 {
474 	struct receiver *r;
475 	struct hlist_head *rl;
476 	struct dev_rcv_lists *d;
477 	int err = 0;
478 
479 	/* insert new receiver  (dev,canid,mask) -> (func,data) */
480 
481 	if (dev && dev->type != ARPHRD_CAN)
482 		return -ENODEV;
483 
484 	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
485 	if (!r)
486 		return -ENOMEM;
487 
488 	spin_lock(&can_rcvlists_lock);
489 
490 	d = find_dev_rcv_lists(dev);
491 	if (d) {
492 		rl = find_rcv_list(&can_id, &mask, d);
493 
494 		r->can_id  = can_id;
495 		r->mask    = mask;
496 		r->matches = 0;
497 		r->func    = func;
498 		r->data    = data;
499 		r->ident   = ident;
500 		r->sk      = sk;
501 
502 		hlist_add_head_rcu(&r->list, rl);
503 		d->entries++;
504 
505 		can_pstats.rcv_entries++;
506 		if (can_pstats.rcv_entries_max < can_pstats.rcv_entries)
507 			can_pstats.rcv_entries_max = can_pstats.rcv_entries;
508 	} else {
509 		kmem_cache_free(rcv_cache, r);
510 		err = -ENODEV;
511 	}
512 
513 	spin_unlock(&can_rcvlists_lock);
514 
515 	return err;
516 }
517 EXPORT_SYMBOL(can_rx_register);
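
/*
 * Illustrative sketch (editorial, not part of the original file): a
 * hypothetical protocol module subscribing to a single SFF identifier.
 * The callback runs under RCU protection; it must not free the skb and
 * has to skb_clone() it if the frame is needed afterwards (see above).
 */
#if 0	/* example only, never compiled */
static void my_rx_handler(struct sk_buff *skb, void *data)
{
	const struct can_frame *cf = (struct can_frame *)skb->data;

	pr_debug("can example: id %03X dlc %d (priv %p)\n",
		 cf->can_id, cf->can_dlc, data);
}

static int my_subscribe(struct net_device *dev, void *priv, struct sock *sk)
{
	/* match exactly CAN id 0x123: SFF only, non-RTR, no error frames */
	return can_rx_register(dev, 0x123,
			       CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
			       my_rx_handler, priv, "myproto", sk);
}
#endif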
518 
519 /*
520  * can_rx_delete_receiver - rcu callback for single receiver entry removal
521  */
522 static void can_rx_delete_receiver(struct rcu_head *rp)
523 {
524 	struct receiver *r = container_of(rp, struct receiver, rcu);
525 	struct sock *sk = r->sk;
526 
527 	kmem_cache_free(rcv_cache, r);
528 	if (sk)
529 		sock_put(sk);
530 }
531 
532 /**
533  * can_rx_unregister - unsubscribe CAN frames from a specific interface
534  * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
535  * @can_id: CAN identifier
536  * @mask: CAN mask
537  * @func: callback function on filter match
538  * @data: parameter passed back to the callback function on a filter match
539  *
540  * Description:
541  *  Removes subscription entry depending on given (subscription) values.
542  */
543 void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
544 		       void (*func)(struct sk_buff *, void *), void *data)
545 {
546 	struct receiver *r = NULL;
547 	struct hlist_head *rl;
548 	struct dev_rcv_lists *d;
549 
550 	if (dev && dev->type != ARPHRD_CAN)
551 		return;
552 
553 	spin_lock(&can_rcvlists_lock);
554 
555 	d = find_dev_rcv_lists(dev);
556 	if (!d) {
557 		pr_err("BUG: receive list not found for "
558 		       "dev %s, id %03X, mask %03X\n",
559 		       DNAME(dev), can_id, mask);
560 		goto out;
561 	}
562 
563 	rl = find_rcv_list(&can_id, &mask, d);
564 
565 	/*
566 	 * Search the receiver list for the item to delete.  This should
567 	 * exist, since no receiver may be unregistered that hasn't
568 	 * been registered before.
569 	 */
570 
571 	hlist_for_each_entry_rcu(r, rl, list) {
572 		if (r->can_id == can_id && r->mask == mask &&
573 		    r->func == func && r->data == data)
574 			break;
575 	}
576 
577 	/*
578 	 * Check for bugs in CAN protocol implementations using af_can.c:
579 	 * 'r' will be NULL if no matching list item was found for removal.
580 	 */
581 
582 	if (!r) {
583 		WARN(1, "BUG: receive list entry not found for dev %s, "
584 		     "id %03X, mask %03X\n", DNAME(dev), can_id, mask);
585 		goto out;
586 	}
587 
588 	hlist_del_rcu(&r->list);
589 	d->entries--;
590 
591 	if (can_pstats.rcv_entries > 0)
592 		can_pstats.rcv_entries--;
593 
594 	/* remove device structure requested by NETDEV_UNREGISTER */
595 	if (d->remove_on_zero_entries && !d->entries) {
596 		kfree(d);
597 		dev->ml_priv = NULL;
598 	}
599 
600  out:
601 	spin_unlock(&can_rcvlists_lock);
602 
603 	/* schedule the receiver item for deletion */
604 	if (r) {
605 		if (r->sk)
606 			sock_hold(r->sk);
607 		call_rcu(&r->rcu, can_rx_delete_receiver);
608 	}
609 }
610 EXPORT_SYMBOL(can_rx_unregister);
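
/*
 * Editorial counterpart to the sketch after can_rx_register(): the
 * unregister call must repeat the exact (dev, can_id, mask, func, data)
 * tuple used at registration time, otherwise the WARN above triggers and
 * no entry is removed.
 */
#if 0	/* example only, never compiled */
static void my_unsubscribe(struct net_device *dev, void *priv)
{
	can_rx_unregister(dev, 0x123,
			  CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
			  my_rx_handler, priv);
}
#endif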
611 
612 static inline void deliver(struct sk_buff *skb, struct receiver *r)
613 {
614 	r->func(skb, r->data);
615 	r->matches++;
616 }
617 
618 static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
619 {
620 	struct receiver *r;
621 	int matches = 0;
622 	struct can_frame *cf = (struct can_frame *)skb->data;
623 	canid_t can_id = cf->can_id;
624 
625 	if (d->entries == 0)
626 		return 0;
627 
628 	if (can_id & CAN_ERR_FLAG) {
629 		/* check for error message frame entries only */
630 		hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
631 			if (can_id & r->mask) {
632 				deliver(skb, r);
633 				matches++;
634 			}
635 		}
636 		return matches;
637 	}
638 
639 	/* check for unfiltered entries */
640 	hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
641 		deliver(skb, r);
642 		matches++;
643 	}
644 
645 	/* check for can_id/mask entries */
646 	hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
647 		if ((can_id & r->mask) == r->can_id) {
648 			deliver(skb, r);
649 			matches++;
650 		}
651 	}
652 
653 	/* check for inverted can_id/mask entries */
654 	hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
655 		if ((can_id & r->mask) != r->can_id) {
656 			deliver(skb, r);
657 			matches++;
658 		}
659 	}
660 
661 	/* check filterlists for single non-RTR can_ids */
662 	if (can_id & CAN_RTR_FLAG)
663 		return matches;
664 
665 	if (can_id & CAN_EFF_FLAG) {
666 		hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
667 			if (r->can_id == can_id) {
668 				deliver(skb, r);
669 				matches++;
670 			}
671 		}
672 	} else {
673 		can_id &= CAN_SFF_MASK;
674 		hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
675 			deliver(skb, r);
676 			matches++;
677 		}
678 	}
679 
680 	return matches;
681 }
682 
683 static void can_receive(struct sk_buff *skb, struct net_device *dev)
684 {
685 	struct dev_rcv_lists *d;
686 	int matches;
687 
688 	/* update statistics */
689 	can_stats.rx_frames++;
690 	can_stats.rx_frames_delta++;
691 
692 	/* create non-zero unique skb identifier together with *skb */
693 	while (!(can_skb_prv(skb)->skbcnt))
694 		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
695 
696 	rcu_read_lock();
697 
698 	/* deliver the packet to sockets listening on all devices */
699 	matches = can_rcv_filter(&can_rx_alldev_list, skb);
700 
701 	/* find receive list for this device */
702 	d = find_dev_rcv_lists(dev);
703 	if (d)
704 		matches += can_rcv_filter(d, skb);
705 
706 	rcu_read_unlock();
707 
708 	/* consume the skbuff allocated by the netdevice driver */
709 	consume_skb(skb);
710 
711 	if (matches > 0) {
712 		can_stats.matches++;
713 		can_stats.matches_delta++;
714 	}
715 }
716 
717 static int can_rcv(struct sk_buff *skb, struct net_device *dev,
718 		   struct packet_type *pt, struct net_device *orig_dev)
719 {
720 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
721 
722 	if (unlikely(!net_eq(dev_net(dev), &init_net)))
723 		goto drop;
724 
725 	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
726 		      skb->len != CAN_MTU ||
727 		      cfd->len > CAN_MAX_DLEN,
728 		      "PF_CAN: dropped non-conforming CAN skbuff: "
729 		      "dev type %d, len %d, datalen %d\n",
730 		      dev->type, skb->len, cfd->len))
731 		goto drop;
732 
733 	can_receive(skb, dev);
734 	return NET_RX_SUCCESS;
735 
736 drop:
737 	kfree_skb(skb);
738 	return NET_RX_DROP;
739 }
740 
741 static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
742 		   struct packet_type *pt, struct net_device *orig_dev)
743 {
744 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
745 
746 	if (unlikely(!net_eq(dev_net(dev), &init_net)))
747 		goto drop;
748 
749 	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
750 		      skb->len != CANFD_MTU ||
751 		      cfd->len > CANFD_MAX_DLEN,
752 		      "PF_CAN: dropped non-conforming CAN FD skbuff: "
753 		      "dev type %d, len %d, datalen %d\n",
754 		      dev->type, skb->len, cfd->len))
755 		goto drop;
756 
757 	can_receive(skb, dev);
758 	return NET_RX_SUCCESS;
759 
760 drop:
761 	kfree_skb(skb);
762 	return NET_RX_DROP;
763 }
764 
765 /*
766  * af_can protocol functions
767  */
768 
769 /**
770  * can_proto_register - register CAN transport protocol
771  * @cp: pointer to CAN protocol structure
772  *
773  * Return:
774  *  0 on success
775  *  -EINVAL invalid (out of range) protocol number
776  *  -EBUSY  protocol already in use
777  *  -ENOBUFS if proto_register() fails
778  */
779 int can_proto_register(const struct can_proto *cp)
780 {
781 	int proto = cp->protocol;
782 	int err = 0;
783 
784 	if (proto < 0 || proto >= CAN_NPROTO) {
785 		pr_err("can: protocol number %d out of range\n", proto);
786 		return -EINVAL;
787 	}
788 
789 	err = proto_register(cp->prot, 0);
790 	if (err < 0)
791 		return err;
792 
793 	mutex_lock(&proto_tab_lock);
794 
795 	if (proto_tab[proto]) {
796 		pr_err("can: protocol %d already registered\n", proto);
797 		err = -EBUSY;
798 	} else
799 		RCU_INIT_POINTER(proto_tab[proto], cp);
800 
801 	mutex_unlock(&proto_tab_lock);
802 
803 	if (err < 0)
804 		proto_unregister(cp->prot);
805 
806 	return err;
807 }
808 EXPORT_SYMBOL(can_proto_register);
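
/*
 * Illustrative sketch (editorial, not part of the original file): how a
 * transport protocol module registers with PF_CAN, modelled loosely on
 * can_raw/can_bcm. All names (my_proto, my_can_proto, CAN_MYPROTO) and the
 * minimal proto setup are assumptions; a real module fills in complete
 * proto_ops and a proto with its own obj_size and init callback, and adds
 * a MODULE_ALIAS("can-proto-<n>") so request_module() in can_create() can
 * autoload it.
 */
#if 0	/* example only, never compiled */
static struct proto my_proto __read_mostly = {
	.name     = "MYPROTO",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct sock),	/* normally a larger private sock */
};

static const struct can_proto my_can_proto = {
	.type     = SOCK_DGRAM,
	.protocol = CAN_MYPROTO,		/* hypothetical protocol number */
	.ops      = &myproto_ops,		/* see the proto_ops sketch above */
	.prot     = &my_proto,
};

static __init int my_module_init(void)
{
	return can_proto_register(&my_can_proto);
}

static __exit void my_module_exit(void)
{
	can_proto_unregister(&my_can_proto);
}

module_init(my_module_init);
module_exit(my_module_exit);
#endif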
809 
810 /**
811  * can_proto_unregister - unregister CAN transport protocol
812  * @cp: pointer to CAN protocol structure
813  */
814 void can_proto_unregister(const struct can_proto *cp)
815 {
816 	int proto = cp->protocol;
817 
818 	mutex_lock(&proto_tab_lock);
819 	BUG_ON(proto_tab[proto] != cp);
820 	RCU_INIT_POINTER(proto_tab[proto], NULL);
821 	mutex_unlock(&proto_tab_lock);
822 
823 	synchronize_rcu();
824 
825 	proto_unregister(cp->prot);
826 }
827 EXPORT_SYMBOL(can_proto_unregister);
828 
829 /*
830  * af_can notifier to create/remove CAN netdevice specific structs
831  */
832 static int can_notifier(struct notifier_block *nb, unsigned long msg,
833 			void *ptr)
834 {
835 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
836 	struct dev_rcv_lists *d;
837 
838 	if (!net_eq(dev_net(dev), &init_net))
839 		return NOTIFY_DONE;
840 
841 	if (dev->type != ARPHRD_CAN)
842 		return NOTIFY_DONE;
843 
844 	switch (msg) {
845 
846 	case NETDEV_REGISTER:
847 
848 		/* create new dev_rcv_lists for this device */
849 		d = kzalloc(sizeof(*d), GFP_KERNEL);
850 		if (!d)
851 			return NOTIFY_DONE;
852 		BUG_ON(dev->ml_priv);
853 		dev->ml_priv = d;
854 
855 		break;
856 
857 	case NETDEV_UNREGISTER:
858 		spin_lock(&can_rcvlists_lock);
859 
860 		d = dev->ml_priv;
861 		if (d) {
862 			if (d->entries)
863 				d->remove_on_zero_entries = 1;
864 			else {
865 				kfree(d);
866 				dev->ml_priv = NULL;
867 			}
868 		} else
869 			pr_err("can: notifier: receive list not found for dev "
870 			       "%s\n", dev->name);
871 
872 		spin_unlock(&can_rcvlists_lock);
873 
874 		break;
875 	}
876 
877 	return NOTIFY_DONE;
878 }
879 
880 /*
881  * af_can module init/exit functions
882  */
883 
884 static struct packet_type can_packet __read_mostly = {
885 	.type = cpu_to_be16(ETH_P_CAN),
886 	.func = can_rcv,
887 };
888 
889 static struct packet_type canfd_packet __read_mostly = {
890 	.type = cpu_to_be16(ETH_P_CANFD),
891 	.func = canfd_rcv,
892 };
893 
894 static const struct net_proto_family can_family_ops = {
895 	.family = PF_CAN,
896 	.create = can_create,
897 	.owner  = THIS_MODULE,
898 };
899 
900 /* notifier block for netdevice event */
901 static struct notifier_block can_netdev_notifier __read_mostly = {
902 	.notifier_call = can_notifier,
903 };
904 
905 static __init int can_init(void)
906 {
907 	/* check for correct padding to be able to use the structs similarly */
908 	BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
909 		     offsetof(struct canfd_frame, len) ||
910 		     offsetof(struct can_frame, data) !=
911 		     offsetof(struct canfd_frame, data));
912 
913 	pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");
914 
915 	memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
916 
917 	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
918 				      0, 0, NULL);
919 	if (!rcv_cache)
920 		return -ENOMEM;
921 
922 	if (IS_ENABLED(CONFIG_PROC_FS)) {
923 		if (stats_timer) {
924 		/* the statistics are updated every second (timer triggered) */
925 			setup_timer(&can_stattimer, can_stat_update, 0);
926 			mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
927 		}
928 		can_init_proc();
929 	}
930 
931 	/* protocol register */
932 	sock_register(&can_family_ops);
933 	register_netdevice_notifier(&can_netdev_notifier);
934 	dev_add_pack(&can_packet);
935 	dev_add_pack(&canfd_packet);
936 
937 	return 0;
938 }
939 
940 static __exit void can_exit(void)
941 {
942 	struct net_device *dev;
943 
944 	if (IS_ENABLED(CONFIG_PROC_FS)) {
945 		if (stats_timer)
946 			del_timer_sync(&can_stattimer);
947 
948 		can_remove_proc();
949 	}
950 
951 	/* protocol unregister */
952 	dev_remove_pack(&canfd_packet);
953 	dev_remove_pack(&can_packet);
954 	unregister_netdevice_notifier(&can_netdev_notifier);
955 	sock_unregister(PF_CAN);
956 
957 	/* remove created dev_rcv_lists from still registered CAN devices */
958 	rcu_read_lock();
959 	for_each_netdev_rcu(&init_net, dev) {
960 		if (dev->type == ARPHRD_CAN && dev->ml_priv) {
961 
962 			struct dev_rcv_lists *d = dev->ml_priv;
963 
964 			BUG_ON(d->entries);
965 			kfree(d);
966 			dev->ml_priv = NULL;
967 		}
968 	}
969 	rcu_read_unlock();
970 
971 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
972 
973 	kmem_cache_destroy(rcv_cache);
974 }
975 
976 module_init(can_init);
977 module_exit(can_exit);
978