// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, 0444);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

static atomic_t skbcounter = ATOMIC_INIT(0);

/*
 * af_can socket functions
 */

static void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);
}

static const struct can_proto *can_get_proto(int protocol)
{
	const struct can_proto *cp;

	rcu_read_lock();
	cp = rcu_dereference(proto_tab[protocol]);
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;
	rcu_read_unlock();

	return cp;
}

static inline void can_put_proto(const struct can_proto *cp)
{
	module_put(cp->prot->owner);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	const struct can_proto *cp;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (protocol < 0 || protocol >= CAN_NPROTO)
		return -EINVAL;

	cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
	if (!cp) {
		/* try to load protocol module if kernel is modular */

		err = request_module("can-proto-%d", protocol);

		/*
		 * In case of error we only print a message but don't
		 * return the error code immediately.  Below we will
		 * return -EPROTONOSUPPORT
		 */
		if (err)
			printk_ratelimited(KERN_ERR "can: request_module "
					   "(can-proto-%d) failed.\n", protocol);

		cp = can_get_proto(protocol);
	}
#endif

	/* check for available protocol and correct usage */

	if (!cp)
		return -EPROTONOSUPPORT;

	if (cp->type != sock->type) {
		err = -EPROTOTYPE;
		goto errout;
	}

	sock->ops = cp->ops;

	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
	if (!sk) {
		err = -ENOMEM;
		goto errout;
	}

	sock_init_data(sock, sk);
	sk->sk_destruct = can_sock_destruct;

	if (sk->sk_prot->init)
		err = sk->sk_prot->init(sk);

	if (err) {
		/* release sk on errors */
		sock_orphan(sk);
		sock_put(sk);
	}

 errout:
	can_put_proto(cp);
	return err;
}

/*
 * af_can tx path
 */

/**
 * can_send - transmit a CAN frame (optional with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats;
	int err = -EINVAL;

	if (skb->len == CAN_MTU) {
		skb->protocol = htons(ETH_P_CAN);
		if (unlikely(cfd->len > CAN_MAX_DLEN))
			goto inval_skb;
	} else if (skb->len == CANFD_MTU) {
		skb->protocol = htons(ETH_P_CANFD);
		if (unlikely(cfd->len > CANFD_MAX_DLEN))
			goto inval_skb;
	} else
		goto inval_skb;

	/*
	 * Make sure the CAN frame can pass the selected CAN netdevice.
	 * As structs can_frame and canfd_frame are similar, we can provide
	 * CAN FD frames to legacy CAN drivers as long as the length is <= 8
	 */
	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
		err = -EMSGSIZE;
		goto inval_skb;
	}

	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
		err = -EPERM;
		goto inval_skb;
	}

	if (unlikely(!(skb->dev->flags & IFF_UP))) {
		err = -ENETDOWN;
		goto inval_skb;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/*
		 * The reference to the originating sock may be required
		 * by the receiving socket to check whether the frame is
		 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
		 * Therefore we have to ensure that skb->sk remains the
		 * reference to the originating sock by restoring skb->sk
		 * after each skb_clone() or skb_orphan() usage.
		 */

		if (!(skb->dev->flags & IFF_ECHO)) {
			/*
			 * If the interface is not capable to do loopback
			 * itself, we do it here.
			 */
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			can_skb_set_owner(newskb, skb->sk);
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx_ni(newskb);

	/* update statistics */
	can_stats->tx_frames++;
	can_stats->tx_frames_delta++;

	return 0;

 inval_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(can_send);

/*
 * af_can rx path
 */

static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net,
						    struct net_device *dev)
{
	if (!dev)
		return net->can.can_rx_alldev_list;
	else
		return (struct can_dev_rcv_lists *)dev->ml_priv;
}

/**
 * effhash - hash function for 29 bit CAN identifier reduction
 * @can_id: 29 bit CAN identifier
 *
 * Description:
 *  To reduce the linear traversal in one linked list of _single_ EFF CAN
 *  frame subscriptions the 29 bit identifier is mapped to 10 bits.
 *  (see CAN_EFF_RCV_HASH_BITS definition)
 *
 * Return:
 *  Hash value from 0x000 - 0x3FF ( enforced by CAN_EFF_RCV_HASH_BITS mask )
 */
static unsigned int effhash(canid_t can_id)
{
	unsigned int hash;

	hash = can_id;
	hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
	hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

	return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}

/**
 * find_rcv_list - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. This function is called by service functions that need
 *  to register or unregister a can_filter in the filter lists.
 *
 *  A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 *  relevant bits for the filter.
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
 *  frames there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 *  Pointer to optimal filterlist for the given can_id/mask pair.
 *  Consistency checked mask.
 *  Reduced can_id to have a preprocessed filter compare value.
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
					struct can_dev_rcv_lists *d)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

	/* filter for error message frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in filter entry */
		*mask &= CAN_ERR_MASK;
		return &d->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &d->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &d->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
	    !(*can_id & CAN_RTR_FLAG)) {

		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_eff[effhash(*can_id)];
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &d->rx[RX_FIL];
}

/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 * @sk: socket pointer (might be NULL)
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
 *
 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
 *  the callback function is running. The callback function must *not* free
 *  the given sk_buff while processing its task. When the given sk_buff is
 *  needed after the end of the callback function it must be cloned inside
 *  the callback function with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
		    canid_t mask, void (*func)(struct sk_buff *, void *),
		    void *data, char *ident, struct sock *sk)
{
	struct receiver *r;
	struct hlist_head *rl;
	struct can_dev_rcv_lists *d;
	struct s_pstats *can_pstats = net->can.can_pstats;
	int err = 0;

	/* insert new receiver  (dev,canid,mask) -> (func,data) */

	if (dev && dev->type != ARPHRD_CAN)
		return -ENODEV;

	if (dev && !net_eq(net, dev_net(dev)))
		return -ENODEV;

	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&net->can.can_rcvlists_lock);

	d = find_dev_rcv_lists(net, dev);
	if (d) {
		rl = find_rcv_list(&can_id, &mask, d);

		r->can_id = can_id;
		r->mask = mask;
		r->matches = 0;
		r->func = func;
		r->data = data;
		r->ident = ident;
		r->sk = sk;

		hlist_add_head_rcu(&r->list, rl);
		d->entries++;

		can_pstats->rcv_entries++;
		if (can_pstats->rcv_entries_max < can_pstats->rcv_entries)
			can_pstats->rcv_entries_max = can_pstats->rcv_entries;
	} else {
		kmem_cache_free(rcv_cache, r);
		err = -ENODEV;
	}

	spin_unlock(&net->can.can_rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);

/*
 * can_rx_delete_receiver - rcu callback for single receiver entry removal
 */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
	struct receiver *r = container_of(rp, struct receiver, rcu);
	struct sock *sk = r->sk;

	kmem_cache_free(rcv_cache, r);
	if (sk)
		sock_put(sk);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
		       canid_t mask, void (*func)(struct sk_buff *, void *),
		       void *data)
{
	struct receiver *r = NULL;
	struct hlist_head *rl;
	struct s_pstats *can_pstats = net->can.can_pstats;
	struct can_dev_rcv_lists *d;

	if (dev && dev->type != ARPHRD_CAN)
		return;

	if (dev && !net_eq(net, dev_net(dev)))
		return;

	spin_lock(&net->can.can_rcvlists_lock);

	d = find_dev_rcv_lists(net, dev);
	if (!d) {
		pr_err("BUG: receive list not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		goto out;
	}

	rl = find_rcv_list(&can_id, &mask, d);

	/*
	 * Search the receiver list for the item to delete. This should
	 * exist, since no receiver may be unregistered that hasn't
	 * been registered before.
	 */

	hlist_for_each_entry_rcu(r, rl, list) {
		if (r->can_id == can_id && r->mask == mask &&
		    r->func == func && r->data == data)
			break;
	}

	/*
	 * Check for bugs in CAN protocol implementations using af_can.c:
	 * 'r' will be NULL if no matching list item was found for removal.
	 */

	if (!r) {
		WARN(1, "BUG: receive list entry not found for dev %s, "
		     "id %03X, mask %03X\n", DNAME(dev), can_id, mask);
		goto out;
	}

	hlist_del_rcu(&r->list);
	d->entries--;

	if (can_pstats->rcv_entries > 0)
		can_pstats->rcv_entries--;

	/* remove device structure requested by NETDEV_UNREGISTER */
	if (d->remove_on_zero_entries && !d->entries) {
		kfree(d);
		dev->ml_priv = NULL;
	}

 out:
	spin_unlock(&net->can.can_rcvlists_lock);

	/* schedule the receiver item for deletion */
	if (r) {
		if (r->sk)
			sock_hold(r->sk);
		call_rcu(&r->rcu, can_rx_delete_receiver);
	}
}
EXPORT_SYMBOL(can_rx_unregister);

static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
	r->func(skb, r->data);
	r->matches++;
}

static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
{
	struct receiver *r;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	if (d->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* check for error message frame entries only */
		hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
			if (can_id & r->mask) {
				deliver(skb, r);
				matches++;
			}
		}
		return matches;
	}

	/* check for unfiltered entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
		deliver(skb, r);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
		if ((can_id & r->mask) == r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
		if ((can_id & r->mask) != r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check filterlists for single non-RTR can_ids */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
			if (r->can_id == can_id) {
				deliver(skb, r);
				matches++;
			}
		}
	} else {
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
			deliver(skb, r);
			matches++;
		}
	}

	return matches;
}

static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
	struct can_dev_rcv_lists *d;
	struct net *net = dev_net(dev);
	struct s_stats *can_stats = net->can.can_stats;
	int matches;

	/* update statistics */
	can_stats->rx_frames++;
	can_stats->rx_frames_delta++;

	/* create non-zero unique skb identifier together with *skb */
	while (!(can_skb_prv(skb)->skbcnt))
		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(net->can.can_rx_alldev_list, skb);

	/* find receive list for this device */
	d = find_dev_rcv_lists(net, dev);
	if (d)
		matches += can_rcv_filter(d, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		can_stats->matches++;
		can_stats->matches_delta++;
	}
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
		     cfd->len > CAN_MAX_DLEN)) {
		pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
			     dev->type, skb->len, cfd->len);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;
}

static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
		     cfd->len > CANFD_MAX_DLEN)) {
		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
			     dev->type, skb->len, cfd->len);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;
}

/*
 * af_can protocol functions
 */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY protocol already in use
 *  -ENOBUFS if proto_register() fails
 */
int can_proto_register(const struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		pr_err("can: protocol number %d out of range\n", proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	mutex_lock(&proto_tab_lock);

	if (rcu_access_pointer(proto_tab[proto])) {
		pr_err("can: protocol %d already registered\n", proto);
		err = -EBUSY;
	} else
		RCU_INIT_POINTER(proto_tab[proto], cp);

	mutex_unlock(&proto_tab_lock);

	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
	int proto = cp->protocol;

	mutex_lock(&proto_tab_lock);
	BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
	RCU_INIT_POINTER(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);

	synchronize_rcu();

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);

/*
 * af_can notifier to create/remove CAN netdevice specific structs
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct can_dev_rcv_lists *d;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_REGISTER:

		/* create new dev_rcv_lists for this device */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			return NOTIFY_DONE;
		BUG_ON(dev->ml_priv);
		dev->ml_priv = d;

		break;

	case NETDEV_UNREGISTER:
		spin_lock(&dev_net(dev)->can.can_rcvlists_lock);

		d = dev->ml_priv;
		if (d) {
			if (d->entries)
				d->remove_on_zero_entries = 1;
			else {
				kfree(d);
				dev->ml_priv = NULL;
			}
		} else
			pr_err("can: notifier: receive list not found for dev "
			       "%s\n", dev->name);

		spin_unlock(&dev_net(dev)->can.can_rcvlists_lock);

		break;
	}

	return NOTIFY_DONE;
}

static int can_pernet_init(struct net *net)
{
	spin_lock_init(&net->can.can_rcvlists_lock);
	net->can.can_rx_alldev_list =
		kzalloc(sizeof(struct can_dev_rcv_lists), GFP_KERNEL);
	if (!net->can.can_rx_alldev_list)
		goto out;
	net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
	if (!net->can.can_stats)
		goto out_free_alldev_list;
	net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
	if (!net->can.can_pstats)
		goto out_free_can_stats;

	if (IS_ENABLED(CONFIG_PROC_FS)) {
		/* the statistics are updated every second (timer triggered) */
		if (stats_timer) {
			timer_setup(&net->can.can_stattimer, can_stat_update,
				    0);
			mod_timer(&net->can.can_stattimer,
				  round_jiffies(jiffies + HZ));
		}
		net->can.can_stats->jiffies_init = jiffies;
		can_init_proc(net);
	}

	return 0;

 out_free_can_stats:
	kfree(net->can.can_stats);
 out_free_alldev_list:
	kfree(net->can.can_rx_alldev_list);
 out:
	return -ENOMEM;
}

static void can_pernet_exit(struct net *net)
{
	struct net_device *dev;

	if (IS_ENABLED(CONFIG_PROC_FS)) {
		can_remove_proc(net);
		if (stats_timer)
			del_timer_sync(&net->can.can_stattimer);
	}

	/* remove created dev_rcv_lists from still registered CAN devices */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->type == ARPHRD_CAN && dev->ml_priv) {
			struct can_dev_rcv_lists *d = dev->ml_priv;

			BUG_ON(d->entries);
			kfree(d);
			dev->ml_priv = NULL;
		}
	}
	rcu_read_unlock();

	kfree(net->can.can_rx_alldev_list);
	kfree(net->can.can_stats);
	kfree(net->can.can_pstats);
}

/*
 * af_can module init/exit functions
 */

static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.func = can_rcv,
};

static struct packet_type canfd_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CANFD),
	.func = canfd_rcv,
};

static const struct net_proto_family can_family_ops = {
	.family = PF_CAN,
	.create = can_create,
	.owner = THIS_MODULE,
};

/* notifier block for netdevice event */
static struct notifier_block can_netdev_notifier __read_mostly = {
	.notifier_call = can_notifier,
};

static struct pernet_operations can_pernet_ops __read_mostly = {
	.init = can_pernet_init,
	.exit = can_pernet_exit,
};

static __init int can_init(void)
{
	int err;

	/* check for correct padding to be able to use the structs similarly */
	BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
		     offsetof(struct canfd_frame, len) ||
		     offsetof(struct can_frame, data) !=
		     offsetof(struct canfd_frame, data));

	pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	err = register_pernet_subsys(&can_pernet_ops);
	if (err)
		goto out_pernet;

	/* protocol register */
	err = sock_register(&can_family_ops);
	if (err)
		goto out_sock;
	err = register_netdevice_notifier(&can_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&can_packet);
	dev_add_pack(&canfd_packet);

	return 0;

 out_notifier:
	sock_unregister(PF_CAN);
 out_sock:
	unregister_pernet_subsys(&can_pernet_ops);
 out_pernet:
	kmem_cache_destroy(rcv_cache);

	return err;
}

static __exit void can_exit(void)
{
	/* protocol unregister */
	dev_remove_pack(&canfd_packet);
	dev_remove_pack(&can_packet);
	unregister_netdevice_notifier(&can_netdev_notifier);
	sock_unregister(PF_CAN);

	unregister_pernet_subsys(&can_pernet_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);
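
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the functions exported above are intended to be called from CAN
 * transport protocol modules such as can_raw or can_bcm. The names used
 * below (my_can_proto, my_rx_callback, my_data, my_sk, the CAN ID 0x123)
 * are hypothetical placeholders; only the signatures defined in this file
 * are assumed.
 *
 *	// register the transport protocol so can_create() can find it
 *	err = can_proto_register(&my_can_proto);
 *
 *	// subscribe to SFF CAN ID 0x123 on one interface (dev may be NULL
 *	// to subscribe on all CAN interfaces of the net namespace)
 *	err = can_rx_register(net, dev, 0x123, CAN_SFF_MASK,
 *			      my_rx_callback, my_data, "my-proto", my_sk);
 *
 *	// transmit a prepared CAN skb with local loopback enabled
 *	err = can_send(skb, 1);
 *
 *	// drop the subscription again with the identical filter tuple
 *	can_rx_unregister(net, dev, 0x123, CAN_SFF_MASK,
 *			  my_rx_callback, my_data);
 *
 *	// remove the protocol on module unload
 *	can_proto_unregister(&my_can_proto);
 */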