// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/can.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256
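
/*
 * Illustrative message layout (a sketch for orientation, not a new
 * interface): every BCM message exchanged via sendmsg()/recvmsg() is a
 * struct bcm_msg_head directly followed by msg_head.nframes CAN frames,
 * each of CFSIZ(msg_head.flags) bytes:
 *
 *   [ struct bcm_msg_head ][ frame 0 ][ frame 1 ] ... [ frame nframes-1 ]
 *
 * As the multiplex filter index is a single byte, at most MAX_NFRAMES
 * (256) frames can be addressed per operation.
 */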

/* limit timers to 400 days for sending/timeouts */
#define BCM_TIMER_SEC_MAX	(400 * 24 * 60 * 60)

/* use of last_frames[index].flags */
#define RX_LOCAL 0x10 /* frame was created on the local host */
#define RX_OWN   0x20 /* frame was sent via the socket it was received on */
#define RX_RECV  0x40 /* received data for this element */
#define RX_THR   0x80 /* element has not been sent due to throttle feature */
#define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

/*
 * easy access to the first 64 bits of can(fd)_frame payload. cp->data is
 * 64 bit aligned so the offset has to be a multiple of 8 which is ensured
 * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler().
 */
static inline u64 get_u64(const struct canfd_frame *cp, int offset)
{
	return *(u64 *)(cp->data + offset);
}

struct bcm_op {
	struct list_head list;
	struct rcu_head rcu;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct bcm_timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	int cfsiz;
	u32 count;
	u32 nframes;
	u32 currframe;
	/* void pointers to arrays of struct can[fd]_frame */
	void *frames;
	void *last_frames;
	struct canfd_frame sframe;
	struct canfd_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
	spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
};

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct list_head notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [32]; /* inode number in decimal with \0 */
};

static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;

/* Return pointer to store the extra msg flags for bcm_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *bcm_flags(struct sk_buff *skb)
{
	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

/* check limitations for timeval provided by user */
static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
{
	if ((msg_head->ival1.tv_sec < 0) ||
	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival1.tv_usec < 0) ||
	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
	    (msg_head->ival2.tv_sec < 0) ||
	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival2.tv_usec < 0) ||
	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
		return true;

	return false;
}

#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
#if IS_ENABLED(CONFIG_PROC_FS)
static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct net *net = m->private;
	struct sock *sk = (struct sock *)pde_data(m->file->f_inode);
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	rcu_read_lock();

	list_for_each_entry_rcu(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u)", op->nframes);
		else
			seq_printf(m, "[%u]", op->nframes);

		seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');

		if (op->kt_ival1)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u) ", op->nframes);
		else
			seq_printf(m, "[%u] ", op->nframes);

		if (op->kt_ival1)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');

	rcu_read_unlock();

	return 0;
}
#endif /* CONFIG_PROC_FS */

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct can_skb_ext *csx;
	struct net_device *dev;
	struct canfd_frame *cf;
	int err;

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	/* read currframe under lock protection */
	spin_lock_bh(&op->bcm_tx_lock);
	cf = op->frames + op->cfsiz * op->currframe;
	spin_unlock_bh(&op->bcm_tx_lock);

	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(op->cfsiz, gfp_any());
	if (!skb)
		goto out;

	csx = can_skb_ext_add(skb);
	if (!csx) {
		kfree_skb(skb);
		goto out;
	}

	csx->can_iif = dev->ifindex;

	skb_put_data(skb, cf, op->cfsiz);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	err = can_send(skb, 1);

	/* update currframe and count under lock protection */
	spin_lock_bh(&op->bcm_tx_lock);

	if (!err)
		op->frames_abs++;

	op->currframe++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;

	if (op->count > 0)
		op->count--;

	spin_unlock_bh(&op->bcm_tx_lock);
out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct canfd_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct canfd_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * op->cfsiz;
	int err;
	unsigned int *pflags;
	enum skb_drop_reason reason;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	skb_put_data(skb, head, sizeof(*head));

	/* ensure space for sockaddr_can and msg flags */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* initialize msg flags */
	pflags = bcm_flags(skb);
	*pflags = 0;

	if (head->nframes) {
		/* CAN frames starting here */
		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);

		skb_put_data(skb, frames, datalen);

		/*
		 * the BCM uses the flags-element of the canfd_frame
		 * structure for internal purposes.
		 * This is only relevant for updates that are generated
		 * by the BCM, where nframes is 1
		 */
		if (head->nframes == 1) {
			if (firstframe->flags & RX_LOCAL)
				*pflags |= MSG_DONTROUTE;
			if (firstframe->flags & RX_OWN)
				*pflags |= MSG_CONFIRM;

			firstframe->flags &= BCM_CAN_FLAGS_MASK;
		}
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram into the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb_reason(sk, skb, &reason);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		sk_skb_reason_drop(sk, skb, reason);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
{
	ktime_t ival;

	if (op->kt_ival1 && op->count)
		ival = op->kt_ival1;
	else if (op->kt_ival2)
		ival = op->kt_ival2;
	else
		return false;

	hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
	return true;
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (bcm_tx_set_expiry(op, &op->timer))
		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
}

/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	if (op->kt_ival1 && (op->count > 0)) {
		bcm_can_tx(op);
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			memset(&msg_head, 0, sizeof(msg_head));
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}

	} else if (op->kt_ival2) {
		bcm_can_tx(op);
	}

	return bcm_tx_set_expiry(op, &op->timer) ?
		HRTIMER_RESTART : HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->flags &= ~RX_THR;

	memset(&head, 0, sizeof(head));
	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}
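
/*
 * Illustrative throttling timeline (assuming kt_ival2 = 100 ms): the first
 * content change is delivered immediately and kt_lastmsg is set. Further
 * changes arriving within 100 ms only update last_frames[] and arm thrtimer
 * for kt_lastmsg + 100 ms; when it fires, bcm_rx_thr_handler() flushes the
 * pending (RX_THR) data, so userspace sees at most one RX_CHANGED per
 * interval instead of one per received frame.
 */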

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 * 1. update the last received data
 * 2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct canfd_frame *lastdata,
				   const struct canfd_frame *rxdata,
				   unsigned char traffic_flags)
{
	memcpy(lastdata, rxdata, op->cfsiz);

	/* mark as used and throttled by default */
	lastdata->flags |= (RX_RECV|RX_THR);

	/* add own/local/remote traffic flags */
	lastdata->flags |= traffic_flags;

	/* throttling mode inactive ? */
	if (!op->kt_ival2) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS_SOFT);
		return;
	}

	/* the gap was big enough that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct canfd_frame *rxdata,
				unsigned char traffic_flags)
{
	struct canfd_frame *cf = op->frames + op->cfsiz * index;
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
	int i;

	/*
	 * no one uses the MSBs of flags for comparison,
	 * so we use them here to detect the first time of reception
	 */

	if (!(lcf->flags & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
		return;
	}

	/* do a real check in CAN frame data section */
	for (i = 0; i < rxdata->len; i += 8) {
		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
		    (get_u64(cf, i) & get_u64(lcf, i))) {
			bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
			return;
		}
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in CAN frame length */
		if (rxdata->len != lcf->len) {
			bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
}

/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* if the user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received CAN frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * op->cfsiz);
	}

	/* create notification to user */
	memset(&msg_head, 0, sizeof(msg_head));
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
{
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;

	if ((op->last_frames) && (lcf->flags & RX_THR)) {
		bcm_rx_changed(op, lcf);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 */
static int bcm_rx_thr_flush(struct bcm_op *op)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, 0);
	}

	return updated;
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	if (bcm_rx_thr_flush(op)) {
		hrtimer_forward_now(hrtimer, op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = 0;
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
	unsigned int i;
	unsigned char traffic_flags;

	if (op->can_id != rxframe->can_id)
		return;

	/* make sure to handle the correct frame type (CAN / CAN FD) */
	if (op->flags & CAN_FD_FRAME) {
		if (!can_is_canfd_skb(skb))
			return;
	} else {
		if (!can_is_can_skb(skb))
			return;
	}

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	/* compute flags to distinguish between own/local/remote CAN traffic */
	traffic_flags = 0;
	if (skb->sk) {
		traffic_flags |= RX_LOCAL;
		if (skb->sk == op->sk)
			traffic_flags |= RX_OWN;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, op->last_frames, rxframe,
				       traffic_flags);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0 - but only the
		 * first 64 bits of the frame data[] are relevant (CAN FD)
		 */
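		/*
		 * Illustrative example: with a MUX mask of 0xFF in data[0]
		 * of frames[0], a received frame with data[0] == 0x02 is
		 * matched to the stored frames[i] whose data[0] is 0x02,
		 * and bcm_rx_cmp_to_index() then checks that entry for
		 * content changes.
		 */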

		for (i = 1; i < op->nframes; i++) {
			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
			    (get_u64(op->frames, 0) &
			     get_u64(op->frames + op->cfsiz * i, 0))) {
				bcm_rx_cmp_to_index(op, i, rxframe,
						    traffic_flags);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops,
				  struct bcm_msg_head *mh, int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
			return op;
	}

	return NULL;
}

static void bcm_free_op_rcu(struct rcu_head *rcu_head)
{
	struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	call_rcu(&op->rcu, bcm_free_op_rcu);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev_net(dev), dev, op->can_id,
				  REGMASK(op->can_id), bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {

			/* disable automatic timer on frame reception */
			op->flags |= RX_NO_AUTOTIMER;

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(sock_net(op->sk),
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(sock_net(op->sk), NULL,
						  op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	struct canfd_frame *cf;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one CAN frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update CAN frames content */
		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}
		op->flags = msg_head->flags;

		/* only lock for unlikely count/nframes/currframe changes */
		if (op->nframes != msg_head->nframes ||
		    op->flags & TX_RESET_MULTI_IDX ||
		    op->flags & SETTIMER) {

			spin_lock_bh(&op->bcm_tx_lock);

			if (op->nframes != msg_head->nframes ||
			    op->flags & TX_RESET_MULTI_IDX) {
				/* potentially update changed nframes */
				op->nframes = msg_head->nframes;
				/* restart multiple frame transmission */
				op->currframe = 0;
			}

			if (op->flags & SETTIMER)
				op->count = msg_head->count;

			spin_unlock_bh(&op->bcm_tx_lock);
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		spin_lock_init(&op->bcm_tx_lock);
		op->can_id = msg_head->can_id;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;
		op->nframes = msg_head->nframes;

		if (op->flags & SETTIMER)
			op->count = msg_head->count;

		/* create array for CAN frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
			if (err < 0)
				goto free_op;

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				goto free_op;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_setup(&op->timer, bcm_tx_timeout_handler, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);

		/* currently unused in tx_ops */
		hrtimer_setup(&op->thrtimer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
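
		/*
		 * Timing model (as implemented by bcm_tx_timeout_handler()):
		 * the first 'count' transmissions use interval ival1,
		 * afterwards transmission continues with interval ival2 -
		 * e.g. count=10, ival1=10ms, ival2=1s sends a short burst
		 * and then settles to one frame per second.
		 */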

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1 && !op->kt_ival2)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send CAN frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE)
		bcm_can_tx(op);

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * op->cfsiz + MHSIZ;

free_op:
	if (op->frames != &op->sframe)
		kfree(op->frames);
	kfree(op);
	return err;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update CAN frames content */
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
		}

		op->nframes = msg_head->nframes;
		op->flags = msg_head->flags;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		if (msg_head->nframes > 1) {
			/* create array for CAN frames and copy the data */
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received CAN frames */
			op->last_frames = kcalloc(msg_head->nframes,
						  op->cfsiz,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_setup(&op->timer, bcm_rx_timeout_handler, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);
		hrtimer_setup(&op->thrtimer, bcm_rx_thr_handler, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */

	if (op->flags & RX_RTR_FRAME) {
		struct canfd_frame *frame0 = op->frames;

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (frame0->can_id == op->can_id))
			frame0->can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
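
			/*
			 * For RX ops ival1 is the (cyclic) reception timeout
			 * monitored by bcm_rx_timeout_handler(), and ival2
			 * is the minimum gap between two RX_CHANGED updates
			 * enforced by the throttling logic above.
			 */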

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = 0;
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL_SOFT);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (dev) {
				err = can_rx_register(sock_net(sk), dev,
						      op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm", sk);

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(sock_net(sk), NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm", sk);
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * op->cfsiz + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
		       int cfsiz)
{
	struct sk_buff *skb;
	struct can_skb_ext *csx;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(cfsiz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csx = can_skb_ext_add(skb);
	if (!csx) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	csx->can_iif = dev->ifindex;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return cfsiz + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int cfsiz;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ)
		return -EINVAL;

	/* read message head information */
	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	cfsiz = CFSIZ(msg_head.flags);
	if ((size - MHSIZ) % cfsiz)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < BCM_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one CAN frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
#if IS_ENABLED(CONFIG_PROC_FS)
			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
				bo->bcm_proc_read = NULL;
			}
#endif
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
	}
}
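
/*
 * The notifier list is walked with bcm_notifier_lock dropped around each
 * bcm_notify() call; the global bcm_busy_notifier marks the entry that is
 * currently being handled so bcm_release() can wait for it before unlinking
 * its socket from bcm_notifier_list.
 */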
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&bcm_notifier_lock);
	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
		spin_unlock(&bcm_notifier_lock);
		bcm_notify(bcm_busy_notifier, msg, dev);
		spin_lock(&bcm_notifier_lock);
	}
	bcm_busy_notifier = NULL;
	spin_unlock(&bcm_notifier_lock);
	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	spin_lock(&bcm_notifier_lock);
	list_add_tail(&bo->notifier, &bcm_notifier_list);
	spin_unlock(&bcm_notifier_lock);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (!sk)
		return 0;

	net = sock_net(sk);
	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	spin_lock(&bcm_notifier_lock);
	while (bcm_busy_notifier == bo) {
		spin_unlock(&bcm_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&bcm_notifier_lock);
	}
	list_del(&bo->notifier);
	spin_unlock(&bcm_notifier_lock);

	lock_sock(sk);

#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove procfs entry */
	if (net->can.bcmproc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
#endif /* CONFIG_PROC_FS */

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(net, NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

	}

	synchronize_rcu();

	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
		bcm_remove_op(op);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct net *net = sock_net(sk);
	int ret = 0;

	if (len < BCM_MIN_NAMELEN)
		return -EINVAL;

	lock_sock(sk);

	if (bo->bound) {
		ret = -EISCONN;
		goto fail;
	}

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(net, addr->can_ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto fail;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			ret = -ENODEV;
			goto fail;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

#if IS_ENABLED(CONFIG_PROC_FS)
	if (net->can.bcmproc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
							   net->can.bcmproc_dir,
							   bcm_proc_show, sk);
		if (!bo->bcm_proc_read) {
			ret = -ENOMEM;
			goto fail;
		}
	}
#endif /* CONFIG_PROC_FS */

	bo->bound = 1;

fail:
	release_sock(sk);

	return ret;
}

static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int err;

	skb = skb_recv_datagram(sk, flags, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(BCM_MIN_NAMELEN);
		msg->msg_namelen = BCM_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in bcm_send_to_user() */
	msg->msg_flags |= *(bcm_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}

static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
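
/*
 * Note: BCM sockets do not implement bind(), listen() or getname(); all
 * configuration (TX_SETUP, RX_SETUP, ...) is done via connect() to select
 * the CAN interface and via the opcodes passed through sendmsg().
 */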
static const struct proto_ops bcm_ops = {
	.family = PF_CAN,
	.release = bcm_release,
	.bind = sock_no_bind,
	.connect = bcm_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = bcm_sock_no_ioctlcmd,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = bcm_sendmsg,
	.recvmsg = bcm_recvmsg,
	.mmap = sock_no_mmap,
};

static struct proto bcm_proto __read_mostly = {
	.name = "CAN_BCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.ops = &bcm_ops,
	.prot = &bcm_proto,
};

static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* create /proc/net/can-bcm directory */
	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove /proc/net/can-bcm directory */
	if (net->can.bcmproc_dir)
		remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}

static struct pernet_operations canbcm_pernet_ops __read_mostly = {
	.init = canbcm_pernet_init,
	.exit = canbcm_pernet_exit,
};

static struct notifier_block canbcm_notifier = {
	.notifier_call = bcm_notifier
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol\n");

	err = register_pernet_subsys(&canbcm_pernet_ops);
	if (err)
		return err;

	err = register_netdevice_notifier(&canbcm_notifier);
	if (err)
		goto register_notifier_failed;

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		goto register_proto_failed;
	}

	return 0;

register_proto_failed:
	unregister_netdevice_notifier(&canbcm_notifier);
register_notifier_failed:
	unregister_pernet_subsys(&canbcm_pernet_ops);
	return err;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);
	unregister_netdevice_notifier(&canbcm_notifier);
	unregister_pernet_subsys(&canbcm_pernet_ops);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);