1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 2 /* 3 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content 4 * 5 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of Volkswagen nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * Alternatively, provided that this notice is retained in full, this 21 * software may be distributed under the terms of the GNU General 22 * Public License ("GPL") version 2, in which case the provisions of the 23 * GPL apply INSTEAD OF those given above. 24 * 25 * The provided data structures and external interfaces from this code 26 * are not restricted to be used by modules with a GPL compatible license. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 39 * DAMAGE. 40 * 41 */ 42 43 #include <linux/module.h> 44 #include <linux/init.h> 45 #include <linux/interrupt.h> 46 #include <linux/hrtimer.h> 47 #include <linux/list.h> 48 #include <linux/proc_fs.h> 49 #include <linux/seq_file.h> 50 #include <linux/uio.h> 51 #include <linux/net.h> 52 #include <linux/netdevice.h> 53 #include <linux/socket.h> 54 #include <linux/if_arp.h> 55 #include <linux/skbuff.h> 56 #include <linux/can.h> 57 #include <linux/can/core.h> 58 #include <linux/can/skb.h> 59 #include <linux/can/bcm.h> 60 #include <linux/slab.h> 61 #include <linux/spinlock.h> 62 #include <net/sock.h> 63 #include <net/net_namespace.h> 64 65 /* 66 * To send multiple CAN frame content within TX_SETUP or to filter 67 * CAN messages with multiplex index within RX_SETUP, the number of 68 * different filters is limited to 256 due to the one byte index value. 
 */
#define MAX_NFRAMES 256

/* limit timers to 400 days for sending/timeouts */
#define BCM_TIMER_SEC_MAX	(400 * 24 * 60 * 60)

/* use of last_frames[index].flags */
#define RX_LOCAL 0x10 /* frame was created on the local host */
#define RX_OWN 0x20 /* frame was sent via the socket it was received on */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element has not been sent due to throttle feature */
#define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

/*
 * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
 * 64 bit aligned so the offset has to be multiples of 8 which is ensured
 * by the only callers in bcm_rx_cmp_to_index() and bcm_rx_handler().
 */
static inline u64 get_u64(const struct canfd_frame *cp, int offset)
{
	return *(u64 *)(cp->data + offset);
}

/*
 * bcm_op - one broadcast manager operation (either a cyclic TX job or an
 * RX filter/timeout subscription), linked into the owning socket's
 * tx_ops or rx_ops list.
 */
struct bcm_op {
	struct list_head list;		/* anchor in bcm_sock tx_ops/rx_ops */
	struct rcu_head rcu;		/* deferred free via bcm_free_op_rcu() */
	int ifindex;
	canid_t can_id;
	u32 flags;			/* TX_*/RX_* flags from bcm_msg_head */
	unsigned long frames_abs, frames_filtered;
	struct bcm_timeval ival1, ival2;
	struct hrtimer timer, thrtimer;	/* cyclic/timeout timer and throttle timer */
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;			/* ifindex of last reception (for recvfrom) */
	int cfsiz;			/* CAN_MTU or CANFD_MTU, fixed per op */
	u32 count;			/* remaining ival1 transmissions (TX) */
	u32 nframes;
	u32 currframe;			/* next frame index for cyclic TX */
	/* void pointers to arrays of struct can[fd]_frame */
	void *frames;
	void *last_frames;
	/* single-frame ops point frames/last_frames at these inline buffers */
	struct canfd_frame sframe;
	struct canfd_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;	/* device the rx filter is registered on */
	spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
};

/* per-socket state of a PF_CAN/CAN_BCM socket */
struct bcm_sock {
	struct sock sk;			/* must be first: bcm_sk() casts sock* */
	int bound;
	int ifindex;
	struct list_head notifier;	/* anchor in bcm_notifier_list */
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [32]; /* inode number in decimal with \0 */
};

static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;

/* Return pointer to store the extra msg flags for bcm_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
148 */ 149 static inline unsigned int *bcm_flags(struct sk_buff *skb) 150 { 151 /* return pointer after struct sockaddr_can */ 152 return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]); 153 } 154 155 static inline struct bcm_sock *bcm_sk(const struct sock *sk) 156 { 157 return (struct bcm_sock *)sk; 158 } 159 160 static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) 161 { 162 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); 163 } 164 165 /* check limitations for timeval provided by user */ 166 static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head) 167 { 168 if ((msg_head->ival1.tv_sec < 0) || 169 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || 170 (msg_head->ival1.tv_usec < 0) || 171 (msg_head->ival1.tv_usec >= USEC_PER_SEC) || 172 (msg_head->ival2.tv_sec < 0) || 173 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || 174 (msg_head->ival2.tv_usec < 0) || 175 (msg_head->ival2.tv_usec >= USEC_PER_SEC)) 176 return true; 177 178 return false; 179 } 180 181 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? 
CANFD_MTU : CAN_MTU) 182 #define OPSIZ sizeof(struct bcm_op) 183 #define MHSIZ sizeof(struct bcm_msg_head) 184 185 /* 186 * procfs functions 187 */ 188 #if IS_ENABLED(CONFIG_PROC_FS) 189 static char *bcm_proc_getifname(struct net *net, char *result, int ifindex) 190 { 191 struct net_device *dev; 192 193 if (!ifindex) 194 return "any"; 195 196 rcu_read_lock(); 197 dev = dev_get_by_index_rcu(net, ifindex); 198 if (dev) 199 strcpy(result, dev->name); 200 else 201 strcpy(result, "???"); 202 rcu_read_unlock(); 203 204 return result; 205 } 206 207 static int bcm_proc_show(struct seq_file *m, void *v) 208 { 209 char ifname[IFNAMSIZ]; 210 struct net *net = m->private; 211 struct sock *sk = (struct sock *)pde_data(m->file->f_inode); 212 struct bcm_sock *bo = bcm_sk(sk); 213 struct bcm_op *op; 214 215 seq_printf(m, ">>> socket %pK", sk->sk_socket); 216 seq_printf(m, " / sk %pK", sk); 217 seq_printf(m, " / bo %pK", bo); 218 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); 219 seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); 220 seq_printf(m, " <<<\n"); 221 222 rcu_read_lock(); 223 224 list_for_each_entry_rcu(op, &bo->rx_ops, list) { 225 226 unsigned long reduction; 227 228 /* print only active entries & prevent division by zero */ 229 if (!op->frames_abs) 230 continue; 231 232 seq_printf(m, "rx_op: %03X %-5s ", op->can_id, 233 bcm_proc_getifname(net, ifname, op->ifindex)); 234 235 if (op->flags & CAN_FD_FRAME) 236 seq_printf(m, "(%u)", op->nframes); 237 else 238 seq_printf(m, "[%u]", op->nframes); 239 240 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 
'd' : ' '); 241 242 if (op->kt_ival1) 243 seq_printf(m, "timeo=%lld ", 244 (long long)ktime_to_us(op->kt_ival1)); 245 246 if (op->kt_ival2) 247 seq_printf(m, "thr=%lld ", 248 (long long)ktime_to_us(op->kt_ival2)); 249 250 seq_printf(m, "# recv %ld (%ld) => reduction: ", 251 op->frames_filtered, op->frames_abs); 252 253 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; 254 255 seq_printf(m, "%s%ld%%\n", 256 (reduction == 100) ? "near " : "", reduction); 257 } 258 259 list_for_each_entry(op, &bo->tx_ops, list) { 260 261 seq_printf(m, "tx_op: %03X %s ", op->can_id, 262 bcm_proc_getifname(net, ifname, op->ifindex)); 263 264 if (op->flags & CAN_FD_FRAME) 265 seq_printf(m, "(%u) ", op->nframes); 266 else 267 seq_printf(m, "[%u] ", op->nframes); 268 269 if (op->kt_ival1) 270 seq_printf(m, "t1=%lld ", 271 (long long)ktime_to_us(op->kt_ival1)); 272 273 if (op->kt_ival2) 274 seq_printf(m, "t2=%lld ", 275 (long long)ktime_to_us(op->kt_ival2)); 276 277 seq_printf(m, "# sent %ld\n", op->frames_abs); 278 } 279 seq_putc(m, '\n'); 280 281 rcu_read_unlock(); 282 283 return 0; 284 } 285 #endif /* CONFIG_PROC_FS */ 286 287 /* 288 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface 289 * of the given bcm tx op 290 */ 291 static void bcm_can_tx(struct bcm_op *op) 292 { 293 struct sk_buff *skb; 294 struct net_device *dev; 295 struct canfd_frame *cf; 296 int err; 297 298 /* no target device? => exit */ 299 if (!op->ifindex) 300 return; 301 302 /* read currframe under lock protection */ 303 spin_lock_bh(&op->bcm_tx_lock); 304 cf = op->frames + op->cfsiz * op->currframe; 305 spin_unlock_bh(&op->bcm_tx_lock); 306 307 dev = dev_get_by_index(sock_net(op->sk), op->ifindex); 308 if (!dev) { 309 /* RFC: should this bcm_op remove itself here? 
 */
		return;
	}

	skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	skb_put_data(skb, cf, op->cfsiz);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	err = can_send(skb, 1);

	/* update currframe and count under lock protection */
	spin_lock_bh(&op->bcm_tx_lock);

	/* statistics count only successful transmissions */
	if (!err)
		op->frames_abs++;

	op->currframe++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;

	if (op->count > 0)
		op->count--;

	spin_unlock_bh(&op->bcm_tx_lock);
out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 * (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct canfd_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct canfd_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * op->cfsiz;
	int err;
	unsigned int *pflags;
	enum skb_drop_reason reason;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	skb_put_data(skb, head, sizeof(*head));

	/* ensure space for sockaddr_can and msg flags */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* initialize msg flags */
	pflags = bcm_flags(skb);
	*pflags = 0;

	if (head->nframes) {
		/* CAN frames starting here */
		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);

		skb_put_data(skb, frames, datalen);

		/*
		 * the BCM uses the flags-element of the canfd_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1) {
			/* map internal RX_LOCAL/RX_OWN onto msg flags */
			if (firstframe->flags & RX_LOCAL)
				*pflags |= MSG_DONTROUTE;
			if (firstframe->flags & RX_OWN)
				*pflags |= MSG_CONFIRM;

			/* strip private flags before handing to userspace */
			firstframe->flags &= BCM_CAN_FLAGS_MASK;
		}
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb_reason(sk, skb, &reason);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		sk_skb_reason_drop(sk, skb, reason);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

/*
 * bcm_tx_set_expiry - arm 'hrt' for the next tx event: ival1 while count
 * transmissions remain, then ival2; false if no timer is needed anymore.
 */
static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
{
	ktime_t ival;

	if (op->kt_ival1 && op->count)
		ival = op->kt_ival1;
	else if (op->kt_ival2)
		ival = op->kt_ival2;
	else
		return false;

	hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
	return true;
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (bcm_tx_set_expiry(op, &op->timer))
		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
}

/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	if (op->kt_ival1 && (op->count > 0)) {
		bcm_can_tx(op);
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create
 notification to user */
			memset(&msg_head, 0, sizeof(msg_head));
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}

	} else if (op->kt_ival2) {
		bcm_can_tx(op);
	}

	return bcm_tx_set_expiry(op, &op->timer) ?
		HRTIMER_RESTART : HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->flags &= ~RX_THR;

	memset(&head, 0, sizeof(head));
	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 * 1. update the last received data
 * 2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct canfd_frame *lastdata,
				   const struct canfd_frame *rxdata,
				   unsigned char traffic_flags)
{
	memcpy(lastdata, rxdata, op->cfsiz);

	/* mark as used and throttled by default */
	lastdata->flags |= (RX_RECV|RX_THR);

	/* add own/local/remote traffic flags */
	lastdata->flags |= traffic_flags;

	/* throttling mode inactive? */
	if (!op->kt_ival2) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS_SOFT);
		return;
	}

	/* the gap was that big, that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct canfd_frame *rxdata,
				unsigned char traffic_flags)
{
	struct canfd_frame *cf = op->frames + op->cfsiz * index;
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
	int i;

	/*
	 * no one uses the MSBs of flags for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(lcf->flags & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
		return;
	}

	/* do a real check in CAN frame data section (u64 chunks, masked) */
	for (i = 0; i < rxdata->len; i += 8) {
		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
		    (get_u64(cf, i) & get_u64(lcf, i))) {
			bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
			return;
		}
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in CAN frame length */
		if (rxdata->len != lcf->len) {
			bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
}

/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* if user wants to be informed, when cyclic CAN-Messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received CAN frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * op->cfsiz);
	}

	/* create notification to user */
	memset(&msg_head, 0, sizeof(msg_head));
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
{
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;

	if ((op->last_frames) && (lcf->flags & RX_THR)) {
		bcm_rx_changed(op, lcf);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 */
static int bcm_rx_thr_flush(struct bcm_op *op)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, 0);
	}

	return updated;
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	if (bcm_rx_thr_flush(op)) {
		hrtimer_forward_now(hrtimer, op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = 0;
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
	unsigned int i;
	unsigned char traffic_flags;

	if (op->can_id != rxframe->can_id)
		return;

	/* make sure to handle the correct frame type (CAN / CAN FD) */
	if (op->flags & CAN_FD_FRAME) {
		if (!can_is_canfd_skb(skb))
			return;
	} else {
		if (!can_is_can_skb(skb))
			return;
	}

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	/* compute flags to distinguish between own/local/remote CAN traffic */
	traffic_flags = 0;
	if (skb->sk) {
		traffic_flags |= RX_LOCAL;
		if (skb->sk == op->sk)
			traffic_flags |= RX_OWN;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, op->last_frames, rxframe,
				       traffic_flags);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0 - but only the
		 * first 64 bits of the frame data[] are relevant (CAN FD)
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
			    (get_u64(op->frames, 0) &
			     get_u64(op->frames + op->cfsiz * i, 0))) {
				bcm_rx_cmp_to_index(op, i, rxframe,
						    traffic_flags);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */

/* find op by can_id/ifindex/frame-type triple; NULL if not present */
static struct bcm_op *bcm_find_op(struct list_head *ops,
				  struct bcm_msg_head *mh, int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
			return op;
	}

	return NULL;
}

/* RCU callback: free the frame arrays (unless inline) and the op itself */
static void bcm_free_op_rcu(struct rcu_head *rcu_head)
{
	struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

/* stop both timers, then free the op after a grace period */
static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	call_rcu(&op->rcu, bcm_free_op_rcu);
}

/* drop the can_rx_register() subscription of 'op' on 'dev' */
static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev_net(dev), dev, op->can_id,
				  REGMASK(op->can_id), bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {

			/* disable automatic timer on frame reception */
			op->flags |= RX_NO_AUTOTIMER;

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(sock_net(op->sk),
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(sock_net(op->sk), NULL,
						  op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not
 found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	struct canfd_frame *cf;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one CAN frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update CAN frames content */
		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			/* sanitize frame length against the op's frame type */
			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}
		op->flags = msg_head->flags;

		/* only lock for unlikely count/nframes/currframe changes */
		if (op->nframes != msg_head->nframes ||
		    op->flags & TX_RESET_MULTI_IDX ||
		    op->flags & SETTIMER) {

			spin_lock_bh(&op->bcm_tx_lock);

			if (op->nframes != msg_head->nframes ||
			    op->flags & TX_RESET_MULTI_IDX) {
				/* potentially update changed nframes */
				op->nframes = msg_head->nframes;
				/* restart multiple frame transmission */
				op->currframe = 0;
			}

			if (op->flags & SETTIMER)
				op->count = msg_head->count;

			spin_unlock_bh(&op->bcm_tx_lock);
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		spin_lock_init(&op->bcm_tx_lock);
		op->can_id = msg_head->can_id;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;
		op->nframes = msg_head->nframes;

		if (op->flags & SETTIMER)
			op->count = msg_head->count;

		/* create array for CAN frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
			if (err < 0)
				goto free_op;

			/* sanitize frame length against the op's frame type */
			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				goto free_op;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_setup(&op->timer, bcm_tx_timeout_handler, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);

		/* currently unused in tx_ops */
		hrtimer_setup(&op->thrtimer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1 && !op->kt_ival2)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send CAN frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE)
		bcm_can_tx(op);

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * op->cfsiz + MHSIZ;

free_op:
	if (op->frames != &op->sframe)
		kfree(op->frames);
	kfree(op);
	return err;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
1146 */ 1147 if (msg_head->nframes > op->nframes) 1148 return -E2BIG; 1149 1150 if (msg_head->nframes) { 1151 /* update CAN frames content */ 1152 err = memcpy_from_msg(op->frames, msg, 1153 msg_head->nframes * op->cfsiz); 1154 if (err < 0) 1155 return err; 1156 1157 /* clear last_frames to indicate 'nothing received' */ 1158 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz); 1159 } 1160 1161 op->nframes = msg_head->nframes; 1162 op->flags = msg_head->flags; 1163 1164 /* Only an update -> do not call can_rx_register() */ 1165 do_rx_register = 0; 1166 1167 } else { 1168 /* insert new BCM operation for the given can_id */ 1169 op = kzalloc(OPSIZ, GFP_KERNEL); 1170 if (!op) 1171 return -ENOMEM; 1172 1173 op->can_id = msg_head->can_id; 1174 op->nframes = msg_head->nframes; 1175 op->cfsiz = CFSIZ(msg_head->flags); 1176 op->flags = msg_head->flags; 1177 1178 if (msg_head->nframes > 1) { 1179 /* create array for CAN frames and copy the data */ 1180 op->frames = kmalloc_array(msg_head->nframes, 1181 op->cfsiz, 1182 GFP_KERNEL); 1183 if (!op->frames) { 1184 kfree(op); 1185 return -ENOMEM; 1186 } 1187 1188 /* create and init array for received CAN frames */ 1189 op->last_frames = kcalloc(msg_head->nframes, 1190 op->cfsiz, 1191 GFP_KERNEL); 1192 if (!op->last_frames) { 1193 kfree(op->frames); 1194 kfree(op); 1195 return -ENOMEM; 1196 } 1197 1198 } else { 1199 op->frames = &op->sframe; 1200 op->last_frames = &op->last_sframe; 1201 } 1202 1203 if (msg_head->nframes) { 1204 err = memcpy_from_msg(op->frames, msg, 1205 msg_head->nframes * op->cfsiz); 1206 if (err < 0) { 1207 if (op->frames != &op->sframe) 1208 kfree(op->frames); 1209 if (op->last_frames != &op->last_sframe) 1210 kfree(op->last_frames); 1211 kfree(op); 1212 return err; 1213 } 1214 } 1215 1216 /* bcm_can_tx / bcm_tx_timeout_handler needs this */ 1217 op->sk = sk; 1218 op->ifindex = ifindex; 1219 1220 /* ifindex for timeout events w/o previous frame reception */ 1221 op->rx_ifindex = ifindex; 1222 1223 /* 
initialize uninitialized (kzalloc) structure */ 1224 hrtimer_setup(&op->timer, bcm_rx_timeout_handler, CLOCK_MONOTONIC, 1225 HRTIMER_MODE_REL_SOFT); 1226 hrtimer_setup(&op->thrtimer, bcm_rx_thr_handler, CLOCK_MONOTONIC, 1227 HRTIMER_MODE_REL_SOFT); 1228 1229 /* add this bcm_op to the list of the rx_ops */ 1230 list_add(&op->list, &bo->rx_ops); 1231 1232 /* call can_rx_register() */ 1233 do_rx_register = 1; 1234 1235 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */ 1236 1237 /* check flags */ 1238 1239 if (op->flags & RX_RTR_FRAME) { 1240 struct canfd_frame *frame0 = op->frames; 1241 1242 /* no timers in RTR-mode */ 1243 hrtimer_cancel(&op->thrtimer); 1244 hrtimer_cancel(&op->timer); 1245 1246 /* 1247 * funny feature in RX(!)_SETUP only for RTR-mode: 1248 * copy can_id into frame BUT without RTR-flag to 1249 * prevent a full-load-loopback-test ... ;-] 1250 */ 1251 if ((op->flags & TX_CP_CAN_ID) || 1252 (frame0->can_id == op->can_id)) 1253 frame0->can_id = op->can_id & ~CAN_RTR_FLAG; 1254 1255 } else { 1256 if (op->flags & SETTIMER) { 1257 1258 /* set timer value */ 1259 op->ival1 = msg_head->ival1; 1260 op->ival2 = msg_head->ival2; 1261 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); 1262 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); 1263 1264 /* disable an active timer due to zero value? 
*/ 1265 if (!op->kt_ival1) 1266 hrtimer_cancel(&op->timer); 1267 1268 /* 1269 * In any case cancel the throttle timer, flush 1270 * potentially blocked msgs and reset throttle handling 1271 */ 1272 op->kt_lastmsg = 0; 1273 hrtimer_cancel(&op->thrtimer); 1274 bcm_rx_thr_flush(op); 1275 } 1276 1277 if ((op->flags & STARTTIMER) && op->kt_ival1) 1278 hrtimer_start(&op->timer, op->kt_ival1, 1279 HRTIMER_MODE_REL_SOFT); 1280 } 1281 1282 /* now we can register for can_ids, if we added a new bcm_op */ 1283 if (do_rx_register) { 1284 if (ifindex) { 1285 struct net_device *dev; 1286 1287 dev = dev_get_by_index(sock_net(sk), ifindex); 1288 if (dev) { 1289 err = can_rx_register(sock_net(sk), dev, 1290 op->can_id, 1291 REGMASK(op->can_id), 1292 bcm_rx_handler, op, 1293 "bcm", sk); 1294 1295 op->rx_reg_dev = dev; 1296 dev_put(dev); 1297 } 1298 1299 } else 1300 err = can_rx_register(sock_net(sk), NULL, op->can_id, 1301 REGMASK(op->can_id), 1302 bcm_rx_handler, op, "bcm", sk); 1303 if (err) { 1304 /* this bcm rx op is broken -> remove it */ 1305 list_del_rcu(&op->list); 1306 bcm_remove_op(op); 1307 return err; 1308 } 1309 } 1310 1311 return msg_head->nframes * op->cfsiz + MHSIZ; 1312 } 1313 1314 /* 1315 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg) 1316 */ 1317 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk, 1318 int cfsiz) 1319 { 1320 struct sk_buff *skb; 1321 struct net_device *dev; 1322 int err; 1323 1324 /* we need a real device to send frames */ 1325 if (!ifindex) 1326 return -ENODEV; 1327 1328 skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL); 1329 if (!skb) 1330 return -ENOMEM; 1331 1332 can_skb_reserve(skb); 1333 1334 err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz); 1335 if (err < 0) { 1336 kfree_skb(skb); 1337 return err; 1338 } 1339 1340 dev = dev_get_by_index(sock_net(sk), ifindex); 1341 if (!dev) { 1342 kfree_skb(skb); 1343 return -ENODEV; 1344 } 1345 1346 can_skb_prv(skb)->ifindex = 
dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	/* number of bytes consumed: one CAN frame + message head */
	return cfsiz + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 *
 * Each message consists of a struct bcm_msg_head followed by
 * msg_head.nframes CAN frames of cfsiz bytes each. Returns the number
 * of processed bytes or a negative error code.
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int cfsiz;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ)
		return -EINVAL;

	/* read message head information */
	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	/* the trailing data must be a whole number of CAN (FD) frames */
	cfsiz = CFSIZ(msg_head.flags);
	if ((size - MHSIZ) % cfsiz)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < BCM_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (!dev)
				return -ENODEV;

			/* only accept CAN netdevices */
			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* the socket lock serializes all opcode handling below */
	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one CAN frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 *
 * Called from bcm_notifier() for NETDEV_UNREGISTER and NETDEV_DOWN
 * events on CAN devices; takes the socket lock itself.
 */
static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	/* only handle events for devices in this socket's netns */
	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
#if IS_ENABLED(CONFIG_PROC_FS)
			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
				bo->bcm_proc_read = NULL;
			}
#endif
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev
= 1;
		}

		release_sock(sk);

		/* report the error outside the socket lock */
		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
	}
}

static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	/*
	 * Walk all BCM sockets. bcm_busy_notifier marks the socket that is
	 * currently being notified so that bcm_release() can wait for a
	 * concurrent notification to finish before unlinking the socket.
	 * The lock is dropped around bcm_notify() as it takes the sk lock.
	 */
	spin_lock(&bcm_notifier_lock);
	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
		spin_unlock(&bcm_notifier_lock);
		bcm_notify(bcm_busy_notifier, msg, dev);
		spin_lock(&bcm_notifier_lock);
	}
	bcm_busy_notifier = NULL;
	spin_unlock(&bcm_notifier_lock);
	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	spin_lock(&bcm_notifier_lock);
	list_add_tail(&bo->notifier, &bcm_notifier_list);
	spin_unlock(&bcm_notifier_lock);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (!sk)
		return 0;

	net = sock_net(sk);
	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	/*
	 * wait until a potentially concurrent bcm_notifier() is done with
	 * this socket, then unlink it from the notifier list
	 */
	spin_lock(&bcm_notifier_lock);
	while (bcm_busy_notifier == bo) {
		spin_unlock(&bcm_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&bcm_notifier_lock);
	}
	list_del(&bo->notifier);
	spin_unlock(&bcm_notifier_lock);

	lock_sock(sk);

#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove procfs entry */
	if (net->can.bcmproc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
#endif /* CONFIG_PROC_FS */

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(net, NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

	}

	/*
	 * wait for concurrent readers (bcm_rx_handler) to finish before
	 * the rx ops are actually freed below
	 */
	synchronize_rcu();

	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
		bcm_remove_op(op);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct net *net = sock_net(sk);
	int ret = 0;

	if (len < BCM_MIN_NAMELEN)
		return -EINVAL;

	lock_sock(sk);

	/* a BCM socket can only be connected once */
	if (bo->bound) {
		ret = -EISCONN;
		goto fail;
	}

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(net, addr->can_ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto fail;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			ret = -ENODEV;
			goto fail;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

#if IS_ENABLED(CONFIG_PROC_FS)
	if (net->can.bcmproc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
							   net->can.bcmproc_dir,
							   bcm_proc_show, sk);
		if (!bo->bcm_proc_read) {
			ret = -ENOMEM;
			goto fail;
		}
	}
#endif /* CONFIG_PROC_FS */

	bo->bound = 1;

fail:
	release_sock(sk);

	return ret;
}

static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int err;

	skb = skb_recv_datagram(sk, flags, &error);
	if (!skb)
		return error;

	/* never copy more than the queued message contains */
	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(BCM_MIN_NAMELEN);
		msg->msg_namelen = BCM_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in bcm_send_to_user() */
	msg->msg_flags |= *(bcm_flags(skb));
	skb_free_datagram(sk, skb);

	return size;
}

static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops bcm_ops = {
	.family = PF_CAN,
	.release = bcm_release,
	.bind = sock_no_bind,
	.connect = bcm_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = bcm_sock_no_ioctlcmd,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = bcm_sendmsg,
	.recvmsg = bcm_recvmsg,
	.mmap = sock_no_mmap,
};

static struct proto bcm_proto __read_mostly = {
	.name = "CAN_BCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.ops = &bcm_ops,
	.prot = &bcm_proto,
};

static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* create /proc/net/can-bcm directory */
	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove /proc/net/can-bcm directory */
	if (net->can.bcmproc_dir)
		remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}

static struct pernet_operations canbcm_pernet_ops __read_mostly = {
	.init = canbcm_pernet_init,
	.exit = canbcm_pernet_exit,
};

static struct notifier_block canbcm_notifier = {
	.notifier_call = bcm_notifier
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol\n");

	err = register_pernet_subsys(&canbcm_pernet_ops);
	if (err)
		return err;

	err = register_netdevice_notifier(&canbcm_notifier);
	if (err)
		goto register_notifier_failed;

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		goto register_proto_failed;
	}

	return 0;

	/* unwind the registrations in reverse order on failure */
register_proto_failed:
	unregister_netdevice_notifier(&canbcm_notifier);
register_notifier_failed:
	unregister_pernet_subsys(&canbcm_pernet_ops);
	return err;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);
	unregister_netdevice_notifier(&canbcm_notifier);
	unregister_pernet_subsys(&canbcm_pernet_ops);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);