/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR  0x80 /* element has not been sent due to throttle feature */
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

#define CAN_BCM_VERSION CAN_VERSION

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

/* easy access to can_frame payload */
static inline u64 GET_U64(const struct can_frame *cp)
{
	return *(u64 *)cp->data;
}

struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	struct tasklet_struct tsklet, thrtsklet;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	u32 count;
	u32 nframes;
	u32 currframe;
	struct can_frame *frames;
	struct can_frame *last_frames;
	struct can_frame sframe;
	struct can_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

static struct proc_dir_entry *proc_dir;

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [32]; /* inode number in decimal with \0 */
};

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
static char *bcm_proc_getifname(char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct sock *sk = (struct sock *)m->private;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ",
			   op->can_id, bcm_proc_getifname(ifname, op->ifindex));
		seq_printf(m, "[%u]%c ", op->nframes,
			   (op->flags & RX_CHECK_DLC)?'d':' ');
		if (op->kt_ival1.tv64)
			seq_printf(m, "timeo=%lld ",
				   (long long)
				   ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "thr=%lld ",
				   (long long)
				   ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100)?"near ":"", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s [%u] ",
			   op->can_id,
			   bcm_proc_getifname(ifname, op->ifindex),
			   op->nframes);

		if (op->kt_ival1.tv64)
			seq_printf(m, "t1=%lld ",
				   (long long) ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "t2=%lld ",
				   (long long) ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}

static int bcm_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm_proc_show, PDE_DATA(inode));
}

static const struct file_operations bcm_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct can_frame *cf = &op->frames[op->currframe];

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(&init_net, op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct can_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct can_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * CFSIZ;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));

	if (head->nframes) {
		/* can_frames starting here */
		firstframe = (struct can_frame *)skb_tail_pointer(skb);

		memcpy(skb_put(skb, datalen), frames, datalen);

		/*
		 * the BCM uses the can_dlc-element of the can_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (op->kt_ival1.tv64 && op->count)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival1),
			      HRTIMER_MODE_ABS);
	else if (op->kt_ival2.tv64)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival2),
			      HRTIMER_MODE_ABS);
}

static void bcm_tx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2.tv64)
		bcm_can_tx(op);

	bcm_tx_start_timer(op);
}

/*
 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	tasklet_schedule(&op->tsklet);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);

	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct can_frame *lastdata,
				   const struct can_frame *rxdata)
{
	memcpy(lastdata, rxdata, CFSIZ);

	/* mark as used and throttled by default */
	lastdata->can_dlc |= (RX_RECV|RX_THR);

	/* throttling mode inactive ? */
	if (!op->kt_ival2.tv64) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg.tv64)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was big enough that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct can_frame *rxdata)
{
	/*
	 * no one uses the MSBs of can_dlc for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	/* do a real check in can_frame data section */

	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in can_frame dlc */
		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
					BCM_CAN_DLC_MASK)) {
			bcm_rx_update_and_send(op, &op->last_frames[index],
					       rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1.tv64)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}

static void bcm_rx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	/* create notification to user */
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);
}

/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	/* schedule before NET_RX_SOFTIRQ */
	tasklet_hi_schedule(&op->tsklet);

	/* no restart of the timer is done here! */

	/* if the user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received can_frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * CFSIZ);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
				  unsigned int index)
{
	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
		if (update)
			bcm_rx_changed(op, &op->last_frames[index]);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 *
 * update == 0 : just check if throttled data is available (any irq context)
 * update == 1 : check and send throttled data to userspace (soft_irq context)
 */
static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, update, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, update, 0);
	}

	return updated;
}

static void bcm_rx_thr_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;

	/* push the changed data to the userspace */
	bcm_rx_thr_flush(op, 1);
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	tasklet_schedule(&op->thrtsklet);

	if (bcm_rx_thr_flush(op, 0)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = ktime_set(0, 0);
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct can_frame *rxframe = (struct can_frame *)skb->data;
	unsigned int i;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	if (op->can_id != rxframe->can_id)
		return;

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
			    (GET_U64(&op->frames[0]) &
			     GET_U64(&op->frames[i]))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
				  int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if (op->tsklet.func)
		tasklet_kill(&op->tsklet);

	if (op->thrtsklet.func)
		tasklet_kill(&op->thrtsklet);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
				  bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems), can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(&init_net,
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(NULL, op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one can_frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);

	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update can_frames content */
		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;

		/* create array for can_frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_tx_timeout_handler;

		/* initialize tasklet for tx countevent notification */
		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
			     (unsigned long) op);

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	op->flags = msg_head->flags;

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send can_frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update can_frames content */
			err = memcpy_from_msg((u8 *)op->frames, msg,
					      msg_head->nframes * CFSIZ);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
		}

		op->nframes = msg_head->nframes;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;

		if (msg_head->nframes > 1) {
			/* create array for can_frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received can_frames */
			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg((u8 *)op->frames, msg,
					      msg_head->nframes * CFSIZ);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		/* initialize tasklet for rx timeout notification */
		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
			     (unsigned long) op);

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* initialize tasklet for rx throttle handling */
		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
			     (unsigned long) op);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */
	op->flags = msg_head->flags;

	if (op->flags & RX_RTR_FRAME) {

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (op->frames[0].can_id == op->can_id))
			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1.tv64)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = ktime_set(0, 0);
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op, 1);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (dev) {
				err = can_rx_register(dev, op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm");

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm");
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);

	err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return CFSIZ + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* read message head information */

	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one can_frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}
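
/*
 * Usage sketch (userspace, not part of this module): a minimal TX_SETUP
 * command as dispatched by bcm_sendmsg() above to bcm_tx_setup(). It asks
 * the BCM to send one frame with can_id 0x123 every 100 ms. The interface
 * name "can0", the can_id and the payload bytes are made-up values for
 * illustration, and error handling is omitted.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <linux/can.h>
 *	#include <linux/can/bcm.h>
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} tx;
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *
 *	addr.can_ifindex = if_nametoindex("can0");
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	memset(&tx, 0, sizeof(tx));
 *	tx.msg_head.opcode  = TX_SETUP;
 *	tx.msg_head.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	tx.msg_head.can_id  = 0x123;
 *	tx.msg_head.nframes = 1;
 *	tx.msg_head.ival2.tv_usec = 100000;
 *	tx.frame.can_dlc = 2;
 *	tx.frame.data[0] = 0xde;
 *	tx.frame.data[1] = 0xad;
 *
 *	write(s, &tx, sizeof(tx));
 *
 * The written size is MHSIZ + CFSIZ, as checked in bcm_sendmsg(). With
 * count == 0 and only ival2 set (100 ms), bcm_tx_start_timer() keeps the
 * cyclic transmission running until TX_DELETE or close().
 */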

/*
 * notification handler for netdevice status changes
 */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	bo->notifier.notifier_call = bcm_notifier;

	register_netdevice_notifier(&bo->notifier);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (sk == NULL)
		return 0;

	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	unregister_netdevice_notifier(&bo->notifier);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems),
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

		bcm_remove_op(op);
	}

	/* remove procfs entry */
	if (proc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, proc_dir);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);

	if (len < sizeof(*addr))
		return -EINVAL;

	if (bo->bound)
		return -EISCONN;

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev)
			return -ENODEV;

		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			return -ENODEV;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

	bo->bound = 1;

	if (proc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
						     proc_dir,
						     &bcm_proc_fops, sk);
	}

	return 0;
}

static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_can));
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static const struct proto_ops bcm_ops = {
	.family		= PF_CAN,
	.release	= bcm_release,
	.bind		= sock_no_bind,
	.connect	= bcm_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= datagram_poll,
	.ioctl		= can_ioctl,	/* use can_ioctl() from af_can.c */
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= bcm_sendmsg,
	.recvmsg	= bcm_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name		= "CAN_BCM",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct bcm_sock),
	.init		= bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type		= SOCK_DGRAM,
	.protocol	= CAN_BCM,
	.ops		= &bcm_ops,
	.prot		= &bcm_proto,
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	/* create /proc/net/can-bcm directory */
	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	if (proc_dir)
		remove_proc_entry("can-bcm", init_net.proc_net);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);
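
/*
 * Usage sketch (userspace, not part of this module): a minimal RX_SETUP
 * command for the multiplex filter evaluated in bcm_rx_handler(), where
 * frames[0] carries the MUX mask and the following frames the relevant
 * content patterns. The can_id 0x321 and the data values are assumptions
 * for illustration; "s" is a socket(PF_CAN, SOCK_DGRAM, CAN_BCM) that has
 * been connect()ed to a CAN interface.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame[2];
 *	} rx;
 *
 *	memset(&rx, 0, sizeof(rx));
 *	rx.msg_head.opcode  = RX_SETUP;
 *	rx.msg_head.can_id  = 0x321;
 *	rx.msg_head.flags   = 0;
 *	rx.msg_head.nframes = 2;
 *	rx.frame[0].data[0] = 0xff;
 *	rx.frame[1].data[0] = 0x01;
 *	rx.frame[1].data[1] = 0xff;
 *
 *	write(s, &rx, sizeof(rx));
 *
 * frame[0] is the MUX mask (only data[0] is compared), frame[1] matches
 * received frames whose data[0] equals 0x01 and monitors data[1] for
 * changes (see bcm_rx_cmp_to_index()). Content changes are delivered as
 * RX_CHANGED messages readable on the same socket.
 */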