/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR  0x80 /* element has not been sent due to throttle feature */
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
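/*
 * The two flags above live in the otherwise unused upper bits of
 * last_frames[index].can_dlc: bcm_rx_update_and_send() sets them to mark an
 * element as received resp. throttled, and bcm_send_to_user() strips them
 * with BCM_CAN_DLC_MASK before the frame is copied back to userspace.
 */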
/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

#define CAN_BCM_VERSION CAN_VERSION

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

/* easy access to can_frame payload */
static inline u64 GET_U64(const struct can_frame *cp)
{
	return *(u64 *)cp->data;
}

struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	struct tasklet_struct tsklet, thrtsklet;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	u32 count;
	u32 nframes;
	u32 currframe;
	struct can_frame *frames;
	struct can_frame *last_frames;
	struct can_frame sframe;
	struct can_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

static struct proc_dir_entry *proc_dir;

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [32]; /* inode number in decimal with \0 */
};

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)
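/*
 * Every message exchanged with userspace over a BCM socket consists of a
 * struct bcm_msg_head, optionally followed by msg_head.nframes CAN frames.
 * bcm_sendmsg() rejects anything that is not MHSIZ plus a multiple of CFSIZ.
 * A TX_SETUP datagram for a single cyclic frame can for example be built
 * from userspace as:
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg;
 */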
/*
 * procfs functions
 */
static char *bcm_proc_getifname(char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct sock *sk = (struct sock *)m->private;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ",
			   op->can_id, bcm_proc_getifname(ifname, op->ifindex));
		seq_printf(m, "[%u]%c ", op->nframes,
			   (op->flags & RX_CHECK_DLC)?'d':' ');
		if (op->kt_ival1.tv64)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100)?"near ":"", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s [%u] ",
			   op->can_id,
			   bcm_proc_getifname(ifname, op->ifindex),
			   op->nframes);

		if (op->kt_ival1.tv64)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}

static int bcm_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm_proc_show, PDE_DATA(inode));
}

static const struct file_operations bcm_proc_fops = {
	.owner = THIS_MODULE,
	.open = bcm_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct can_frame *cf = &op->frames[op->currframe];

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(&init_net, op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;

	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
out:
	dev_put(dev);
}
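/*
 * Note that bcm_can_tx() is called from process context (TX_SETUP handling
 * under lock_sock()) as well as from softirq context (the TX tasklet and the
 * RTR reply in bcm_rx_handler()), which is why the skb above is allocated
 * with gfp_any() instead of a fixed GFP_KERNEL.
 */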
/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct can_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct can_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * CFSIZ;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));

	if (head->nframes) {
		/* can_frames starting here */
		firstframe = (struct can_frame *)skb_tail_pointer(skb);

		memcpy(skb_put(skb, datalen), frames, datalen);

		/*
		 * the BCM uses the can_dlc-element of the can_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (op->kt_ival1.tv64 && op->count)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival1),
			      HRTIMER_MODE_ABS);
	else if (op->kt_ival2.tv64)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival2),
			      HRTIMER_MODE_ABS);
}

static void bcm_tx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2.tv64)
		bcm_can_tx(op);

	bcm_tx_start_timer(op);
}

/*
 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	tasklet_schedule(&op->tsklet);

	return HRTIMER_NORESTART;
}
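/*
 * Summary of the cyclic TX schedule implemented above: as long as op->count
 * is non-zero the frame is (re)sent every kt_ival1, afterwards transmission
 * continues every kt_ival2 (if set). When the counted phase ends and
 * TX_COUNTEVT is set, a TX_EXPIRED notification is queued to the socket.
 * The hrtimer callback itself only schedules the tasklet, so the actual
 * work runs in softirq context.
 */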
/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);

	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct can_frame *lastdata,
				   const struct can_frame *rxdata)
{
	memcpy(lastdata, rxdata, CFSIZ);

	/* mark as used and throttled by default */
	lastdata->can_dlc |= (RX_RECV|RX_THR);

	/* throttling mode inactive ? */
	if (!op->kt_ival2.tv64) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg.tv64)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was big enough, so throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct can_frame *rxdata)
{
	/*
	 * no one uses the MSBs of can_dlc for comparison,
	 * so we use them here to detect the first time of reception
	 */

	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	/* do a real check in can_frame data section */

	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in can_frame dlc */
		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
					BCM_CAN_DLC_MASK)) {
			bcm_rx_update_and_send(op, &op->last_frames[index],
					       rxdata);
			return;
		}
	}
}
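/*
 * In other words: op->frames[index] acts as a bit mask for the change
 * detection above. Only payload bits that are set in the filter frame are
 * compared between the new and the previously stored data; a filter frame
 * with data[0] = 0xFF, for example, reports RX_CHANGED only when byte 0 of
 * the payload changes. With RX_CHECK_DLC the data length code is compared
 * in addition.
 */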
/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1.tv64)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}

static void bcm_rx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	/* create notification to user */
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);
}

/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	/* schedule before NET_RX_SOFTIRQ */
	tasklet_hi_schedule(&op->tsklet);

	/* no restart of the timer is done here! */

	/* if the user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received can_frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * CFSIZ);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
				  unsigned int index)
{
	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
		if (update)
			bcm_rx_changed(op, &op->last_frames[index]);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 *
 * update == 0 : just check if throttled data is available (any irq context)
 * update == 1 : check and send throttled data to userspace (soft_irq context)
 */
static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, update, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, update, 0);
	}

	return updated;
}

static void bcm_rx_thr_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;

	/* push the changed data to the userspace */
	bcm_rx_thr_flush(op, 1);
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	tasklet_schedule(&op->thrtsklet);

	if (bcm_rx_thr_flush(op, 0)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = ktime_set(0, 0);
		return HRTIMER_NORESTART;
	}
}
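/*
 * Putting the RX timers together: kt_ival1 is the reception timeout that
 * triggers RX_TIMEOUT notifications via op->timer, while kt_ival2 defines
 * the minimum gap between two RX_CHANGED updates. Content changes arriving
 * faster than kt_ival2 are only marked RX_THR in last_frames[] and flushed
 * to the socket by the throttle timer above.
 */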
/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct can_frame *rxframe = (struct can_frame *)skb->data;
	unsigned int i;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	if (op->can_id != rxframe->can_id)
		return;

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
			    (GET_U64(&op->frames[0]) &
			     GET_U64(&op->frames[i]))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}
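/*
 * In the multiplex case each op->frames[i] (i >= 1) therefore plays a double
 * role: its payload selects the mux slot (compared to the received frame
 * under the mask in op->frames[0]) and at the same time serves as the bit
 * mask that bcm_rx_cmp_to_index() uses for the content change detection of
 * that slot.
 */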
/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
				  int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if (op->tsklet.func)
		tasklet_kill(&op->tsklet);

	if (op->thrtsklet.func)
		tasklet_kill(&op->thrtsklet);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
				  bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems); can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(&init_net,
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(NULL, op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}
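/*
 * A rough userspace sketch of the TX side handled below (values are purely
 * illustrative): after connect()ing the BCM socket to a CAN interface, a
 * cyclic transmission of one frame every 100 ms is set up with:
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg = { 0 };
 *
 *	msg.msg_head.opcode  = TX_SETUP;
 *	msg.msg_head.can_id  = 0x123;
 *	msg.msg_head.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	msg.msg_head.nframes = 1;
 *	msg.msg_head.ival2.tv_usec = 100000;
 *	msg.frame.can_dlc    = 2;
 *	send(s, &msg, sizeof(msg), 0);
 *
 * With count == 0 and only ival2 set, bcm_tx_setup() transmits the frame
 * immediately (STARTTIMER implies TX_ANNOUNCE) and then every ival2.
 */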
/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one can_frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);

	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update can_frames content */
		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;

		/* create array for can_frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_tx_timeout_handler;

		/* initialize tasklet for tx countevent notification */
		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
			     (unsigned long) op);

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	op->flags = msg_head->flags;

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send can_frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * CFSIZ + MHSIZ;
}
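/*
 * The RX counterpart, roughly sketched from userspace (illustrative values):
 * monitor can_id 0x123, report content changes of payload byte 0 only and
 * signal RX_TIMEOUT if the frame stays away for more than one second:
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg = { 0 };
 *
 *	msg.msg_head.opcode  = RX_SETUP;
 *	msg.msg_head.can_id  = 0x123;
 *	msg.msg_head.flags   = SETTIMER | STARTTIMER;
 *	msg.msg_head.ival1.tv_sec = 1;
 *	msg.msg_head.nframes = 1;
 *	msg.frame.data[0]    = 0xFF;
 *	send(s, &msg, sizeof(msg), 0);
 *
 * Matching updates are delivered as RX_CHANGED datagrams via recvmsg().
 */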
/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1  */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update can_frames content */
			err = memcpy_from_msg((u8 *)op->frames, msg,
					      msg_head->nframes * CFSIZ);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
		}

		op->nframes = msg_head->nframes;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;

		if (msg_head->nframes > 1) {
			/* create array for can_frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received can_frames */
			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg((u8 *)op->frames, msg,
					      msg_head->nframes * CFSIZ);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		/* initialize tasklet for rx timeout notification */
		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
			     (unsigned long) op);

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* initialize tasklet for rx throttle handling */
		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
			     (unsigned long) op);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */
	op->flags = msg_head->flags;

	if (op->flags & RX_RTR_FRAME) {

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (op->frames[0].can_id == op->can_id))
			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1.tv64)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = ktime_set(0, 0);
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op, 1);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (dev) {
				err = can_rx_register(dev, op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm");

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm");
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * CFSIZ + MHSIZ;
}
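/*
 * Quick reference for the RX_SETUP flags handled above:
 *   RX_FILTER_ID       - pass every frame with the matching can_id, no
 *                        content filtering (forced when nframes == 0)
 *   RX_CHECK_DLC       - also report changes of the data length code
 *   RX_NO_AUTOTIMER    - do not restart the timeout timer on reception
 *   RX_ANNOUNCE_RESUME - clear last_frames[] on timeout, so the next frame
 *                        is announced as RX_CHANGED again
 *   RX_RTR_FRAME       - answer incoming RTR requests with op->frames[0]
 *   SETTIMER, STARTTIMER - program resp. start ival1 (timeout) and ival2
 *                        (throttle) for this op
 */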
/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);

	err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	can_skb_prv(skb)->ifindex = dev->ifindex;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return CFSIZ + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* read message head information */

	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one can_frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}
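/*
 * Locking note: bcm_sendmsg() manipulates the per-socket tx_ops/rx_ops lists
 * only under lock_sock(sk); the netdevice notifier below takes the same
 * socket lock before touching those lists, so no additional spinlock is
 * needed for the op lists.
 */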
/*
 * notification handler for netdevice status changes
 */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	bo->notifier.notifier_call = bcm_notifier;

	register_netdevice_notifier(&bo->notifier);

	return 0;
}
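/*
 * From userspace a BCM socket is typically created and attached to one CAN
 * interface like this (a minimal sketch; error handling and the interface
 * name are illustrative):
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr = { 0 };
 *
 *	addr.can_family = AF_CAN;
 *	addr.can_ifindex = if_nametoindex("can0");
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 * All subsequent TX_SETUP/RX_SETUP datagrams are written to this socket via
 * send() and notifications are read back with recv().
 */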
/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (sk == NULL)
		return 0;

	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	unregister_netdevice_notifier(&bo->notifier);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems);
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

		bcm_remove_op(op);
	}

	/* remove procfs entry */
	if (proc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, proc_dir);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);

	if (len < sizeof(*addr))
		return -EINVAL;

	if (bo->bound)
		return -EISCONN;

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev)
			return -ENODEV;

		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			return -ENODEV;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

	bo->bound = 1;

	if (proc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
						     proc_dir,
						     &bcm_proc_fops, sk);
	}

	return 0;
}

static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_can));
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static const struct proto_ops bcm_ops = {
	.family = PF_CAN,
	.release = bcm_release,
	.bind = sock_no_bind,
	.connect = bcm_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = can_ioctl,	/* use can_ioctl() from af_can.c */
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = bcm_sendmsg,
	.recvmsg = bcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name = "CAN_BCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.ops = &bcm_ops,
	.prot = &bcm_proto,
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	/* create /proc/net/can-bcm directory */
	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	if (proc_dir)
		remove_proc_entry("can-bcm", init_net.proc_net);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);