/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Send feedback to <socketcan-users@lists.berlios.de>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/bcm.h>
#include <net/sock.h>
#include <net/net_namespace.h>
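/*
 * Illustrative userspace usage (a sketch, not part of this module): a BCM
 * socket is a SOCK_DGRAM socket of the PF_CAN family using the CAN_BCM
 * protocol; commands and notifications are exchanged as a struct
 * bcm_msg_head optionally followed by can_frames. The interface name
 * "can0" below is an example assumption.
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *
 *	addr.can_ifindex = if_nametoindex("can0");
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 */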
/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR  0x80 /* element has not been sent due to the throttle feature */
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

#define CAN_BCM_VERSION CAN_VERSION
static __initdata const char banner[] = KERN_INFO
	"can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

/* easy access to can_frame payload */
static inline u64 GET_U64(const struct can_frame *cp)
{
	return *(u64 *)cp->data;
}

struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	int flags;
	unsigned long frames_abs, frames_filtered;
	struct timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	int count;
	int nframes;
	int currframe;
	struct can_frame *frames;
	struct can_frame *last_frames;
	struct can_frame sframe;
	struct can_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

static struct proc_dir_entry *proc_dir;

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname[9]; /* pointer printed in ASCII with \0 */
};

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
static char *bcm_proc_getifname(int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	/* no usage counting */
	dev = __dev_get_by_index(&init_net, ifindex);
	if (dev)
		return dev->name;

	return "???";
}
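/*
 * Sketch of the procfs output generated below (one header line per socket,
 * one line per rx_op/tx_op; all values are illustrative only):
 *
 *   >>> socket <ptr> / sk <ptr> / bo <ptr> / dropped 0 / bound can0 <<<
 *   rx_op: 123 can0  [1]  timeo=5000000 thr=250000 # recv 42 (1234) => reduction: 97%
 *   tx_op: 321 can0 [1] t1=100000 # sent 77
 */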
static int bcm_read_proc(char *page, char **start, off_t off,
			 int count, int *eof, void *data)
{
	int len = 0;
	struct sock *sk = (struct sock *)data;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	len += snprintf(page + len, PAGE_SIZE - len, ">>> socket %p",
			sk->sk_socket);
	len += snprintf(page + len, PAGE_SIZE - len, " / sk %p", sk);
	len += snprintf(page + len, PAGE_SIZE - len, " / bo %p", bo);
	len += snprintf(page + len, PAGE_SIZE - len, " / dropped %lu",
			bo->dropped_usr_msgs);
	len += snprintf(page + len, PAGE_SIZE - len, " / bound %s",
			bcm_proc_getifname(bo->ifindex));
	len += snprintf(page + len, PAGE_SIZE - len, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		len += snprintf(page + len, PAGE_SIZE - len,
				"rx_op: %03X %-5s ",
				op->can_id, bcm_proc_getifname(op->ifindex));
		len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ",
				op->nframes,
				(op->flags & RX_CHECK_DLC)?'d':' ');
		if (op->kt_ival1.tv64)
			len += snprintf(page + len, PAGE_SIZE - len,
					"timeo=%lld ",
					(long long)
					ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			len += snprintf(page + len, PAGE_SIZE - len,
					"thr=%lld ",
					(long long)
					ktime_to_us(op->kt_ival2));

		len += snprintf(page + len, PAGE_SIZE - len,
				"# recv %ld (%ld) => reduction: ",
				op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		len += snprintf(page + len, PAGE_SIZE - len, "%s%ld%%\n",
				(reduction == 100)?"near ":"", reduction);

		if (len > PAGE_SIZE - 200) {
			/* mark output cut off */
			len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
			break;
		}
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		len += snprintf(page + len, PAGE_SIZE - len,
				"tx_op: %03X %s [%d] ",
				op->can_id, bcm_proc_getifname(op->ifindex),
				op->nframes);

		if (op->kt_ival1.tv64)
			len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ",
					(long long) ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ",
					(long long) ktime_to_us(op->kt_ival2));

		len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n",
				op->frames_abs);

		if (len > PAGE_SIZE - 100) {
			/* mark output cut off */
			len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
			break;
		}
	}

	len += snprintf(page + len, PAGE_SIZE - len, "\n");

	*eof = 1;
	return len;
}

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct can_frame *cf = &op->frames[op->currframe];

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(&init_net, op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(CFSIZ, gfp_any());
	if (!skb)
		goto out;

	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);

	/* send with loopback */
	skb->dev = dev;
	skb->sk = op->sk;
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
out:
	dev_put(dev);
}
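/*
 * If the receive queue of the socket is full, the notification created
 * below is dropped and only accounted in dropped_usr_msgs, which shows up
 * as "dropped" in the procfs output above.
 */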
/*
 * bcm_send_to_user - send a BCM message to userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct can_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct can_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	int datalen = head->nframes * CFSIZ;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));

	if (head->nframes) {
		/* can_frames starting here */
		firstframe = (struct can_frame *)skb_tail_pointer(skb);

		memcpy(skb_put(skb, datalen), frames, datalen);

		/*
		 * the BCM uses the can_dlc-element of the can_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

/*
 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {
			struct bcm_msg_head msg_head;

			/* create notification to user */
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
	}

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		/* send (next) frame */
		bcm_can_tx(op);
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
		ret = HRTIMER_RESTART;

	} else {
		if (op->kt_ival2.tv64) {

			/* send (next) frame */
			bcm_can_tx(op);
			hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
			ret = HRTIMER_RESTART;
		}
	}

	return ret;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}
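/*
 * Receive side throttling in short: when ival2 is set for a rx_op, content
 * updates are not sent to the user more often than once per ival2. A
 * suppressed update is marked with RX_THR in last_frames[].can_dlc and
 * flushed later by the thrtimer (see bcm_rx_thr_handler/bcm_rx_thr_flush);
 * kt_lastmsg remembers when the last update was sent immediately.
 */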
/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct can_frame *lastdata,
				   struct can_frame *rxdata)
{
	memcpy(lastdata, rxdata, CFSIZ);

	/* mark as used */
	lastdata->can_dlc |= RX_RECV;

	/* throttling mode inactive OR data update already on the run? */
	if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, rxdata);
		return;
	}

	if (hrtimer_active(&op->thrtimer)) {
		/* mark as 'throttled' */
		lastdata->can_dlc |= RX_THR;
		return;
	}

	if (!op->kt_lastmsg.tv64) {
		/* send first RX_CHANGED to the user immediately */
		bcm_rx_changed(op, rxdata);
		op->kt_lastmsg = ktime_get();
		return;
	}

	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* mark as 'throttled' and start timer */
		lastdata->can_dlc |= RX_THR;
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was big enough, so throttling was not needed here */
	bcm_rx_changed(op, rxdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
				struct can_frame *rxdata)
{
	/*
	 * no one uses the MSBs of can_dlc for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	/* do a real check in can_frame data section */

	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in can_frame dlc */
		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
					BCM_CAN_DLC_MASK)) {
			bcm_rx_update_and_send(op, &op->last_frames[index],
					       rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1.tv64)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}
/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);

	/* no restart of the timer is done here! */

	/* if the user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received can_frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * CFSIZ);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to userspace
 */
static int bcm_rx_thr_flush(struct bcm_op *op)
{
	int updated = 0;

	if (op->nframes > 1) {
		int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++) {
			if ((op->last_frames) &&
			    (op->last_frames[i].can_dlc & RX_THR)) {
				op->last_frames[i].can_dlc &= ~RX_THR;
				bcm_rx_changed(op, &op->last_frames[i]);
				updated++;
			}
		}

	} else {
		/* for RX_FILTER_ID and simple filter */
		if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) {
			op->last_frames[0].can_dlc &= ~RX_THR;
			bcm_rx_changed(op, &op->last_frames[0]);
			updated++;
		}
	}

	return updated;
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	if (bcm_rx_thr_flush(op)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = ktime_set(0, 0);
		return HRTIMER_NORESTART;
	}
}
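/*
 * Per received frame, bcm_rx_handler() below dispatches in this order:
 * RTR replies (RX_RTR_FRAME), plain can_id filtering (RX_FILTER_ID),
 * a single content compare (nframes == 1), or a multiplex compare where
 * frames[0] holds the MUX mask and frames[1..nframes-1] the patterns.
 */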
/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct can_frame rxframe;
	int i;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	if (skb->len == sizeof(rxframe)) {
		memcpy(&rxframe, skb->data, sizeof(rxframe));
		/* save rx timestamp */
		op->rx_stamp = skb->tstamp;
		/* save originator for recvfrom() */
		op->rx_ifindex = skb->dev->ifindex;
		/* update statistics */
		op->frames_abs++;
		kfree_skb(skb);

	} else {
		kfree_skb(skb);
		return;
	}

	if (op->can_id != rxframe.can_id)
		return;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe);
		bcm_rx_starttimer(op);
		return;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, &rxframe);
		bcm_rx_starttimer(op);
		return;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) ==
			    (GET_U64(&op->frames[0]) &
			     GET_U64(&op->frames[i]))) {
				bcm_rx_cmp_to_index(op, i, &rxframe);
				break;
			}
		}
		bcm_rx_starttimer(op);
	}
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
				  int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);

	return;
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
				  bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}
/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(&init_net,
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(NULL, op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}
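/*
 * Illustrative userspace TX_SETUP request for the handler below (a sketch,
 * written to a connected BCM socket as in the example near the top of this
 * file; can_id 0x123 and the 100 ms cycle time are example assumptions):
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame[1];
 *	} tx;
 *
 *	memset(&tx, 0, sizeof(tx));
 *	tx.msg_head.opcode = TX_SETUP;
 *	tx.msg_head.flags = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	tx.msg_head.count = 0;			(no limited transmission count)
 *	tx.msg_head.ival2.tv_usec = 100000;	(cycle time of 100 ms)
 *	tx.msg_head.can_id = 0x123;
 *	tx.msg_head.nframes = 1;
 *	tx.frame[0].can_dlc = 2;		(payload left zeroed here)
 *	write(s, &tx, sizeof(tx));
 */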
/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int i, err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* we need at least one can_frame */
	if (msg_head->nframes < 1)
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);

	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update can_frames content */
		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_fromiovec((u8 *)&op->frames[i],
					       msg->msg_iov, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;

		/* create array for can_frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_fromiovec((u8 *)&op->frames[i],
					       msg->msg_iov, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previously received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_tx_timeout_handler;

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	op->flags = msg_head->flags;

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
			hrtimer_cancel(&op->timer);
	}

	if ((op->flags & STARTTIMER) &&
	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {

		/* spec: send can_frame when starting timer */
		op->flags |= TX_ANNOUNCE;

		if (op->kt_ival1.tv64 && (op->count > 0)) {
			/* op->count-- is done in bcm_tx_timeout_handler */
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
		} else
			hrtimer_start(&op->timer, op->kt_ival2,
				      HRTIMER_MODE_REL);
	}

	if (op->flags & TX_ANNOUNCE)
		bcm_can_tx(op);

	return msg_head->nframes * CFSIZ + MHSIZ;
}
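/*
 * Illustrative userspace RX_SETUP request for the handler below (a sketch;
 * can_id 0x123 and the 5 s timeout are example assumptions). With a single
 * frame and no RX_FILTER_ID, frame[0].data acts as the content mask that
 * selects the payload bits whose change triggers an RX_CHANGED message:
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame[1];
 *	} rx;
 *
 *	memset(&rx, 0, sizeof(rx));
 *	rx.msg_head.opcode = RX_SETUP;
 *	rx.msg_head.flags = SETTIMER | STARTTIMER;
 *	rx.msg_head.ival1.tv_sec = 5;		(silence -> RX_TIMEOUT message)
 *	rx.msg_head.can_id = 0x123;
 *	rx.msg_head.nframes = 1;
 *	memset(rx.frame[0].data, 0xFF, 8);	(watch all 64 payload bits)
 *	write(s, &rx, sizeof(rx));
 */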
/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update can_frames content */
			err = memcpy_fromiovec((u8 *)op->frames,
					       msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
		}

		op->nframes = msg_head->nframes;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;

		if (msg_head->nframes > 1) {
			/* create array for can_frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received can_frames */
			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */
	op->flags = msg_head->flags;

	if (op->flags & RX_RTR_FRAME) {

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (op->frames[0].can_id == op->can_id))
			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1.tv64)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = ktime_set(0, 0);
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (dev) {
				err = can_rx_register(dev, op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm");

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm");
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(CFSIZ, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	skb->dev = dev;
	skb->sk = sk;
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return CFSIZ + MHSIZ;
}
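/*
 * Every command written to a BCM socket consists of a struct bcm_msg_head
 * optionally followed by msg_head.nframes can_frames, i.e. the message size
 * is MHSIZ + n * CFSIZ. bcm_sendmsg() below enforces this layout and returns
 * the number of bytes consumed or a negative error code.
 */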
/*
 * bcm_sendmsg - process BCM commands (opcodes) from userspace
 */
static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* read message head information */

	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
	if (ret < 0)
		return ret;

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one can_frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	bo->notifier.notifier_call = bcm_notifier;

	register_netdevice_notifier(&bo->notifier);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op, *next;

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	unregister_netdevice_notifier(&bo->notifier);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

		bcm_remove_op(op);
	}

	/* remove procfs entry */
	if (proc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, proc_dir);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);

	if (bo->bound)
		return -EISCONN;

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev)
			return -ENODEV;

		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			return -ENODEV;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

	bo->bound = 1;

	if (proc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%p", sock);
		bo->bcm_proc_read = create_proc_read_entry(bo->procname, 0644,
							   proc_dir,
							   bcm_read_proc, sk);
	}

	return 0;
}
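/*
 * Illustrative userspace read of a BCM notification (a sketch; the buffer
 * is sized for one appended can_frame, which is what RX_CHANGED carries,
 * while RX_TIMEOUT and TX_EXPIRED carry none):
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg;
 *
 *	if (read(s, &msg, sizeof(msg)) < 0)
 *		perror("read");
 *	else if (msg.msg_head.opcode == RX_CHANGED)
 *		printf("content change for can_id %X\n", msg.msg_head.can_id);
 */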
static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name) {
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static struct proto_ops bcm_ops __read_mostly = {
	.family = PF_CAN,
	.release = bcm_release,
	.bind = sock_no_bind,
	.connect = bcm_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = NULL, /* use can_ioctl() from af_can.c */
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = bcm_sendmsg,
	.recvmsg = bcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name = "CAN_BCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init = bcm_init,
};

static struct can_proto bcm_can_proto __read_mostly = {
	.type = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.capability = -1,
	.ops = &bcm_ops,
	.prot = &bcm_proto,
};

static int __init bcm_module_init(void)
{
	int err;

	printk(banner);

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	/* create /proc/net/can-bcm directory */
	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);

	if (proc_dir)
		proc_dir->owner = THIS_MODULE;

	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	if (proc_dir)
		proc_net_remove(&init_net, "can-bcm");
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);