xref: /linux/net/can/bcm.c (revision ecba1060583635ab55092072441ff903b5e9a659)
1 /*
2  * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3  *
4  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of Volkswagen nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * Alternatively, provided that this notice is retained in full, this
20  * software may be distributed under the terms of the GNU General
21  * Public License ("GPL") version 2, in which case the provisions of the
22  * GPL apply INSTEAD OF those given above.
23  *
24  * The provided data structures and external interfaces from this code
25  * are not restricted to be used by modules with a GPL compatible license.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38  * DAMAGE.
39  *
40  * Send feedback to <socketcan-users@lists.berlios.de>
41  *
42  */
43 
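/*
 * Illustrative userspace usage (a sketch only, with example values -
 * not part of this module): a BCM socket is created with
 * socket(PF_CAN, SOCK_DGRAM, CAN_BCM), attached to a CAN interface via
 * connect() with a struct sockaddr_can, and then controlled by writing
 * a struct bcm_msg_head optionally followed by msg_head.nframes
 * struct can_frame elements, e.g. to send CAN ID 0x123 every 100ms:
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg = {
 *		.msg_head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID,
 *			.can_id  = 0x123,
 *			.ival2   = { .tv_usec = 100000 },
 *			.nframes = 1,
 *		},
 *		.frame = { .can_dlc = 2, .data = { 0xde, 0xad } },
 *	};
 *
 *	write(s, &msg, sizeof(msg));
 *
 * The opcodes and flags are interpreted in bcm_sendmsg() below.
 */
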
44 #include <linux/module.h>
45 #include <linux/init.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/uio.h>
50 #include <linux/net.h>
51 #include <linux/netdevice.h>
52 #include <linux/socket.h>
53 #include <linux/if_arp.h>
54 #include <linux/skbuff.h>
55 #include <linux/can.h>
56 #include <linux/can/core.h>
57 #include <linux/can/bcm.h>
58 #include <net/sock.h>
59 #include <net/net_namespace.h>
60 
61 /* private flags stored in the MSBs of last_frames[index].can_dlc */
62 #define RX_RECV    0x40 /* received data for this element */
63 #define RX_THR     0x80 /* element has not been sent due to throttle feature */
64 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
65 
66 /* get best masking value for can_rx_register() for a given single can_id */
67 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
68 		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
69 		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
70 
71 #define CAN_BCM_VERSION CAN_VERSION
72 static __initdata const char banner[] = KERN_INFO
73 	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
74 
75 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
76 MODULE_LICENSE("Dual BSD/GPL");
77 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
78 MODULE_ALIAS("can-proto-2");
79 
80 /* easy access to can_frame payload */
81 static inline u64 GET_U64(const struct can_frame *cp)
82 {
83 	return *(u64 *)cp->data;
84 }
85 
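/*
 * A bcm_op describes one broadcast manager operation, identified by its
 * can_id and ifindex, and linked into the owning socket's tx_ops or
 * rx_ops list. TX ops use timer/count/kt_ival1/kt_ival2/currframe for
 * cyclic transmission of frames[]; RX ops use frames[] as content
 * filter masks, last_frames[] for the last received content, timer for
 * the receive timeout and thrtimer for throttling of RX_CHANGED
 * updates. sframe/last_sframe provide the storage for the common
 * single-frame case.
 */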
86 struct bcm_op {
87 	struct list_head list;
88 	int ifindex;
89 	canid_t can_id;
90 	int flags;
91 	unsigned long frames_abs, frames_filtered;
92 	struct timeval ival1, ival2;
93 	struct hrtimer timer, thrtimer;
94 	struct tasklet_struct tsklet, thrtsklet;
95 	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
96 	int rx_ifindex;
97 	int count;
98 	int nframes;
99 	int currframe;
100 	struct can_frame *frames;
101 	struct can_frame *last_frames;
102 	struct can_frame sframe;
103 	struct can_frame last_sframe;
104 	struct sock *sk;
105 	struct net_device *rx_reg_dev;
106 };
107 
108 static struct proc_dir_entry *proc_dir;
109 
110 struct bcm_sock {
111 	struct sock sk;
112 	int bound;
113 	int ifindex;
114 	struct notifier_block notifier;
115 	struct list_head rx_ops;
116 	struct list_head tx_ops;
117 	unsigned long dropped_usr_msgs;
118 	struct proc_dir_entry *bcm_proc_read;
119 	char procname [9]; /* pointer printed in ASCII with \0 */
120 };
121 
122 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
123 {
124 	return (struct bcm_sock *)sk;
125 }
126 
127 #define CFSIZ sizeof(struct can_frame)
128 #define OPSIZ sizeof(struct bcm_op)
129 #define MHSIZ sizeof(struct bcm_msg_head)
130 
131 /*
132  * procfs functions
133  */
134 static char *bcm_proc_getifname(int ifindex)
135 {
136 	struct net_device *dev;
137 
138 	if (!ifindex)
139 		return "any";
140 
141 	/* no usage counting */
142 	dev = __dev_get_by_index(&init_net, ifindex);
143 	if (dev)
144 		return dev->name;
145 
146 	return "???";
147 }
148 
149 static int bcm_read_proc(char *page, char **start, off_t off,
150 			 int count, int *eof, void *data)
151 {
152 	int len = 0;
153 	struct sock *sk = (struct sock *)data;
154 	struct bcm_sock *bo = bcm_sk(sk);
155 	struct bcm_op *op;
156 
157 	len += snprintf(page + len, PAGE_SIZE - len, ">>> socket %p",
158 			sk->sk_socket);
159 	len += snprintf(page + len, PAGE_SIZE - len, " / sk %p", sk);
160 	len += snprintf(page + len, PAGE_SIZE - len, " / bo %p", bo);
161 	len += snprintf(page + len, PAGE_SIZE - len, " / dropped %lu",
162 			bo->dropped_usr_msgs);
163 	len += snprintf(page + len, PAGE_SIZE - len, " / bound %s",
164 			bcm_proc_getifname(bo->ifindex));
165 	len += snprintf(page + len, PAGE_SIZE - len, " <<<\n");
166 
167 	list_for_each_entry(op, &bo->rx_ops, list) {
168 
169 		unsigned long reduction;
170 
171 		/* print only active entries & prevent division by zero */
172 		if (!op->frames_abs)
173 			continue;
174 
175 		len += snprintf(page + len, PAGE_SIZE - len,
176 				"rx_op: %03X %-5s ",
177 				op->can_id, bcm_proc_getifname(op->ifindex));
178 		len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ",
179 				op->nframes,
180 				(op->flags & RX_CHECK_DLC)?'d':' ');
181 		if (op->kt_ival1.tv64)
182 			len += snprintf(page + len, PAGE_SIZE - len,
183 					"timeo=%lld ",
184 					(long long)
185 					ktime_to_us(op->kt_ival1));
186 
187 		if (op->kt_ival2.tv64)
188 			len += snprintf(page + len, PAGE_SIZE - len,
189 					"thr=%lld ",
190 					(long long)
191 					ktime_to_us(op->kt_ival2));
192 
193 		len += snprintf(page + len, PAGE_SIZE - len,
194 				"# recv %ld (%ld) => reduction: ",
195 				op->frames_filtered, op->frames_abs);
196 
197 		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
198 
199 		len += snprintf(page + len, PAGE_SIZE - len, "%s%ld%%\n",
200 				(reduction == 100)?"near ":"", reduction);
201 
202 		if (len > PAGE_SIZE - 200) {
203 			/* mark output cut off */
204 			len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
205 			break;
206 		}
207 	}
208 
209 	list_for_each_entry(op, &bo->tx_ops, list) {
210 
211 		len += snprintf(page + len, PAGE_SIZE - len,
212 				"tx_op: %03X %s [%d] ",
213 				op->can_id, bcm_proc_getifname(op->ifindex),
214 				op->nframes);
215 
216 		if (op->kt_ival1.tv64)
217 			len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ",
218 					(long long) ktime_to_us(op->kt_ival1));
219 
220 		if (op->kt_ival2.tv64)
221 			len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ",
222 					(long long) ktime_to_us(op->kt_ival2));
223 
224 		len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n",
225 				op->frames_abs);
226 
227 		if (len > PAGE_SIZE - 100) {
228 			/* mark output cut off */
229 			len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
230 			break;
231 		}
232 	}
233 
234 	len += snprintf(page + len, PAGE_SIZE - len, "\n");
235 
236 	*eof = 1;
237 	return len;
238 }
239 
240 /*
241  * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
242  *              of the given bcm tx op
243  */
244 static void bcm_can_tx(struct bcm_op *op)
245 {
246 	struct sk_buff *skb;
247 	struct net_device *dev;
248 	struct can_frame *cf = &op->frames[op->currframe];
249 
250 	/* no target device? => exit */
251 	if (!op->ifindex)
252 		return;
253 
254 	dev = dev_get_by_index(&init_net, op->ifindex);
255 	if (!dev) {
256 		/* RFC: should this bcm_op remove itself here? */
257 		return;
258 	}
259 
260 	skb = alloc_skb(CFSIZ, gfp_any());
261 	if (!skb)
262 		goto out;
263 
264 	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
265 
266 	/* send with loopback */
267 	skb->dev = dev;
268 	skb->sk = op->sk;
269 	can_send(skb, 1);
270 
271 	/* update statistics */
272 	op->currframe++;
273 	op->frames_abs++;
274 
275 	/* reached last frame? */
276 	if (op->currframe >= op->nframes)
277 		op->currframe = 0;
278  out:
279 	dev_put(dev);
280 }
281 
282 /*
283  * bcm_send_to_user - send a BCM message to the userspace
284  *                    (consisting of bcm_msg_head + x CAN frames)
285  */
286 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
287 			     struct can_frame *frames, int has_timestamp)
288 {
289 	struct sk_buff *skb;
290 	struct can_frame *firstframe;
291 	struct sockaddr_can *addr;
292 	struct sock *sk = op->sk;
293 	int datalen = head->nframes * CFSIZ;
294 	int err;
295 
296 	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
297 	if (!skb)
298 		return;
299 
300 	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
301 
302 	if (head->nframes) {
303 		/* can_frames starting here */
304 		firstframe = (struct can_frame *)skb_tail_pointer(skb);
305 
306 		memcpy(skb_put(skb, datalen), frames, datalen);
307 
308 		/*
309 		 * the BCM uses the can_dlc-element of the can_frame
310 		 * structure for internal purposes. This is only
311 		 * relevant for updates that are generated by the
312 		 * BCM, where nframes is 1
313 		 */
314 		if (head->nframes == 1)
315 			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
316 	}
317 
318 	if (has_timestamp) {
319 		/* restore rx timestamp */
320 		skb->tstamp = op->rx_stamp;
321 	}
322 
323 	/*
324 	 *  Put the datagram into the queue so that bcm_recvmsg() can
325 	 *  get it from there.  We need to pass the interface index to
326 	 *  bcm_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
327 	 *  containing the interface index.
328 	 */
329 
330 	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
331 	addr = (struct sockaddr_can *)skb->cb;
332 	memset(addr, 0, sizeof(*addr));
333 	addr->can_family  = AF_CAN;
334 	addr->can_ifindex = op->rx_ifindex;
335 
336 	err = sock_queue_rcv_skb(sk, skb);
337 	if (err < 0) {
338 		struct bcm_sock *bo = bcm_sk(sk);
339 
340 		kfree_skb(skb);
341 		/* don't care about overflows in this statistic */
342 		bo->dropped_usr_msgs++;
343 	}
344 }
345 
346 static void bcm_tx_timeout_tsklet(unsigned long data)
347 {
348 	struct bcm_op *op = (struct bcm_op *)data;
349 	struct bcm_msg_head msg_head;
350 
351 	if (op->kt_ival1.tv64 && (op->count > 0)) {
352 
353 		op->count--;
354 		if (!op->count && (op->flags & TX_COUNTEVT)) {
355 
356 			/* create notification to user */
357 			msg_head.opcode  = TX_EXPIRED;
358 			msg_head.flags   = op->flags;
359 			msg_head.count   = op->count;
360 			msg_head.ival1   = op->ival1;
361 			msg_head.ival2   = op->ival2;
362 			msg_head.can_id  = op->can_id;
363 			msg_head.nframes = 0;
364 
365 			bcm_send_to_user(op, &msg_head, NULL, 0);
366 		}
367 	}
368 
369 	if (op->kt_ival1.tv64 && (op->count > 0)) {
370 
371 		/* send (next) frame */
372 		bcm_can_tx(op);
373 		hrtimer_start(&op->timer,
374 			      ktime_add(ktime_get(), op->kt_ival1),
375 			      HRTIMER_MODE_ABS);
376 
377 	} else {
378 		if (op->kt_ival2.tv64) {
379 
380 			/* send (next) frame */
381 			bcm_can_tx(op);
382 			hrtimer_start(&op->timer,
383 				      ktime_add(ktime_get(), op->kt_ival2),
384 				      HRTIMER_MODE_ABS);
385 		}
386 	}
387 }
388 
389 /*
390  * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
391  */
392 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
393 {
394 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
395 
396 	tasklet_schedule(&op->tsklet);
397 
398 	return HRTIMER_NORESTART;
399 }
400 
401 /*
402  * bcm_rx_changed - create a RX_CHANGED notification due to changed content
403  */
404 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
405 {
406 	struct bcm_msg_head head;
407 
408 	/* update statistics */
409 	op->frames_filtered++;
410 
411 	/* prevent statistics overflow */
412 	if (op->frames_filtered > ULONG_MAX/100)
413 		op->frames_filtered = op->frames_abs = 0;
414 
415 	/* this element is not throttled anymore */
416 	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
417 
418 	head.opcode  = RX_CHANGED;
419 	head.flags   = op->flags;
420 	head.count   = op->count;
421 	head.ival1   = op->ival1;
422 	head.ival2   = op->ival2;
423 	head.can_id  = op->can_id;
424 	head.nframes = 1;
425 
426 	bcm_send_to_user(op, &head, data, 1);
427 }
428 
429 /*
430  * bcm_rx_update_and_send - process a detected relevant receive content change
431  *                          1. update the last received data
432  *                          2. send a notification to the user (if possible)
433  */
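/*
 * Throttling sketch (example value): with kt_ival2 set to 100ms, at
 * most one RX_CHANGED per 100ms is pushed to the socket. Further
 * content changes within that period only update last_frames[] (marked
 * with RX_THR) and are flushed later via thrtimer -> bcm_rx_thr_handler.
 */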
434 static void bcm_rx_update_and_send(struct bcm_op *op,
435 				   struct can_frame *lastdata,
436 				   const struct can_frame *rxdata)
437 {
438 	memcpy(lastdata, rxdata, CFSIZ);
439 
440 	/* mark as used and throttled by default */
441 	lastdata->can_dlc |= (RX_RECV|RX_THR);
442 
443 	/* throttling mode inactive? */
444 	if (!op->kt_ival2.tv64) {
445 		/* send RX_CHANGED to the user immediately */
446 		bcm_rx_changed(op, lastdata);
447 		return;
448 	}
449 
450 	/* with active throttling timer we are just done here */
451 	if (hrtimer_active(&op->thrtimer))
452 		return;
453 
454 	/* first reception with enabled throttling mode */
455 	if (!op->kt_lastmsg.tv64)
456 		goto rx_changed_settime;
457 
458 	/* got a second frame inside a potential throttle period? */
459 	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
460 	    ktime_to_us(op->kt_ival2)) {
461 		/* do not send the saved data - only start throttle timer */
462 		hrtimer_start(&op->thrtimer,
463 			      ktime_add(op->kt_lastmsg, op->kt_ival2),
464 			      HRTIMER_MODE_ABS);
465 		return;
466 	}
467 
468 	/* the gap was big enough that throttling was not needed here */
469 rx_changed_settime:
470 	bcm_rx_changed(op, lastdata);
471 	op->kt_lastmsg = ktime_get();
472 }
473 
474 /*
475  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
476  *                       received data stored in op->last_frames[]
477  */
478 static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
479 				const struct can_frame *rxdata)
480 {
481 	/*
482 	 * no one uses the MSBs of can_dlc for comparison,
483 	 * so we use it here to detect the first time of reception
484 	 */
485 
486 	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
487 		/* received data for the first time => send update to user */
488 		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
489 		return;
490 	}
491 
492 	/* do a real check in can_frame data section */
493 
494 	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
495 	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
496 		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
497 		return;
498 	}
499 
500 	if (op->flags & RX_CHECK_DLC) {
501 		/* do a real check in can_frame dlc */
502 		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
503 					BCM_CAN_DLC_MASK)) {
504 			bcm_rx_update_and_send(op, &op->last_frames[index],
505 					       rxdata);
506 			return;
507 		}
508 	}
509 }
510 
511 /*
512  * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
513  */
514 static void bcm_rx_starttimer(struct bcm_op *op)
515 {
516 	if (op->flags & RX_NO_AUTOTIMER)
517 		return;
518 
519 	if (op->kt_ival1.tv64)
520 		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
521 }
522 
523 static void bcm_rx_timeout_tsklet(unsigned long data)
524 {
525 	struct bcm_op *op = (struct bcm_op *)data;
526 	struct bcm_msg_head msg_head;
527 
528 	/* create notification to user */
529 	msg_head.opcode  = RX_TIMEOUT;
530 	msg_head.flags   = op->flags;
531 	msg_head.count   = op->count;
532 	msg_head.ival1   = op->ival1;
533 	msg_head.ival2   = op->ival2;
534 	msg_head.can_id  = op->can_id;
535 	msg_head.nframes = 0;
536 
537 	bcm_send_to_user(op, &msg_head, NULL, 0);
538 }
539 
540 /*
541  * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
542  */
543 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
544 {
545 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
546 
547 	/* schedule before NET_RX_SOFTIRQ */
548 	tasklet_hi_schedule(&op->tsklet);
549 
550 	/* no restart of the timer is done here! */
551 
552 	/* if the user wants to be informed when cyclic CAN messages come back */
553 	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
554 		/* clear received can_frames to indicate 'nothing received' */
555 		memset(op->last_frames, 0, op->nframes * CFSIZ);
556 	}
557 
558 	return HRTIMER_NORESTART;
559 }
560 
561 /*
562  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
563  */
564 static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
565 {
566 	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
567 		if (update)
568 			bcm_rx_changed(op, &op->last_frames[index]);
569 		return 1;
570 	}
571 	return 0;
572 }
573 
574 /*
575  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
576  *
577  * update == 0 : just check if throttled data is available  (any irq context)
578  * update == 1 : check and send throttled data to userspace (soft_irq context)
579  */
580 static int bcm_rx_thr_flush(struct bcm_op *op, int update)
581 {
582 	int updated = 0;
583 
584 	if (op->nframes > 1) {
585 		int i;
586 
587 		/* for MUX filter we start at index 1 */
588 		for (i = 1; i < op->nframes; i++)
589 			updated += bcm_rx_do_flush(op, update, i);
590 
591 	} else {
592 		/* for RX_FILTER_ID and simple filter */
593 		updated += bcm_rx_do_flush(op, update, 0);
594 	}
595 
596 	return updated;
597 }
598 
599 static void bcm_rx_thr_tsklet(unsigned long data)
600 {
601 	struct bcm_op *op = (struct bcm_op *)data;
602 
603 	/* push the changed data to the userspace */
604 	bcm_rx_thr_flush(op, 1);
605 }
606 
607 /*
608  * bcm_rx_thr_handler - the time for blocked content updates is over now:
609  *                      Check for throttled data and send it to the userspace
610  */
611 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
612 {
613 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
614 
615 	tasklet_schedule(&op->thrtsklet);
616 
617 	if (bcm_rx_thr_flush(op, 0)) {
618 		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
619 		return HRTIMER_RESTART;
620 	} else {
621 		/* rearm throttle handling */
622 		op->kt_lastmsg = ktime_set(0, 0);
623 		return HRTIMER_NORESTART;
624 	}
625 }
626 
627 /*
628  * bcm_rx_handler - handle a CAN frame reception
629  */
630 static void bcm_rx_handler(struct sk_buff *skb, void *data)
631 {
632 	struct bcm_op *op = (struct bcm_op *)data;
633 	const struct can_frame *rxframe = (struct can_frame *)skb->data;
634 	int i;
635 
636 	/* disable timeout */
637 	hrtimer_cancel(&op->timer);
638 
639 	if (op->can_id != rxframe->can_id)
640 		return;
641 
642 	/* save rx timestamp */
643 	op->rx_stamp = skb->tstamp;
644 	/* save originator for recvfrom() */
645 	op->rx_ifindex = skb->dev->ifindex;
646 	/* update statistics */
647 	op->frames_abs++;
648 
649 	if (op->flags & RX_RTR_FRAME) {
650 		/* send reply for RTR-request (placed in op->frames[0]) */
651 		bcm_can_tx(op);
652 		return;
653 	}
654 
655 	if (op->flags & RX_FILTER_ID) {
656 		/* the easiest case */
657 		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
658 		goto rx_starttimer;
659 	}
660 
661 	if (op->nframes == 1) {
662 		/* simple compare with index 0 */
663 		bcm_rx_cmp_to_index(op, 0, rxframe);
664 		goto rx_starttimer;
665 	}
666 
667 	if (op->nframes > 1) {
668 		/*
669 		 * multiplex compare
670 		 *
671 		 * find the first multiplex mask that fits.
672 		 * Remark: The MUX-mask is stored in index 0
673 		 */
674 
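		/*
		 * Example (hypothetical values): if frames[0].data marks
		 * data[0] as the MUX byte (0xff in data[0], 0x00 elsewhere)
		 * and frames[2].data[0] is 0x45, then a received frame with
		 * data[0] == 0x45 matches index 2 and is compared against
		 * last_frames[2] by bcm_rx_cmp_to_index().
		 */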
675 		for (i = 1; i < op->nframes; i++) {
676 			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
677 			    (GET_U64(&op->frames[0]) &
678 			     GET_U64(&op->frames[i]))) {
679 				bcm_rx_cmp_to_index(op, i, rxframe);
680 				break;
681 			}
682 		}
683 	}
684 
685 rx_starttimer:
686 	bcm_rx_starttimer(op);
687 }
688 
689 /*
690  * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
691  */
692 static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
693 				  int ifindex)
694 {
695 	struct bcm_op *op;
696 
697 	list_for_each_entry(op, ops, list) {
698 		if ((op->can_id == can_id) && (op->ifindex == ifindex))
699 			return op;
700 	}
701 
702 	return NULL;
703 }
704 
705 static void bcm_remove_op(struct bcm_op *op)
706 {
707 	hrtimer_cancel(&op->timer);
708 	hrtimer_cancel(&op->thrtimer);
709 
710 	if (op->tsklet.func)
711 		tasklet_kill(&op->tsklet);
712 
713 	if (op->thrtsklet.func)
714 		tasklet_kill(&op->thrtsklet);
715 
716 	if ((op->frames) && (op->frames != &op->sframe))
717 		kfree(op->frames);
718 
719 	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
720 		kfree(op->last_frames);
721 
722 	kfree(op);
723 
724 	return;
725 }
726 
727 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
728 {
729 	if (op->rx_reg_dev == dev) {
730 		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
731 				  bcm_rx_handler, op);
732 
733 		/* mark subscription as removed */
734 		op->rx_reg_dev = NULL;
735 	} else
736 		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
737 		       "mismatch %p %p\n", op->rx_reg_dev, dev);
738 }
739 
740 /*
741  * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
742  */
743 static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
744 {
745 	struct bcm_op *op, *n;
746 
747 	list_for_each_entry_safe(op, n, ops, list) {
748 		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
749 
750 			/*
751 			 * Don't care if we're bound or not (due to netdev
752 			 * problems); can_rx_unregister() is always a safe
753 			 * thing to do here.
754 			 */
755 			if (op->ifindex) {
756 				/*
757 				 * Only remove subscriptions that had not
758 				 * been removed due to NETDEV_UNREGISTER
759 				 * in bcm_notifier()
760 				 */
761 				if (op->rx_reg_dev) {
762 					struct net_device *dev;
763 
764 					dev = dev_get_by_index(&init_net,
765 							       op->ifindex);
766 					if (dev) {
767 						bcm_rx_unreg(dev, op);
768 						dev_put(dev);
769 					}
770 				}
771 			} else
772 				can_rx_unregister(NULL, op->can_id,
773 						  REGMASK(op->can_id),
774 						  bcm_rx_handler, op);
775 
776 			list_del(&op->list);
777 			bcm_remove_op(op);
778 			return 1; /* done */
779 		}
780 	}
781 
782 	return 0; /* not found */
783 }
784 
785 /*
786  * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
787  */
788 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
789 {
790 	struct bcm_op *op, *n;
791 
792 	list_for_each_entry_safe(op, n, ops, list) {
793 		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
794 			list_del(&op->list);
795 			bcm_remove_op(op);
796 			return 1; /* done */
797 		}
798 	}
799 
800 	return 0; /* not found */
801 }
802 
803 /*
804  * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
805  */
806 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
807 		       int ifindex)
808 {
809 	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
810 
811 	if (!op)
812 		return -EINVAL;
813 
814 	/* put current values into msg_head */
815 	msg_head->flags   = op->flags;
816 	msg_head->count   = op->count;
817 	msg_head->ival1   = op->ival1;
818 	msg_head->ival2   = op->ival2;
819 	msg_head->nframes = op->nframes;
820 
821 	bcm_send_to_user(op, msg_head, op->frames, 0);
822 
823 	return MHSIZ;
824 }
825 
826 /*
827  * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
828  */
829 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
830 			int ifindex, struct sock *sk)
831 {
832 	struct bcm_sock *bo = bcm_sk(sk);
833 	struct bcm_op *op;
834 	int i, err;
835 
836 	/* we need a real device to send frames */
837 	if (!ifindex)
838 		return -ENODEV;
839 
840 	/* we need at least one can_frame */
841 	if (msg_head->nframes < 1)
842 		return -EINVAL;
843 
844 	/* check the given can_id */
845 	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
846 
847 	if (op) {
848 		/* update existing BCM operation */
849 
850 		/*
851 		 * Do we need more space for the can_frames than currently
852 		 * allocated? -> This is a _really_ unusual use-case and
853 		 * therefore (complexity / locking) it is not supported.
854 		 */
855 		if (msg_head->nframes > op->nframes)
856 			return -E2BIG;
857 
858 		/* update can_frames content */
859 		for (i = 0; i < msg_head->nframes; i++) {
860 			err = memcpy_fromiovec((u8 *)&op->frames[i],
861 					       msg->msg_iov, CFSIZ);
862 
863 			if (op->frames[i].can_dlc > 8)
864 				err = -EINVAL;
865 
866 			if (err < 0)
867 				return err;
868 
869 			if (msg_head->flags & TX_CP_CAN_ID) {
870 				/* copy can_id into frame */
871 				op->frames[i].can_id = msg_head->can_id;
872 			}
873 		}
874 
875 	} else {
876 		/* insert new BCM operation for the given can_id */
877 
878 		op = kzalloc(OPSIZ, GFP_KERNEL);
879 		if (!op)
880 			return -ENOMEM;
881 
882 		op->can_id    = msg_head->can_id;
883 
884 		/* create array for can_frames and copy the data */
885 		if (msg_head->nframes > 1) {
886 			op->frames = kmalloc(msg_head->nframes * CFSIZ,
887 					     GFP_KERNEL);
888 			if (!op->frames) {
889 				kfree(op);
890 				return -ENOMEM;
891 			}
892 		} else
893 			op->frames = &op->sframe;
894 
895 		for (i = 0; i < msg_head->nframes; i++) {
896 			err = memcpy_fromiovec((u8 *)&op->frames[i],
897 					       msg->msg_iov, CFSIZ);
898 
899 			if (op->frames[i].can_dlc > 8)
900 				err = -EINVAL;
901 
902 			if (err < 0) {
903 				if (op->frames != &op->sframe)
904 					kfree(op->frames);
905 				kfree(op);
906 				return err;
907 			}
908 
909 			if (msg_head->flags & TX_CP_CAN_ID) {
910 				/* copy can_id into frame */
911 				op->frames[i].can_id = msg_head->can_id;
912 			}
913 		}
914 
915 		/* tx_ops never compare with previous received messages */
916 		op->last_frames = NULL;
917 
918 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
919 		op->sk = sk;
920 		op->ifindex = ifindex;
921 
922 		/* initialize uninitialized (kzalloc) structure */
923 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
924 		op->timer.function = bcm_tx_timeout_handler;
925 
926 		/* initialize tasklet for tx countevent notification */
927 		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
928 			     (unsigned long) op);
929 
930 		/* currently unused in tx_ops */
931 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
932 
933 		/* add this bcm_op to the list of the tx_ops */
934 		list_add(&op->list, &bo->tx_ops);
935 
936 	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
937 
938 	if (op->nframes != msg_head->nframes) {
939 		op->nframes   = msg_head->nframes;
940 		/* start multiple frame transmission with index 0 */
941 		op->currframe = 0;
942 	}
943 
944 	/* check flags */
945 
946 	op->flags = msg_head->flags;
947 
948 	if (op->flags & TX_RESET_MULTI_IDX) {
949 		/* start multiple frame transmission with index 0 */
950 		op->currframe = 0;
951 	}
952 
953 	if (op->flags & SETTIMER) {
954 		/* set timer values */
955 		op->count = msg_head->count;
956 		op->ival1 = msg_head->ival1;
957 		op->ival2 = msg_head->ival2;
958 		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
959 		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
960 
961 		/* disable an active timer due to zero values? */
962 		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
963 			hrtimer_cancel(&op->timer);
964 	}
965 
966 	if ((op->flags & STARTTIMER) &&
967 	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
968 
969 		/* spec: send can_frame when starting timer */
970 		op->flags |= TX_ANNOUNCE;
971 
972 		if (op->kt_ival1.tv64 && (op->count > 0)) {
973 			/* op->count-- is done in bcm_tx_timeout_handler */
974 			hrtimer_start(&op->timer, op->kt_ival1,
975 				      HRTIMER_MODE_REL);
976 		} else
977 			hrtimer_start(&op->timer, op->kt_ival2,
978 				      HRTIMER_MODE_REL);
979 	}
980 
981 	if (op->flags & TX_ANNOUNCE)
982 		bcm_can_tx(op);
983 
984 	return msg_head->nframes * CFSIZ + MHSIZ;
985 }
986 
987 /*
988  * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
989  */
990 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
991 			int ifindex, struct sock *sk)
992 {
993 	struct bcm_sock *bo = bcm_sk(sk);
994 	struct bcm_op *op;
995 	int do_rx_register;
996 	int err = 0;
997 
998 	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
999 		/* be robust against wrong usage ... */
1000 		msg_head->flags |= RX_FILTER_ID;
1001 		/* ignore trailing garbage */
1002 		msg_head->nframes = 0;
1003 	}
1004 
1005 	if ((msg_head->flags & RX_RTR_FRAME) &&
1006 	    ((msg_head->nframes != 1) ||
1007 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
1008 		return -EINVAL;
1009 
1010 	/* check the given can_id */
1011 	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
1012 	if (op) {
1013 		/* update existing BCM operation */
1014 
1015 		/*
1016 		 * Do we need more space for the can_frames than currently
1017 		 * allocated? -> This is a _really_ unusual use-case and
1018 		 * therefore (complexity / locking) it is not supported.
1019 		 */
1020 		if (msg_head->nframes > op->nframes)
1021 			return -E2BIG;
1022 
1023 		if (msg_head->nframes) {
1024 			/* update can_frames content */
1025 			err = memcpy_fromiovec((u8 *)op->frames,
1026 					       msg->msg_iov,
1027 					       msg_head->nframes * CFSIZ);
1028 			if (err < 0)
1029 				return err;
1030 
1031 			/* clear last_frames to indicate 'nothing received' */
1032 			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
1033 		}
1034 
1035 		op->nframes = msg_head->nframes;
1036 
1037 		/* Only an update -> do not call can_rx_register() */
1038 		do_rx_register = 0;
1039 
1040 	} else {
1041 		/* insert new BCM operation for the given can_id */
1042 		op = kzalloc(OPSIZ, GFP_KERNEL);
1043 		if (!op)
1044 			return -ENOMEM;
1045 
1046 		op->can_id    = msg_head->can_id;
1047 		op->nframes   = msg_head->nframes;
1048 
1049 		if (msg_head->nframes > 1) {
1050 			/* create array for can_frames and copy the data */
1051 			op->frames = kmalloc(msg_head->nframes * CFSIZ,
1052 					     GFP_KERNEL);
1053 			if (!op->frames) {
1054 				kfree(op);
1055 				return -ENOMEM;
1056 			}
1057 
1058 			/* create and init array for received can_frames */
1059 			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1060 						  GFP_KERNEL);
1061 			if (!op->last_frames) {
1062 				kfree(op->frames);
1063 				kfree(op);
1064 				return -ENOMEM;
1065 			}
1066 
1067 		} else {
1068 			op->frames = &op->sframe;
1069 			op->last_frames = &op->last_sframe;
1070 		}
1071 
1072 		if (msg_head->nframes) {
1073 			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
1074 					       msg_head->nframes * CFSIZ);
1075 			if (err < 0) {
1076 				if (op->frames != &op->sframe)
1077 					kfree(op->frames);
1078 				if (op->last_frames != &op->last_sframe)
1079 					kfree(op->last_frames);
1080 				kfree(op);
1081 				return err;
1082 			}
1083 		}
1084 
1085 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
1086 		op->sk = sk;
1087 		op->ifindex = ifindex;
1088 
1089 		/* initialize uninitialized (kzalloc) structure */
1090 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1091 		op->timer.function = bcm_rx_timeout_handler;
1092 
1093 		/* initialize tasklet for rx timeout notification */
1094 		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
1095 			     (unsigned long) op);
1096 
1097 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1098 		op->thrtimer.function = bcm_rx_thr_handler;
1099 
1100 		/* initialize tasklet for rx throttle handling */
1101 		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
1102 			     (unsigned long) op);
1103 
1104 		/* add this bcm_op to the list of the rx_ops */
1105 		list_add(&op->list, &bo->rx_ops);
1106 
1107 		/* call can_rx_register() */
1108 		do_rx_register = 1;
1109 
1110 	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1111 
1112 	/* check flags */
1113 	op->flags = msg_head->flags;
1114 
1115 	if (op->flags & RX_RTR_FRAME) {
1116 
1117 		/* no timers in RTR-mode */
1118 		hrtimer_cancel(&op->thrtimer);
1119 		hrtimer_cancel(&op->timer);
1120 
1121 		/*
1122 		 * funny feature in RX(!)_SETUP only for RTR-mode:
1123 		 * copy can_id into frame BUT without RTR-flag to
1124 		 * prevent a full-load-loopback-test ... ;-]
1125 		 */
1126 		if ((op->flags & TX_CP_CAN_ID) ||
1127 		    (op->frames[0].can_id == op->can_id))
1128 			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1129 
1130 	} else {
1131 		if (op->flags & SETTIMER) {
1132 
1133 			/* set timer value */
1134 			op->ival1 = msg_head->ival1;
1135 			op->ival2 = msg_head->ival2;
1136 			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1137 			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1138 
1139 			/* disable an active timer due to zero value? */
1140 			if (!op->kt_ival1.tv64)
1141 				hrtimer_cancel(&op->timer);
1142 
1143 			/*
1144 			 * In any case cancel the throttle timer, flush
1145 			 * potentially blocked msgs and reset throttle handling
1146 			 */
1147 			op->kt_lastmsg = ktime_set(0, 0);
1148 			hrtimer_cancel(&op->thrtimer);
1149 			bcm_rx_thr_flush(op, 1);
1150 		}
1151 
1152 		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1153 			hrtimer_start(&op->timer, op->kt_ival1,
1154 				      HRTIMER_MODE_REL);
1155 	}
1156 
1157 	/* now we can register for can_ids, if we added a new bcm_op */
1158 	if (do_rx_register) {
1159 		if (ifindex) {
1160 			struct net_device *dev;
1161 
1162 			dev = dev_get_by_index(&init_net, ifindex);
1163 			if (dev) {
1164 				err = can_rx_register(dev, op->can_id,
1165 						      REGMASK(op->can_id),
1166 						      bcm_rx_handler, op,
1167 						      "bcm");
1168 
1169 				op->rx_reg_dev = dev;
1170 				dev_put(dev);
1171 			}
1172 
1173 		} else
1174 			err = can_rx_register(NULL, op->can_id,
1175 					      REGMASK(op->can_id),
1176 					      bcm_rx_handler, op, "bcm");
1177 		if (err) {
1178 			/* this bcm rx op is broken -> remove it */
1179 			list_del(&op->list);
1180 			bcm_remove_op(op);
1181 			return err;
1182 		}
1183 	}
1184 
1185 	return msg_head->nframes * CFSIZ + MHSIZ;
1186 }
1187 
1188 /*
1189  * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1190  */
1191 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1192 {
1193 	struct sk_buff *skb;
1194 	struct net_device *dev;
1195 	int err;
1196 
1197 	/* we need a real device to send frames */
1198 	if (!ifindex)
1199 		return -ENODEV;
1200 
1201 	skb = alloc_skb(CFSIZ, GFP_KERNEL);
1202 
1203 	if (!skb)
1204 		return -ENOMEM;
1205 
1206 	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
1207 	if (err < 0) {
1208 		kfree_skb(skb);
1209 		return err;
1210 	}
1211 
1212 	dev = dev_get_by_index(&init_net, ifindex);
1213 	if (!dev) {
1214 		kfree_skb(skb);
1215 		return -ENODEV;
1216 	}
1217 
1218 	skb->dev = dev;
1219 	skb->sk  = sk;
1220 	err = can_send(skb, 1); /* send with loopback */
1221 	dev_put(dev);
1222 
1223 	if (err)
1224 		return err;
1225 
1226 	return CFSIZ + MHSIZ;
1227 }
1228 
1229 /*
1230  * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1231  */
1232 static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
1233 		       struct msghdr *msg, size_t size)
1234 {
1235 	struct sock *sk = sock->sk;
1236 	struct bcm_sock *bo = bcm_sk(sk);
1237 	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1238 	struct bcm_msg_head msg_head;
1239 	int ret; /* read bytes or error codes as return value */
1240 
1241 	if (!bo->bound)
1242 		return -ENOTCONN;
1243 
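	/*
	 * A BCM command consists of a struct bcm_msg_head optionally
	 * followed by msg_head.nframes struct can_frame elements, so the
	 * overall length must be MHSIZ plus a multiple of CFSIZ.
	 */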
1244 	/* check for valid message length from userspace */
1245 	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
1246 		return -EINVAL;
1247 
1248 	/* check for alternative ifindex for this bcm_op */
1249 
1250 	if (!ifindex && msg->msg_name) {
1251 		/* no bound device as default => check msg_name */
1252 		struct sockaddr_can *addr =
1253 			(struct sockaddr_can *)msg->msg_name;
1254 
1255 		if (addr->can_family != AF_CAN)
1256 			return -EINVAL;
1257 
1258 		/* ifindex from sendto() */
1259 		ifindex = addr->can_ifindex;
1260 
1261 		if (ifindex) {
1262 			struct net_device *dev;
1263 
1264 			dev = dev_get_by_index(&init_net, ifindex);
1265 			if (!dev)
1266 				return -ENODEV;
1267 
1268 			if (dev->type != ARPHRD_CAN) {
1269 				dev_put(dev);
1270 				return -ENODEV;
1271 			}
1272 
1273 			dev_put(dev);
1274 		}
1275 	}
1276 
1277 	/* read message head information */
1278 
1279 	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
1280 	if (ret < 0)
1281 		return ret;
1282 
1283 	lock_sock(sk);
1284 
1285 	switch (msg_head.opcode) {
1286 
1287 	case TX_SETUP:
1288 		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1289 		break;
1290 
1291 	case RX_SETUP:
1292 		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1293 		break;
1294 
1295 	case TX_DELETE:
1296 		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1297 			ret = MHSIZ;
1298 		else
1299 			ret = -EINVAL;
1300 		break;
1301 
1302 	case RX_DELETE:
1303 		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1304 			ret = MHSIZ;
1305 		else
1306 			ret = -EINVAL;
1307 		break;
1308 
1309 	case TX_READ:
1310 		/* reuse msg_head for the reply to TX_READ */
1311 		msg_head.opcode  = TX_STATUS;
1312 		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1313 		break;
1314 
1315 	case RX_READ:
1316 		/* reuse msg_head for the reply to RX_READ */
1317 		msg_head.opcode  = RX_STATUS;
1318 		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1319 		break;
1320 
1321 	case TX_SEND:
1322 		/* we need exactly one can_frame behind the msg head */
1323 		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
1324 			ret = -EINVAL;
1325 		else
1326 			ret = bcm_tx_send(msg, ifindex, sk);
1327 		break;
1328 
1329 	default:
1330 		ret = -EINVAL;
1331 		break;
1332 	}
1333 
1334 	release_sock(sk);
1335 
1336 	return ret;
1337 }
1338 
1339 /*
1340  * notification handler for netdevice status changes
1341  */
1342 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1343 			void *data)
1344 {
1345 	struct net_device *dev = (struct net_device *)data;
1346 	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1347 	struct sock *sk = &bo->sk;
1348 	struct bcm_op *op;
1349 	int notify_enodev = 0;
1350 
1351 	if (!net_eq(dev_net(dev), &init_net))
1352 		return NOTIFY_DONE;
1353 
1354 	if (dev->type != ARPHRD_CAN)
1355 		return NOTIFY_DONE;
1356 
1357 	switch (msg) {
1358 
1359 	case NETDEV_UNREGISTER:
1360 		lock_sock(sk);
1361 
1362 		/* remove device specific receive entries */
1363 		list_for_each_entry(op, &bo->rx_ops, list)
1364 			if (op->rx_reg_dev == dev)
1365 				bcm_rx_unreg(dev, op);
1366 
1367 		/* remove device reference, if this is our bound device */
1368 		if (bo->bound && bo->ifindex == dev->ifindex) {
1369 			bo->bound   = 0;
1370 			bo->ifindex = 0;
1371 			notify_enodev = 1;
1372 		}
1373 
1374 		release_sock(sk);
1375 
1376 		if (notify_enodev) {
1377 			sk->sk_err = ENODEV;
1378 			if (!sock_flag(sk, SOCK_DEAD))
1379 				sk->sk_error_report(sk);
1380 		}
1381 		break;
1382 
1383 	case NETDEV_DOWN:
1384 		if (bo->bound && bo->ifindex == dev->ifindex) {
1385 			sk->sk_err = ENETDOWN;
1386 			if (!sock_flag(sk, SOCK_DEAD))
1387 				sk->sk_error_report(sk);
1388 		}
1389 	}
1390 
1391 	return NOTIFY_DONE;
1392 }
1393 
1394 /*
1395  * initial settings for all BCM sockets to be set at socket creation time
1396  */
1397 static int bcm_init(struct sock *sk)
1398 {
1399 	struct bcm_sock *bo = bcm_sk(sk);
1400 
1401 	bo->bound            = 0;
1402 	bo->ifindex          = 0;
1403 	bo->dropped_usr_msgs = 0;
1404 	bo->bcm_proc_read    = NULL;
1405 
1406 	INIT_LIST_HEAD(&bo->tx_ops);
1407 	INIT_LIST_HEAD(&bo->rx_ops);
1408 
1409 	/* set notifier */
1410 	bo->notifier.notifier_call = bcm_notifier;
1411 
1412 	register_netdevice_notifier(&bo->notifier);
1413 
1414 	return 0;
1415 }
1416 
1417 /*
1418  * standard socket functions
1419  */
1420 static int bcm_release(struct socket *sock)
1421 {
1422 	struct sock *sk = sock->sk;
1423 	struct bcm_sock *bo = bcm_sk(sk);
1424 	struct bcm_op *op, *next;
1425 
1426 	/* remove bcm_ops, timer, rx_unregister(), etc. */
1427 
1428 	unregister_netdevice_notifier(&bo->notifier);
1429 
1430 	lock_sock(sk);
1431 
1432 	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1433 		bcm_remove_op(op);
1434 
1435 	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1436 		/*
1437 		 * Don't care if we're bound or not (due to netdev problems);
1438 		 * can_rx_unregister() is always a safe thing to do here.
1439 		 */
1440 		if (op->ifindex) {
1441 			/*
1442 			 * Only remove subscriptions that had not
1443 			 * been removed due to NETDEV_UNREGISTER
1444 			 * in bcm_notifier()
1445 			 */
1446 			if (op->rx_reg_dev) {
1447 				struct net_device *dev;
1448 
1449 				dev = dev_get_by_index(&init_net, op->ifindex);
1450 				if (dev) {
1451 					bcm_rx_unreg(dev, op);
1452 					dev_put(dev);
1453 				}
1454 			}
1455 		} else
1456 			can_rx_unregister(NULL, op->can_id,
1457 					  REGMASK(op->can_id),
1458 					  bcm_rx_handler, op);
1459 
1460 		bcm_remove_op(op);
1461 	}
1462 
1463 	/* remove procfs entry */
1464 	if (proc_dir && bo->bcm_proc_read)
1465 		remove_proc_entry(bo->procname, proc_dir);
1466 
1467 	/* remove device reference */
1468 	if (bo->bound) {
1469 		bo->bound   = 0;
1470 		bo->ifindex = 0;
1471 	}
1472 
1473 	sock_orphan(sk);
1474 	sock->sk = NULL;
1475 
1476 	release_sock(sk);
1477 	sock_put(sk);
1478 
1479 	return 0;
1480 }
1481 
1482 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1483 		       int flags)
1484 {
1485 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1486 	struct sock *sk = sock->sk;
1487 	struct bcm_sock *bo = bcm_sk(sk);
1488 
1489 	if (bo->bound)
1490 		return -EISCONN;
1491 
1492 	/* bind a device to this socket */
1493 	if (addr->can_ifindex) {
1494 		struct net_device *dev;
1495 
1496 		dev = dev_get_by_index(&init_net, addr->can_ifindex);
1497 		if (!dev)
1498 			return -ENODEV;
1499 
1500 		if (dev->type != ARPHRD_CAN) {
1501 			dev_put(dev);
1502 			return -ENODEV;
1503 		}
1504 
1505 		bo->ifindex = dev->ifindex;
1506 		dev_put(dev);
1507 
1508 	} else {
1509 		/* no interface reference for ifindex = 0 ('any' CAN device) */
1510 		bo->ifindex = 0;
1511 	}
1512 
1513 	bo->bound = 1;
1514 
1515 	if (proc_dir) {
1516 		/* unique socket address as filename */
1517 		sprintf(bo->procname, "%p", sock);
1518 		bo->bcm_proc_read = create_proc_read_entry(bo->procname, 0644,
1519 							   proc_dir,
1520 							   bcm_read_proc, sk);
1521 	}
1522 
1523 	return 0;
1524 }
1525 
1526 static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
1527 		       struct msghdr *msg, size_t size, int flags)
1528 {
1529 	struct sock *sk = sock->sk;
1530 	struct sk_buff *skb;
1531 	int error = 0;
1532 	int noblock;
1533 	int err;
1534 
1535 	noblock =  flags & MSG_DONTWAIT;
1536 	flags   &= ~MSG_DONTWAIT;
1537 	skb = skb_recv_datagram(sk, flags, noblock, &error);
1538 	if (!skb)
1539 		return error;
1540 
1541 	if (skb->len < size)
1542 		size = skb->len;
1543 
1544 	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
1545 	if (err < 0) {
1546 		skb_free_datagram(sk, skb);
1547 		return err;
1548 	}
1549 
1550 	sock_recv_timestamp(msg, sk, skb);
1551 
1552 	if (msg->msg_name) {
1553 		msg->msg_namelen = sizeof(struct sockaddr_can);
1554 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1555 	}
1556 
1557 	skb_free_datagram(sk, skb);
1558 
1559 	return size;
1560 }
1561 
1562 static struct proto_ops bcm_ops __read_mostly = {
1563 	.family        = PF_CAN,
1564 	.release       = bcm_release,
1565 	.bind          = sock_no_bind,
1566 	.connect       = bcm_connect,
1567 	.socketpair    = sock_no_socketpair,
1568 	.accept        = sock_no_accept,
1569 	.getname       = sock_no_getname,
1570 	.poll          = datagram_poll,
1571 	.ioctl         = NULL,		/* use can_ioctl() from af_can.c */
1572 	.listen        = sock_no_listen,
1573 	.shutdown      = sock_no_shutdown,
1574 	.setsockopt    = sock_no_setsockopt,
1575 	.getsockopt    = sock_no_getsockopt,
1576 	.sendmsg       = bcm_sendmsg,
1577 	.recvmsg       = bcm_recvmsg,
1578 	.mmap          = sock_no_mmap,
1579 	.sendpage      = sock_no_sendpage,
1580 };
1581 
1582 static struct proto bcm_proto __read_mostly = {
1583 	.name       = "CAN_BCM",
1584 	.owner      = THIS_MODULE,
1585 	.obj_size   = sizeof(struct bcm_sock),
1586 	.init       = bcm_init,
1587 };
1588 
1589 static struct can_proto bcm_can_proto __read_mostly = {
1590 	.type       = SOCK_DGRAM,
1591 	.protocol   = CAN_BCM,
1592 	.capability = -1,
1593 	.ops        = &bcm_ops,
1594 	.prot       = &bcm_proto,
1595 };
1596 
1597 static int __init bcm_module_init(void)
1598 {
1599 	int err;
1600 
1601 	printk(banner);
1602 
1603 	err = can_proto_register(&bcm_can_proto);
1604 	if (err < 0) {
1605 		printk(KERN_ERR "can: registration of bcm protocol failed\n");
1606 		return err;
1607 	}
1608 
1609 	/* create /proc/net/can-bcm directory */
1610 	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1611 	return 0;
1612 }
1613 
1614 static void __exit bcm_module_exit(void)
1615 {
1616 	can_proto_unregister(&bcm_can_proto);
1617 
1618 	if (proc_dir)
1619 		proc_net_remove(&init_net, "can-bcm");
1620 }
1621 
1622 module_init(bcm_module_init);
1623 module_exit(bcm_module_exit);
1624