xref: /linux/net/can/bcm.c (revision e26207a3819684e9b4450a2d30bdd065fa92d9c7)
1 /*
2  * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3  *
4  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of Volkswagen nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * Alternatively, provided that this notice is retained in full, this
20  * software may be distributed under the terms of the GNU General
21  * Public License ("GPL") version 2, in which case the provisions of the
22  * GPL apply INSTEAD OF those given above.
23  *
24  * The provided data structures and external interfaces from this code
25  * are not restricted to be used by modules with a GPL compatible license.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38  * DAMAGE.
39  *
40  * Send feedback to <socketcan-users@lists.berlios.de>
41  *
42  */
43 
44 #include <linux/module.h>
45 #include <linux/init.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/seq_file.h>
50 #include <linux/uio.h>
51 #include <linux/net.h>
52 #include <linux/netdevice.h>
53 #include <linux/socket.h>
54 #include <linux/if_arp.h>
55 #include <linux/skbuff.h>
56 #include <linux/can.h>
57 #include <linux/can/core.h>
58 #include <linux/can/bcm.h>
59 #include <net/sock.h>
60 #include <net/net_namespace.h>
61 
62 /* use of last_frames[index].can_dlc */
63 #define RX_RECV    0x40 /* received data for this element */
64 #define RX_THR     0x80 /* element has not been sent due to throttle feature */
65 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
66 
67 /* get best masking value for can_rx_register() for a given single can_id */
68 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
69 		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
70 		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
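/*
 * Illustrative note (not from the original source): for a standard-frame
 * id REGMASK() yields CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, for an
 * extended id the 29-bit CAN_EFF_MASK variant, so af_can only delivers
 * frames whose id bits and EFF/RTR flag bits match op->can_id exactly.
 */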
71 
72 #define CAN_BCM_VERSION CAN_VERSION
73 static __initdata const char banner[] = KERN_INFO
74 	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
75 
76 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
77 MODULE_LICENSE("Dual BSD/GPL");
78 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
79 MODULE_ALIAS("can-proto-2");
80 
81 /* easy access to can_frame payload */
82 static inline u64 GET_U64(const struct can_frame *cp)
83 {
84 	return *(u64 *)cp->data;
85 }
86 
87 struct bcm_op {
88 	struct list_head list;
89 	int ifindex;
90 	canid_t can_id;
91 	int flags;
92 	unsigned long frames_abs, frames_filtered;
93 	struct timeval ival1, ival2;
94 	struct hrtimer timer, thrtimer;
95 	struct tasklet_struct tsklet, thrtsklet;
96 	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
97 	int rx_ifindex;
98 	int count;
99 	int nframes;
100 	int currframe;
101 	struct can_frame *frames;
102 	struct can_frame *last_frames;
103 	struct can_frame sframe;
104 	struct can_frame last_sframe;
105 	struct sock *sk;
106 	struct net_device *rx_reg_dev;
107 };
108 
109 static struct proc_dir_entry *proc_dir;
110 
111 struct bcm_sock {
112 	struct sock sk;
113 	int bound;
114 	int ifindex;
115 	struct notifier_block notifier;
116 	struct list_head rx_ops;
117 	struct list_head tx_ops;
118 	unsigned long dropped_usr_msgs;
119 	struct proc_dir_entry *bcm_proc_read;
120 	char procname [9]; /* pointer printed in ASCII with \0 */
121 };
122 
123 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
124 {
125 	return (struct bcm_sock *)sk;
126 }
127 
128 #define CFSIZ sizeof(struct can_frame)
129 #define OPSIZ sizeof(struct bcm_op)
130 #define MHSIZ sizeof(struct bcm_msg_head)
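/*
 * Illustrative sketch (not from the original source): every message on a
 * BCM socket is a struct bcm_msg_head immediately followed by
 * msg_head.nframes struct can_frame, i.e. MHSIZ + nframes * CFSIZ bytes.
 * A userspace application typically declares something like
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame[4];
 *	} msg;
 *
 * where the array size (4 here) is chosen by the application, and
 * write()s / read()s that block on the socket (see bcm_sendmsg() and
 * bcm_recvmsg() below).
 */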
131 
132 /*
133  * procfs functions
134  */
135 static char *bcm_proc_getifname(char *result, int ifindex)
136 {
137 	struct net_device *dev;
138 
139 	if (!ifindex)
140 		return "any";
141 
142 	rcu_read_lock();
143 	dev = dev_get_by_index_rcu(&init_net, ifindex);
144 	if (dev)
145 		strcpy(result, dev->name);
146 	else
147 		strcpy(result, "???");
148 	rcu_read_unlock();
149 
150 	return result;
151 }
152 
153 static int bcm_proc_show(struct seq_file *m, void *v)
154 {
155 	char ifname[IFNAMSIZ];
156 	struct sock *sk = (struct sock *)m->private;
157 	struct bcm_sock *bo = bcm_sk(sk);
158 	struct bcm_op *op;
159 
160 	seq_printf(m, ">>> socket %p", sk->sk_socket);
161 	seq_printf(m, " / sk %p", sk);
162 	seq_printf(m, " / bo %p", bo);
163 	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
164 	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
165 	seq_printf(m, " <<<\n");
166 
167 	list_for_each_entry(op, &bo->rx_ops, list) {
168 
169 		unsigned long reduction;
170 
171 		/* print only active entries & prevent division by zero */
172 		if (!op->frames_abs)
173 			continue;
174 
175 		seq_printf(m, "rx_op: %03X %-5s ",
176 				op->can_id, bcm_proc_getifname(ifname, op->ifindex));
177 		seq_printf(m, "[%d]%c ", op->nframes,
178 				(op->flags & RX_CHECK_DLC)?'d':' ');
179 		if (op->kt_ival1.tv64)
180 			seq_printf(m, "timeo=%lld ",
181 					(long long)
182 					ktime_to_us(op->kt_ival1));
183 
184 		if (op->kt_ival2.tv64)
185 			seq_printf(m, "thr=%lld ",
186 					(long long)
187 					ktime_to_us(op->kt_ival2));
188 
189 		seq_printf(m, "# recv %ld (%ld) => reduction: ",
190 				op->frames_filtered, op->frames_abs);
191 
192 		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
193 
194 		seq_printf(m, "%s%ld%%\n",
195 				(reduction == 100)?"near ":"", reduction);
196 	}
197 
198 	list_for_each_entry(op, &bo->tx_ops, list) {
199 
200 		seq_printf(m, "tx_op: %03X %s [%d] ",
201 				op->can_id,
202 				bcm_proc_getifname(ifname, op->ifindex),
203 				op->nframes);
204 
205 		if (op->kt_ival1.tv64)
206 			seq_printf(m, "t1=%lld ",
207 					(long long) ktime_to_us(op->kt_ival1));
208 
209 		if (op->kt_ival2.tv64)
210 			seq_printf(m, "t2=%lld ",
211 					(long long) ktime_to_us(op->kt_ival2));
212 
213 		seq_printf(m, "# sent %ld\n", op->frames_abs);
214 	}
215 	seq_putc(m, '\n');
216 	return 0;
217 }
218 
219 static int bcm_proc_open(struct inode *inode, struct file *file)
220 {
221 	return single_open(file, bcm_proc_show, PDE(inode)->data);
222 }
223 
224 static const struct file_operations bcm_proc_fops = {
225 	.owner		= THIS_MODULE,
226 	.open		= bcm_proc_open,
227 	.read		= seq_read,
228 	.llseek		= seq_lseek,
229 	.release	= single_release,
230 };
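/*
 * Editorial note (not from the original source): for every socket bound
 * via bcm_connect() the data above appears as /proc/net/can-bcm/<pointer>,
 * with one rx_op/tx_op line per active operation (see bcm_proc_show()).
 */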
231 
232 /*
233  * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
234  *              of the given bcm tx op
235  */
236 static void bcm_can_tx(struct bcm_op *op)
237 {
238 	struct sk_buff *skb;
239 	struct net_device *dev;
240 	struct can_frame *cf = &op->frames[op->currframe];
241 
242 	/* no target device? => exit */
243 	if (!op->ifindex)
244 		return;
245 
246 	dev = dev_get_by_index(&init_net, op->ifindex);
247 	if (!dev) {
248 		/* RFC: should this bcm_op remove itself here? */
249 		return;
250 	}
251 
252 	skb = alloc_skb(CFSIZ, gfp_any());
253 	if (!skb)
254 		goto out;
255 
256 	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
257 
258 	/* send with loopback */
259 	skb->dev = dev;
260 	skb->sk = op->sk;
261 	can_send(skb, 1);
262 
263 	/* update statistics */
264 	op->currframe++;
265 	op->frames_abs++;
266 
267 	/* reached last frame? */
268 	if (op->currframe >= op->nframes)
269 		op->currframe = 0;
270  out:
271 	dev_put(dev);
272 }
273 
274 /*
275  * bcm_send_to_user - send a BCM message to the userspace
276  *                    (consisting of bcm_msg_head + x CAN frames)
277  */
278 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
279 			     struct can_frame *frames, int has_timestamp)
280 {
281 	struct sk_buff *skb;
282 	struct can_frame *firstframe;
283 	struct sockaddr_can *addr;
284 	struct sock *sk = op->sk;
285 	int datalen = head->nframes * CFSIZ;
286 	int err;
287 
288 	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
289 	if (!skb)
290 		return;
291 
292 	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
293 
294 	if (head->nframes) {
295 		/* can_frames starting here */
296 		firstframe = (struct can_frame *)skb_tail_pointer(skb);
297 
298 		memcpy(skb_put(skb, datalen), frames, datalen);
299 
300 		/*
301 		 * the BCM uses the can_dlc-element of the can_frame
302 		 * structure for internal purposes. This is only
303 		 * relevant for updates that are generated by the
304 		 * BCM, where nframes is 1
305 		 */
306 		if (head->nframes == 1)
307 			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
308 	}
309 
310 	if (has_timestamp) {
311 		/* restore rx timestamp */
312 		skb->tstamp = op->rx_stamp;
313 	}
314 
315 	/*
316 	 *  Put the datagram into the queue so that bcm_recvmsg() can
317 	 *  get it from there.  We need to pass the interface index to
318 	 *  bcm_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
319 	 *  containing the interface index.
320 	 */
321 
322 	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
323 	addr = (struct sockaddr_can *)skb->cb;
324 	memset(addr, 0, sizeof(*addr));
325 	addr->can_family  = AF_CAN;
326 	addr->can_ifindex = op->rx_ifindex;
327 
328 	err = sock_queue_rcv_skb(sk, skb);
329 	if (err < 0) {
330 		struct bcm_sock *bo = bcm_sk(sk);
331 
332 		kfree_skb(skb);
333 		/* don't care about overflows in this statistic */
334 		bo->dropped_usr_msgs++;
335 	}
336 }
337 
338 static void bcm_tx_timeout_tsklet(unsigned long data)
339 {
340 	struct bcm_op *op = (struct bcm_op *)data;
341 	struct bcm_msg_head msg_head;
342 
343 	if (op->kt_ival1.tv64 && (op->count > 0)) {
344 
345 		op->count--;
346 		if (!op->count && (op->flags & TX_COUNTEVT)) {
347 
348 			/* create notification to user */
349 			msg_head.opcode  = TX_EXPIRED;
350 			msg_head.flags   = op->flags;
351 			msg_head.count   = op->count;
352 			msg_head.ival1   = op->ival1;
353 			msg_head.ival2   = op->ival2;
354 			msg_head.can_id  = op->can_id;
355 			msg_head.nframes = 0;
356 
357 			bcm_send_to_user(op, &msg_head, NULL, 0);
358 		}
359 	}
360 
361 	if (op->kt_ival1.tv64 && (op->count > 0)) {
362 
363 		/* send (next) frame */
364 		bcm_can_tx(op);
365 		hrtimer_start(&op->timer,
366 			      ktime_add(ktime_get(), op->kt_ival1),
367 			      HRTIMER_MODE_ABS);
368 
369 	} else {
370 		if (op->kt_ival2.tv64) {
371 
372 			/* send (next) frame */
373 			bcm_can_tx(op);
374 			hrtimer_start(&op->timer,
375 				      ktime_add(ktime_get(), op->kt_ival2),
376 				      HRTIMER_MODE_ABS);
377 		}
378 	}
379 }
380 
381 /*
382  * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
383  */
384 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
385 {
386 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
387 
388 	tasklet_schedule(&op->tsklet);
389 
390 	return HRTIMER_NORESTART;
391 }
392 
393 /*
394  * bcm_rx_changed - create a RX_CHANGED notification due to changed content
395  */
396 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
397 {
398 	struct bcm_msg_head head;
399 
400 	/* update statistics */
401 	op->frames_filtered++;
402 
403 	/* prevent statistics overflow */
404 	if (op->frames_filtered > ULONG_MAX/100)
405 		op->frames_filtered = op->frames_abs = 0;
406 
407 	/* this element is not throttled anymore */
408 	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
409 
410 	head.opcode  = RX_CHANGED;
411 	head.flags   = op->flags;
412 	head.count   = op->count;
413 	head.ival1   = op->ival1;
414 	head.ival2   = op->ival2;
415 	head.can_id  = op->can_id;
416 	head.nframes = 1;
417 
418 	bcm_send_to_user(op, &head, data, 1);
419 }
420 
421 /*
422  * bcm_rx_update_and_send - process a detected relevant receive content change
423  *                          1. update the last received data
424  *                          2. send a notification to the user (if possible)
425  */
426 static void bcm_rx_update_and_send(struct bcm_op *op,
427 				   struct can_frame *lastdata,
428 				   const struct can_frame *rxdata)
429 {
430 	memcpy(lastdata, rxdata, CFSIZ);
431 
432 	/* mark as used and throttled by default */
433 	lastdata->can_dlc |= (RX_RECV|RX_THR);
434 
435 	/* throttling mode inactive? */
436 	if (!op->kt_ival2.tv64) {
437 		/* send RX_CHANGED to the user immediately */
438 		bcm_rx_changed(op, lastdata);
439 		return;
440 	}
441 
442 	/* with active throttling timer we are just done here */
443 	if (hrtimer_active(&op->thrtimer))
444 		return;
445 
446 	/* first reception with throttling mode enabled */
447 	if (!op->kt_lastmsg.tv64)
448 		goto rx_changed_settime;
449 
450 	/* got a second frame inside a potential throttle period? */
451 	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
452 	    ktime_to_us(op->kt_ival2)) {
453 		/* do not send the saved data - only start throttle timer */
454 		hrtimer_start(&op->thrtimer,
455 			      ktime_add(op->kt_lastmsg, op->kt_ival2),
456 			      HRTIMER_MODE_ABS);
457 		return;
458 	}
459 
460 	/* the gap was big enough that throttling was not needed here */
461 rx_changed_settime:
462 	bcm_rx_changed(op, lastdata);
463 	op->kt_lastmsg = ktime_get();
464 }
465 
466 /*
467  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
468  *                       received data stored in op->last_frames[]
469  */
470 static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
471 				const struct can_frame *rxdata)
472 {
473 	/*
474 	 * no one uses the MSBs of can_dlc for comparison,
475 	 * so we use them here to detect the first time of reception
476 	 */
477 
478 	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
479 		/* received data for the first time => send update to user */
480 		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
481 		return;
482 	}
483 
484 	/* do a real check in can_frame data section */
485 
486 	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
487 	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
488 		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
489 		return;
490 	}
491 
492 	if (op->flags & RX_CHECK_DLC) {
493 		/* do a real check in can_frame dlc */
494 		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
495 					BCM_CAN_DLC_MASK)) {
496 			bcm_rx_update_and_send(op, &op->last_frames[index],
497 					       rxdata);
498 			return;
499 		}
500 	}
501 }
502 
503 /*
504  * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
505  */
506 static void bcm_rx_starttimer(struct bcm_op *op)
507 {
508 	if (op->flags & RX_NO_AUTOTIMER)
509 		return;
510 
511 	if (op->kt_ival1.tv64)
512 		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
513 }
514 
515 static void bcm_rx_timeout_tsklet(unsigned long data)
516 {
517 	struct bcm_op *op = (struct bcm_op *)data;
518 	struct bcm_msg_head msg_head;
519 
520 	/* create notification to user */
521 	msg_head.opcode  = RX_TIMEOUT;
522 	msg_head.flags   = op->flags;
523 	msg_head.count   = op->count;
524 	msg_head.ival1   = op->ival1;
525 	msg_head.ival2   = op->ival2;
526 	msg_head.can_id  = op->can_id;
527 	msg_head.nframes = 0;
528 
529 	bcm_send_to_user(op, &msg_head, NULL, 0);
530 }
531 
532 /*
533  * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
534  */
535 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
536 {
537 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
538 
539 	/* schedule before NET_RX_SOFTIRQ */
540 	tasklet_hi_schedule(&op->tsklet);
541 
542 	/* no restart of the timer is done here! */
543 
544 	/* if the user wants to be informed when cyclic CAN messages come back */
545 	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
546 		/* clear received can_frames to indicate 'nothing received' */
547 		memset(op->last_frames, 0, op->nframes * CFSIZ);
548 	}
549 
550 	return HRTIMER_NORESTART;
551 }
552 
553 /*
554  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
555  */
556 static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
557 {
558 	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
559 		if (update)
560 			bcm_rx_changed(op, &op->last_frames[index]);
561 		return 1;
562 	}
563 	return 0;
564 }
565 
566 /*
567  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
568  *
569  * update == 0 : just check if throttled data is available  (any irq context)
570  * update == 1 : check and send throttled data to userspace (soft_irq context)
571  */
572 static int bcm_rx_thr_flush(struct bcm_op *op, int update)
573 {
574 	int updated = 0;
575 
576 	if (op->nframes > 1) {
577 		int i;
578 
579 		/* for MUX filter we start at index 1 */
580 		for (i = 1; i < op->nframes; i++)
581 			updated += bcm_rx_do_flush(op, update, i);
582 
583 	} else {
584 		/* for RX_FILTER_ID and simple filter */
585 		updated += bcm_rx_do_flush(op, update, 0);
586 	}
587 
588 	return updated;
589 }
590 
591 static void bcm_rx_thr_tsklet(unsigned long data)
592 {
593 	struct bcm_op *op = (struct bcm_op *)data;
594 
595 	/* push the changed data to the userspace */
596 	bcm_rx_thr_flush(op, 1);
597 }
598 
599 /*
600  * bcm_rx_thr_handler - the time for blocked content updates is over now:
601  *                      Check for throttled data and send it to the userspace
602  */
603 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
604 {
605 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
606 
607 	tasklet_schedule(&op->thrtsklet);
608 
609 	if (bcm_rx_thr_flush(op, 0)) {
610 		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
611 		return HRTIMER_RESTART;
612 	} else {
613 		/* rearm throttle handling */
614 		op->kt_lastmsg = ktime_set(0, 0);
615 		return HRTIMER_NORESTART;
616 	}
617 }
618 
619 /*
620  * bcm_rx_handler - handle a CAN frame reception
621  */
622 static void bcm_rx_handler(struct sk_buff *skb, void *data)
623 {
624 	struct bcm_op *op = (struct bcm_op *)data;
625 	const struct can_frame *rxframe = (struct can_frame *)skb->data;
626 	int i;
627 
628 	/* disable timeout */
629 	hrtimer_cancel(&op->timer);
630 
631 	if (op->can_id != rxframe->can_id)
632 		return;
633 
634 	/* save rx timestamp */
635 	op->rx_stamp = skb->tstamp;
636 	/* save originator for recvfrom() */
637 	op->rx_ifindex = skb->dev->ifindex;
638 	/* update statistics */
639 	op->frames_abs++;
640 
641 	if (op->flags & RX_RTR_FRAME) {
642 		/* send reply for RTR-request (placed in op->frames[0]) */
643 		bcm_can_tx(op);
644 		return;
645 	}
646 
647 	if (op->flags & RX_FILTER_ID) {
648 		/* the easiest case */
649 		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
650 		goto rx_starttimer;
651 	}
652 
653 	if (op->nframes == 1) {
654 		/* simple compare with index 0 */
655 		bcm_rx_cmp_to_index(op, 0, rxframe);
656 		goto rx_starttimer;
657 	}
658 
659 	if (op->nframes > 1) {
660 		/*
661 		 * multiplex compare
662 		 *
663 		 * find the first multiplex mask that fits.
664 		 * Remark: The MUX-mask is stored in index 0
665 		 */
666 
667 		for (i = 1; i < op->nframes; i++) {
668 			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
669 			    (GET_U64(&op->frames[0]) &
670 			     GET_U64(&op->frames[i]))) {
671 				bcm_rx_cmp_to_index(op, i, rxframe);
672 				break;
673 			}
674 		}
675 	}
676 
677 rx_starttimer:
678 	bcm_rx_starttimer(op);
679 }
680 
681 /*
682  * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
683  */
684 static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
685 				  int ifindex)
686 {
687 	struct bcm_op *op;
688 
689 	list_for_each_entry(op, ops, list) {
690 		if ((op->can_id == can_id) && (op->ifindex == ifindex))
691 			return op;
692 	}
693 
694 	return NULL;
695 }
696 
697 static void bcm_remove_op(struct bcm_op *op)
698 {
699 	hrtimer_cancel(&op->timer);
700 	hrtimer_cancel(&op->thrtimer);
701 
702 	if (op->tsklet.func)
703 		tasklet_kill(&op->tsklet);
704 
705 	if (op->thrtsklet.func)
706 		tasklet_kill(&op->thrtsklet);
707 
708 	if ((op->frames) && (op->frames != &op->sframe))
709 		kfree(op->frames);
710 
711 	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
712 		kfree(op->last_frames);
713 
714 	kfree(op);
715 
716 	return;
717 }
718 
719 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
720 {
721 	if (op->rx_reg_dev == dev) {
722 		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
723 				  bcm_rx_handler, op);
724 
725 		/* mark as removed subscription */
726 		op->rx_reg_dev = NULL;
727 	} else
728 		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
729 		       "mismatch %p %p\n", op->rx_reg_dev, dev);
730 }
731 
732 /*
733  * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
734  */
735 static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
736 {
737 	struct bcm_op *op, *n;
738 
739 	list_for_each_entry_safe(op, n, ops, list) {
740 		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
741 
742 			/*
743 			 * Don't care if we're bound or not (due to netdev
744 			 * problems); can_rx_unregister() is always a safe
745 			 * thing to do here.
746 			 */
747 			if (op->ifindex) {
748 				/*
749 				 * Only remove subscriptions that had not
750 				 * been removed due to NETDEV_UNREGISTER
751 				 * in bcm_notifier()
752 				 */
753 				if (op->rx_reg_dev) {
754 					struct net_device *dev;
755 
756 					dev = dev_get_by_index(&init_net,
757 							       op->ifindex);
758 					if (dev) {
759 						bcm_rx_unreg(dev, op);
760 						dev_put(dev);
761 					}
762 				}
763 			} else
764 				can_rx_unregister(NULL, op->can_id,
765 						  REGMASK(op->can_id),
766 						  bcm_rx_handler, op);
767 
768 			list_del(&op->list);
769 			bcm_remove_op(op);
770 			return 1; /* done */
771 		}
772 	}
773 
774 	return 0; /* not found */
775 }
776 
777 /*
778  * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
779  */
780 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
781 {
782 	struct bcm_op *op, *n;
783 
784 	list_for_each_entry_safe(op, n, ops, list) {
785 		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
786 			list_del(&op->list);
787 			bcm_remove_op(op);
788 			return 1; /* done */
789 		}
790 	}
791 
792 	return 0; /* not found */
793 }
794 
795 /*
796  * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
797  */
798 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
799 		       int ifindex)
800 {
801 	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
802 
803 	if (!op)
804 		return -EINVAL;
805 
806 	/* put current values into msg_head */
807 	msg_head->flags   = op->flags;
808 	msg_head->count   = op->count;
809 	msg_head->ival1   = op->ival1;
810 	msg_head->ival2   = op->ival2;
811 	msg_head->nframes = op->nframes;
812 
813 	bcm_send_to_user(op, msg_head, op->frames, 0);
814 
815 	return MHSIZ;
816 }
817 
818 /*
819  * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
820  */
821 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
822 			int ifindex, struct sock *sk)
823 {
824 	struct bcm_sock *bo = bcm_sk(sk);
825 	struct bcm_op *op;
826 	int i, err;
827 
828 	/* we need a real device to send frames */
829 	if (!ifindex)
830 		return -ENODEV;
831 
832 	/* we need at least one can_frame */
833 	if (msg_head->nframes < 1)
834 		return -EINVAL;
835 
836 	/* check the given can_id */
837 	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
838 
839 	if (op) {
840 		/* update existing BCM operation */
841 
842 		/*
843 		 * Do we need more space for the can_frames than currently
844 		 * allocated? -> This is a _really_ unusual use-case and
845 		 * therefore (for complexity / locking reasons) it is not supported.
846 		 */
847 		if (msg_head->nframes > op->nframes)
848 			return -E2BIG;
849 
850 		/* update can_frames content */
851 		for (i = 0; i < msg_head->nframes; i++) {
852 			err = memcpy_fromiovec((u8 *)&op->frames[i],
853 					       msg->msg_iov, CFSIZ);
854 
855 			if (op->frames[i].can_dlc > 8)
856 				err = -EINVAL;
857 
858 			if (err < 0)
859 				return err;
860 
861 			if (msg_head->flags & TX_CP_CAN_ID) {
862 				/* copy can_id into frame */
863 				op->frames[i].can_id = msg_head->can_id;
864 			}
865 		}
866 
867 	} else {
868 		/* insert new BCM operation for the given can_id */
869 
870 		op = kzalloc(OPSIZ, GFP_KERNEL);
871 		if (!op)
872 			return -ENOMEM;
873 
874 		op->can_id    = msg_head->can_id;
875 
876 		/* create array for can_frames and copy the data */
877 		if (msg_head->nframes > 1) {
878 			op->frames = kmalloc(msg_head->nframes * CFSIZ,
879 					     GFP_KERNEL);
880 			if (!op->frames) {
881 				kfree(op);
882 				return -ENOMEM;
883 			}
884 		} else
885 			op->frames = &op->sframe;
886 
887 		for (i = 0; i < msg_head->nframes; i++) {
888 			err = memcpy_fromiovec((u8 *)&op->frames[i],
889 					       msg->msg_iov, CFSIZ);
890 
891 			if (op->frames[i].can_dlc > 8)
892 				err = -EINVAL;
893 
894 			if (err < 0) {
895 				if (op->frames != &op->sframe)
896 					kfree(op->frames);
897 				kfree(op);
898 				return err;
899 			}
900 
901 			if (msg_head->flags & TX_CP_CAN_ID) {
902 				/* copy can_id into frame */
903 				op->frames[i].can_id = msg_head->can_id;
904 			}
905 		}
906 
907 		/* tx_ops never compare with previously received messages */
908 		op->last_frames = NULL;
909 
910 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
911 		op->sk = sk;
912 		op->ifindex = ifindex;
913 
914 		/* initialize uninitialized (kzalloc) structure */
915 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
916 		op->timer.function = bcm_tx_timeout_handler;
917 
918 		/* initialize tasklet for tx countevent notification */
919 		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
920 			     (unsigned long) op);
921 
922 		/* currently unused in tx_ops */
923 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
924 
925 		/* add this bcm_op to the list of the tx_ops */
926 		list_add(&op->list, &bo->tx_ops);
927 
928 	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
929 
930 	if (op->nframes != msg_head->nframes) {
931 		op->nframes   = msg_head->nframes;
932 		/* start multiple frame transmission with index 0 */
933 		op->currframe = 0;
934 	}
935 
936 	/* check flags */
937 
938 	op->flags = msg_head->flags;
939 
940 	if (op->flags & TX_RESET_MULTI_IDX) {
941 		/* start multiple frame transmission with index 0 */
942 		op->currframe = 0;
943 	}
944 
945 	if (op->flags & SETTIMER) {
946 		/* set timer values */
947 		op->count = msg_head->count;
948 		op->ival1 = msg_head->ival1;
949 		op->ival2 = msg_head->ival2;
950 		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
951 		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
952 
953 		/* disable an active timer due to zero values? */
954 		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
955 			hrtimer_cancel(&op->timer);
956 	}
957 
958 	if ((op->flags & STARTTIMER) &&
959 	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
960 
961 		/* spec: send can_frame when starting timer */
962 		op->flags |= TX_ANNOUNCE;
963 
964 		if (op->kt_ival1.tv64 && (op->count > 0)) {
965 			/* op->count-- is done in bcm_tx_timeout_handler */
966 			hrtimer_start(&op->timer, op->kt_ival1,
967 				      HRTIMER_MODE_REL);
968 		} else
969 			hrtimer_start(&op->timer, op->kt_ival2,
970 				      HRTIMER_MODE_REL);
971 	}
972 
973 	if (op->flags & TX_ANNOUNCE)
974 		bcm_can_tx(op);
975 
976 	return msg_head->nframes * CFSIZ + MHSIZ;
977 }
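/*
 * Userspace sketch (illustrative, not from the original source): a typical
 * TX_SETUP request that lets the BCM send one frame with can_id 0x42 every
 * 100 ms. The id, payload and cycle time are arbitrary example values and
 * 's' is a connected CAN_BCM socket (see bcm_connect() below); count and
 * ival1 stay zero, so only the ival2 cycle is used.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} tx;
 *
 *	memset(&tx, 0, sizeof(tx));
 *	tx.msg_head.opcode  = TX_SETUP;
 *	tx.msg_head.can_id  = 0x42;
 *	tx.msg_head.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	tx.msg_head.nframes = 1;
 *	tx.msg_head.ival2.tv_usec = 100000;
 *	tx.frame.can_dlc = 2;
 *	tx.frame.data[0] = 0xde;
 *	tx.frame.data[1] = 0xad;
 *
 *	write(s, &tx, sizeof(tx));
 */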
978 
979 /*
980  * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
981  */
982 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
983 			int ifindex, struct sock *sk)
984 {
985 	struct bcm_sock *bo = bcm_sk(sk);
986 	struct bcm_op *op;
987 	int do_rx_register;
988 	int err = 0;
989 
990 	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
991 		/* be robust against wrong usage ... */
992 		msg_head->flags |= RX_FILTER_ID;
993 		/* ignore trailing garbage */
994 		msg_head->nframes = 0;
995 	}
996 
997 	if ((msg_head->flags & RX_RTR_FRAME) &&
998 	    ((msg_head->nframes != 1) ||
999 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
1000 		return -EINVAL;
1001 
1002 	/* check the given can_id */
1003 	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
1004 	if (op) {
1005 		/* update existing BCM operation */
1006 
1007 		/*
1008 		 * Do we need more space for the can_frames than currently
1009 		 * allocated? -> This is a _really_ unusual use-case and
1010 		 * therefore (for complexity / locking reasons) it is not supported.
1011 		 */
1012 		if (msg_head->nframes > op->nframes)
1013 			return -E2BIG;
1014 
1015 		if (msg_head->nframes) {
1016 			/* update can_frames content */
1017 			err = memcpy_fromiovec((u8 *)op->frames,
1018 					       msg->msg_iov,
1019 					       msg_head->nframes * CFSIZ);
1020 			if (err < 0)
1021 				return err;
1022 
1023 			/* clear last_frames to indicate 'nothing received' */
1024 			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
1025 		}
1026 
1027 		op->nframes = msg_head->nframes;
1028 
1029 		/* Only an update -> do not call can_rx_register() */
1030 		do_rx_register = 0;
1031 
1032 	} else {
1033 		/* insert new BCM operation for the given can_id */
1034 		op = kzalloc(OPSIZ, GFP_KERNEL);
1035 		if (!op)
1036 			return -ENOMEM;
1037 
1038 		op->can_id    = msg_head->can_id;
1039 		op->nframes   = msg_head->nframes;
1040 
1041 		if (msg_head->nframes > 1) {
1042 			/* create array for can_frames and copy the data */
1043 			op->frames = kmalloc(msg_head->nframes * CFSIZ,
1044 					     GFP_KERNEL);
1045 			if (!op->frames) {
1046 				kfree(op);
1047 				return -ENOMEM;
1048 			}
1049 
1050 			/* create and init array for received can_frames */
1051 			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1052 						  GFP_KERNEL);
1053 			if (!op->last_frames) {
1054 				kfree(op->frames);
1055 				kfree(op);
1056 				return -ENOMEM;
1057 			}
1058 
1059 		} else {
1060 			op->frames = &op->sframe;
1061 			op->last_frames = &op->last_sframe;
1062 		}
1063 
1064 		if (msg_head->nframes) {
1065 			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
1066 					       msg_head->nframes * CFSIZ);
1067 			if (err < 0) {
1068 				if (op->frames != &op->sframe)
1069 					kfree(op->frames);
1070 				if (op->last_frames != &op->last_sframe)
1071 					kfree(op->last_frames);
1072 				kfree(op);
1073 				return err;
1074 			}
1075 		}
1076 
1077 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
1078 		op->sk = sk;
1079 		op->ifindex = ifindex;
1080 
1081 		/* initialize uninitialized (kzalloc) structure */
1082 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1083 		op->timer.function = bcm_rx_timeout_handler;
1084 
1085 		/* initialize tasklet for rx timeout notification */
1086 		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
1087 			     (unsigned long) op);
1088 
1089 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1090 		op->thrtimer.function = bcm_rx_thr_handler;
1091 
1092 		/* initialize tasklet for rx throttle handling */
1093 		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
1094 			     (unsigned long) op);
1095 
1096 		/* add this bcm_op to the list of the rx_ops */
1097 		list_add(&op->list, &bo->rx_ops);
1098 
1099 		/* call can_rx_register() */
1100 		do_rx_register = 1;
1101 
1102 	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1103 
1104 	/* check flags */
1105 	op->flags = msg_head->flags;
1106 
1107 	if (op->flags & RX_RTR_FRAME) {
1108 
1109 		/* no timers in RTR-mode */
1110 		hrtimer_cancel(&op->thrtimer);
1111 		hrtimer_cancel(&op->timer);
1112 
1113 		/*
1114 		 * funny feature in RX(!)_SETUP only for RTR-mode:
1115 		 * copy can_id into frame BUT without RTR-flag to
1116 		 * prevent a full-load-loopback-test ... ;-]
1117 		 */
1118 		if ((op->flags & TX_CP_CAN_ID) ||
1119 		    (op->frames[0].can_id == op->can_id))
1120 			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1121 
1122 	} else {
1123 		if (op->flags & SETTIMER) {
1124 
1125 			/* set timer value */
1126 			op->ival1 = msg_head->ival1;
1127 			op->ival2 = msg_head->ival2;
1128 			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1129 			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1130 
1131 			/* disable an active timer due to zero value? */
1132 			if (!op->kt_ival1.tv64)
1133 				hrtimer_cancel(&op->timer);
1134 
1135 			/*
1136 			 * In any case cancel the throttle timer, flush
1137 			 * potentially blocked msgs and reset throttle handling
1138 			 */
1139 			op->kt_lastmsg = ktime_set(0, 0);
1140 			hrtimer_cancel(&op->thrtimer);
1141 			bcm_rx_thr_flush(op, 1);
1142 		}
1143 
1144 		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1145 			hrtimer_start(&op->timer, op->kt_ival1,
1146 				      HRTIMER_MODE_REL);
1147 	}
1148 
1149 	/* now we can register for can_ids, if we added a new bcm_op */
1150 	if (do_rx_register) {
1151 		if (ifindex) {
1152 			struct net_device *dev;
1153 
1154 			dev = dev_get_by_index(&init_net, ifindex);
1155 			if (dev) {
1156 				err = can_rx_register(dev, op->can_id,
1157 						      REGMASK(op->can_id),
1158 						      bcm_rx_handler, op,
1159 						      "bcm");
1160 
1161 				op->rx_reg_dev = dev;
1162 				dev_put(dev);
1163 			}
1164 
1165 		} else
1166 			err = can_rx_register(NULL, op->can_id,
1167 					      REGMASK(op->can_id),
1168 					      bcm_rx_handler, op, "bcm");
1169 		if (err) {
1170 			/* this bcm rx op is broken -> remove it */
1171 			list_del(&op->list);
1172 			bcm_remove_op(op);
1173 			return err;
1174 		}
1175 	}
1176 
1177 	return msg_head->nframes * CFSIZ + MHSIZ;
1178 }
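/*
 * Userspace sketch (illustrative, not from the original source): a typical
 * RX_SETUP request subscribing to can_id 0x123. The single can_frame acts
 * as a relevant-bits mask, so RX_CHANGED is generated on the first
 * reception and whenever the first two payload bytes change; with
 * SETTIMER | STARTTIMER and ival1 = 1 s an RX_TIMEOUT is sent when
 * reception stops. All values are arbitrary examples.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} rx;
 *
 *	memset(&rx, 0, sizeof(rx));
 *	rx.msg_head.opcode  = RX_SETUP;
 *	rx.msg_head.can_id  = 0x123;
 *	rx.msg_head.flags   = SETTIMER | STARTTIMER;
 *	rx.msg_head.nframes = 1;
 *	rx.msg_head.ival1.tv_sec = 1;
 *	rx.frame.can_dlc = 8;
 *	rx.frame.data[0] = 0xff;
 *	rx.frame.data[1] = 0xff;
 *
 *	write(s, &rx, sizeof(rx));
 */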
1179 
1180 /*
1181  * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1182  */
1183 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1184 {
1185 	struct sk_buff *skb;
1186 	struct net_device *dev;
1187 	int err;
1188 
1189 	/* we need a real device to send frames */
1190 	if (!ifindex)
1191 		return -ENODEV;
1192 
1193 	skb = alloc_skb(CFSIZ, GFP_KERNEL);
1194 
1195 	if (!skb)
1196 		return -ENOMEM;
1197 
1198 	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
1199 	if (err < 0) {
1200 		kfree_skb(skb);
1201 		return err;
1202 	}
1203 
1204 	dev = dev_get_by_index(&init_net, ifindex);
1205 	if (!dev) {
1206 		kfree_skb(skb);
1207 		return -ENODEV;
1208 	}
1209 
1210 	skb->dev = dev;
1211 	skb->sk  = sk;
1212 	err = can_send(skb, 1); /* send with loopback */
1213 	dev_put(dev);
1214 
1215 	if (err)
1216 		return err;
1217 
1218 	return CFSIZ + MHSIZ;
1219 }
1220 
1221 /*
1222  * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1223  */
1224 static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
1225 		       struct msghdr *msg, size_t size)
1226 {
1227 	struct sock *sk = sock->sk;
1228 	struct bcm_sock *bo = bcm_sk(sk);
1229 	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1230 	struct bcm_msg_head msg_head;
1231 	int ret; /* read bytes or error codes as return value */
1232 
1233 	if (!bo->bound)
1234 		return -ENOTCONN;
1235 
1236 	/* check for valid message length from userspace */
1237 	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
1238 		return -EINVAL;
1239 
1240 	/* check for alternative ifindex for this bcm_op */
1241 
1242 	if (!ifindex && msg->msg_name) {
1243 		/* no bound device as default => check msg_name */
1244 		struct sockaddr_can *addr =
1245 			(struct sockaddr_can *)msg->msg_name;
1246 
1247 		if (addr->can_family != AF_CAN)
1248 			return -EINVAL;
1249 
1250 		/* ifindex from sendto() */
1251 		ifindex = addr->can_ifindex;
1252 
1253 		if (ifindex) {
1254 			struct net_device *dev;
1255 
1256 			dev = dev_get_by_index(&init_net, ifindex);
1257 			if (!dev)
1258 				return -ENODEV;
1259 
1260 			if (dev->type != ARPHRD_CAN) {
1261 				dev_put(dev);
1262 				return -ENODEV;
1263 			}
1264 
1265 			dev_put(dev);
1266 		}
1267 	}
1268 
1269 	/* read message head information */
1270 
1271 	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
1272 	if (ret < 0)
1273 		return ret;
1274 
1275 	lock_sock(sk);
1276 
1277 	switch (msg_head.opcode) {
1278 
1279 	case TX_SETUP:
1280 		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1281 		break;
1282 
1283 	case RX_SETUP:
1284 		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1285 		break;
1286 
1287 	case TX_DELETE:
1288 		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1289 			ret = MHSIZ;
1290 		else
1291 			ret = -EINVAL;
1292 		break;
1293 
1294 	case RX_DELETE:
1295 		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1296 			ret = MHSIZ;
1297 		else
1298 			ret = -EINVAL;
1299 		break;
1300 
1301 	case TX_READ:
1302 		/* reuse msg_head for the reply to TX_READ */
1303 		msg_head.opcode  = TX_STATUS;
1304 		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1305 		break;
1306 
1307 	case RX_READ:
1308 		/* reuse msg_head for the reply to RX_READ */
1309 		msg_head.opcode  = RX_STATUS;
1310 		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1311 		break;
1312 
1313 	case TX_SEND:
1314 		/* we need exactly one can_frame behind the msg head */
1315 		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
1316 			ret = -EINVAL;
1317 		else
1318 			ret = bcm_tx_send(msg, ifindex, sk);
1319 		break;
1320 
1321 	default:
1322 		ret = -EINVAL;
1323 		break;
1324 	}
1325 
1326 	release_sock(sk);
1327 
1328 	return ret;
1329 }
1330 
1331 /*
1332  * notification handler for netdevice status changes
1333  */
1334 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1335 			void *data)
1336 {
1337 	struct net_device *dev = (struct net_device *)data;
1338 	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1339 	struct sock *sk = &bo->sk;
1340 	struct bcm_op *op;
1341 	int notify_enodev = 0;
1342 
1343 	if (!net_eq(dev_net(dev), &init_net))
1344 		return NOTIFY_DONE;
1345 
1346 	if (dev->type != ARPHRD_CAN)
1347 		return NOTIFY_DONE;
1348 
1349 	switch (msg) {
1350 
1351 	case NETDEV_UNREGISTER:
1352 		lock_sock(sk);
1353 
1354 		/* remove device specific receive entries */
1355 		list_for_each_entry(op, &bo->rx_ops, list)
1356 			if (op->rx_reg_dev == dev)
1357 				bcm_rx_unreg(dev, op);
1358 
1359 		/* remove device reference, if this is our bound device */
1360 		if (bo->bound && bo->ifindex == dev->ifindex) {
1361 			bo->bound   = 0;
1362 			bo->ifindex = 0;
1363 			notify_enodev = 1;
1364 		}
1365 
1366 		release_sock(sk);
1367 
1368 		if (notify_enodev) {
1369 			sk->sk_err = ENODEV;
1370 			if (!sock_flag(sk, SOCK_DEAD))
1371 				sk->sk_error_report(sk);
1372 		}
1373 		break;
1374 
1375 	case NETDEV_DOWN:
1376 		if (bo->bound && bo->ifindex == dev->ifindex) {
1377 			sk->sk_err = ENETDOWN;
1378 			if (!sock_flag(sk, SOCK_DEAD))
1379 				sk->sk_error_report(sk);
1380 		}
1381 	}
1382 
1383 	return NOTIFY_DONE;
1384 }
1385 
1386 /*
1387  * initial settings for all BCM sockets to be set at socket creation time
1388  */
1389 static int bcm_init(struct sock *sk)
1390 {
1391 	struct bcm_sock *bo = bcm_sk(sk);
1392 
1393 	bo->bound            = 0;
1394 	bo->ifindex          = 0;
1395 	bo->dropped_usr_msgs = 0;
1396 	bo->bcm_proc_read    = NULL;
1397 
1398 	INIT_LIST_HEAD(&bo->tx_ops);
1399 	INIT_LIST_HEAD(&bo->rx_ops);
1400 
1401 	/* set notifier */
1402 	bo->notifier.notifier_call = bcm_notifier;
1403 
1404 	register_netdevice_notifier(&bo->notifier);
1405 
1406 	return 0;
1407 }
1408 
1409 /*
1410  * standard socket functions
1411  */
1412 static int bcm_release(struct socket *sock)
1413 {
1414 	struct sock *sk = sock->sk;
1415 	struct bcm_sock *bo = bcm_sk(sk);
1416 	struct bcm_op *op, *next;
1417 
1418 	/* remove bcm_ops, timer, rx_unregister(), etc. */
1419 
1420 	unregister_netdevice_notifier(&bo->notifier);
1421 
1422 	lock_sock(sk);
1423 
1424 	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1425 		bcm_remove_op(op);
1426 
1427 	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1428 		/*
1429 		 * Don't care if we're bound or not (due to netdev problems);
1430 		 * can_rx_unregister() is always a safe thing to do here.
1431 		 */
1432 		if (op->ifindex) {
1433 			/*
1434 			 * Only remove subscriptions that had not
1435 			 * been removed due to NETDEV_UNREGISTER
1436 			 * in bcm_notifier()
1437 			 */
1438 			if (op->rx_reg_dev) {
1439 				struct net_device *dev;
1440 
1441 				dev = dev_get_by_index(&init_net, op->ifindex);
1442 				if (dev) {
1443 					bcm_rx_unreg(dev, op);
1444 					dev_put(dev);
1445 				}
1446 			}
1447 		} else
1448 			can_rx_unregister(NULL, op->can_id,
1449 					  REGMASK(op->can_id),
1450 					  bcm_rx_handler, op);
1451 
1452 		bcm_remove_op(op);
1453 	}
1454 
1455 	/* remove procfs entry */
1456 	if (proc_dir && bo->bcm_proc_read)
1457 		remove_proc_entry(bo->procname, proc_dir);
1458 
1459 	/* remove device reference */
1460 	if (bo->bound) {
1461 		bo->bound   = 0;
1462 		bo->ifindex = 0;
1463 	}
1464 
1465 	sock_orphan(sk);
1466 	sock->sk = NULL;
1467 
1468 	release_sock(sk);
1469 	sock_put(sk);
1470 
1471 	return 0;
1472 }
1473 
1474 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1475 		       int flags)
1476 {
1477 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1478 	struct sock *sk = sock->sk;
1479 	struct bcm_sock *bo = bcm_sk(sk);
1480 
1481 	if (bo->bound)
1482 		return -EISCONN;
1483 
1484 	/* bind a device to this socket */
1485 	if (addr->can_ifindex) {
1486 		struct net_device *dev;
1487 
1488 		dev = dev_get_by_index(&init_net, addr->can_ifindex);
1489 		if (!dev)
1490 			return -ENODEV;
1491 
1492 		if (dev->type != ARPHRD_CAN) {
1493 			dev_put(dev);
1494 			return -ENODEV;
1495 		}
1496 
1497 		bo->ifindex = dev->ifindex;
1498 		dev_put(dev);
1499 
1500 	} else {
1501 		/* no interface reference for ifindex = 0 ('any' CAN device) */
1502 		bo->ifindex = 0;
1503 	}
1504 
1505 	bo->bound = 1;
1506 
1507 	if (proc_dir) {
1508 		/* unique socket address as filename */
1509 		sprintf(bo->procname, "%p", sock);
1510 		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1511 						     proc_dir,
1512 						     &bcm_proc_fops, sk);
1513 	}
1514 
1515 	return 0;
1516 }
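/*
 * Userspace sketch (illustrative, not from the original source): creating
 * a BCM socket and attaching it to one CAN interface; "can0" is an
 * arbitrary example name resolved with if_nametoindex().
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *
 *	addr.can_ifindex = if_nametoindex("can0");
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 */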
1517 
1518 static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
1519 		       struct msghdr *msg, size_t size, int flags)
1520 {
1521 	struct sock *sk = sock->sk;
1522 	struct sk_buff *skb;
1523 	int error = 0;
1524 	int noblock;
1525 	int err;
1526 
1527 	noblock =  flags & MSG_DONTWAIT;
1528 	flags   &= ~MSG_DONTWAIT;
1529 	skb = skb_recv_datagram(sk, flags, noblock, &error);
1530 	if (!skb)
1531 		return error;
1532 
1533 	if (skb->len < size)
1534 		size = skb->len;
1535 
1536 	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
1537 	if (err < 0) {
1538 		skb_free_datagram(sk, skb);
1539 		return err;
1540 	}
1541 
1542 	sock_recv_ts_and_drops(msg, sk, skb);
1543 
1544 	if (msg->msg_name) {
1545 		msg->msg_namelen = sizeof(struct sockaddr_can);
1546 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1547 	}
1548 
1549 	skb_free_datagram(sk, skb);
1550 
1551 	return size;
1552 }
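/*
 * Userspace sketch (illustrative, not from the original source): reading
 * one notification queued by bcm_send_to_user(). RX_CHANGED carries the
 * head plus exactly one can_frame, RX_TIMEOUT and TX_EXPIRED carry
 * nframes == 0; handle_new_data() is a placeholder for application code.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} nl;
 *	ssize_t n = read(s, &nl, sizeof(nl));
 *
 *	if (n >= (ssize_t)sizeof(struct bcm_msg_head) &&
 *	    nl.msg_head.opcode == RX_CHANGED)
 *		handle_new_data(nl.msg_head.can_id, &nl.frame);
 */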
1553 
1554 static struct proto_ops bcm_ops __read_mostly = {
1555 	.family        = PF_CAN,
1556 	.release       = bcm_release,
1557 	.bind          = sock_no_bind,
1558 	.connect       = bcm_connect,
1559 	.socketpair    = sock_no_socketpair,
1560 	.accept        = sock_no_accept,
1561 	.getname       = sock_no_getname,
1562 	.poll          = datagram_poll,
1563 	.ioctl         = NULL,		/* use can_ioctl() from af_can.c */
1564 	.listen        = sock_no_listen,
1565 	.shutdown      = sock_no_shutdown,
1566 	.setsockopt    = sock_no_setsockopt,
1567 	.getsockopt    = sock_no_getsockopt,
1568 	.sendmsg       = bcm_sendmsg,
1569 	.recvmsg       = bcm_recvmsg,
1570 	.mmap          = sock_no_mmap,
1571 	.sendpage      = sock_no_sendpage,
1572 };
1573 
1574 static struct proto bcm_proto __read_mostly = {
1575 	.name       = "CAN_BCM",
1576 	.owner      = THIS_MODULE,
1577 	.obj_size   = sizeof(struct bcm_sock),
1578 	.init       = bcm_init,
1579 };
1580 
1581 static struct can_proto bcm_can_proto __read_mostly = {
1582 	.type       = SOCK_DGRAM,
1583 	.protocol   = CAN_BCM,
1584 	.ops        = &bcm_ops,
1585 	.prot       = &bcm_proto,
1586 };
1587 
1588 static int __init bcm_module_init(void)
1589 {
1590 	int err;
1591 
1592 	printk(banner);
1593 
1594 	err = can_proto_register(&bcm_can_proto);
1595 	if (err < 0) {
1596 		printk(KERN_ERR "can: registration of bcm protocol failed\n");
1597 		return err;
1598 	}
1599 
1600 	/* create /proc/net/can-bcm directory */
1601 	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1602 	return 0;
1603 }
1604 
1605 static void __exit bcm_module_exit(void)
1606 {
1607 	can_proto_unregister(&bcm_can_proto);
1608 
1609 	if (proc_dir)
1610 		proc_net_remove(&init_net, "can-bcm");
1611 }
1612 
1613 module_init(bcm_module_init);
1614 module_exit(bcm_module_exit);
1615