1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
4  *
5  * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of Volkswagen nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * Alternatively, provided that this notice is retained in full, this
21  * software may be distributed under the terms of the GNU General
22  * Public License ("GPL") version 2, in which case the provisions of the
23  * GPL apply INSTEAD OF those given above.
24  *
25  * The provided data structures and external interfaces from this code
26  * are not restricted to be used by modules with a GPL compatible license.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39  * DAMAGE.
40  *
41  */
42 
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/interrupt.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/seq_file.h>
50 #include <linux/uio.h>
51 #include <linux/net.h>
52 #include <linux/netdevice.h>
53 #include <linux/socket.h>
54 #include <linux/if_arp.h>
55 #include <linux/skbuff.h>
56 #include <linux/can.h>
57 #include <linux/can/core.h>
58 #include <linux/can/skb.h>
59 #include <linux/can/bcm.h>
60 #include <linux/slab.h>
61 #include <linux/spinlock.h>
62 #include <net/sock.h>
63 #include <net/net_namespace.h>
64 
65 /*
66  * To send multiple CAN frame contents within TX_SETUP or to filter
67  * CAN messages with a multiplex index within RX_SETUP, the number of
68  * different filters is limited to 256 due to the one-byte index value.
69  */
70 #define MAX_NFRAMES 256
71 
72 /* limit timers to 400 days for sending/timeouts */
73 #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
74 
75 /* use of last_frames[index].flags */
76 #define RX_LOCAL   0x10 /* frame was created on the local host */
77 #define RX_OWN     0x20 /* frame was sent via the socket it was received on */
78 #define RX_RECV    0x40 /* received data for this element */
79 #define RX_THR     0x80 /* element has not been sent due to the throttle feature */
80 #define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
81 
82 /* get best masking value for can_rx_register() for a given single can_id */
83 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
84 		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
85 		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
86 
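/*
 * For illustration, with the can_id flag/mask constants from <linux/can.h>
 * (CAN_SFF_MASK 0x7FF, CAN_EFF_MASK 0x1FFFFFFF, CAN_RTR_FLAG 0x40000000,
 * CAN_EFF_FLAG 0x80000000) the macro evaluates to e.g.:
 *
 *   REGMASK(0x123)                == 0xC00007FF   (11 id bits + EFF/RTR type bits)
 *   REGMASK(0x123 | CAN_EFF_FLAG) == 0xDFFFFFFF   (29 id bits + EFF/RTR type bits)
 *
 * so a subscription matches exactly one can_id and never mixes base,
 * extended and RTR frames.
 */
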
87 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
88 MODULE_LICENSE("Dual BSD/GPL");
89 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
90 MODULE_ALIAS("can-proto-2");
91 
92 #define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
93 
94 /*
95  * easy access to the first 64 bits of the can(fd)_frame payload. cp->data is
96  * 64 bit aligned so the offset has to be a multiple of 8, which is ensured
97  * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler().
98  */
99 static inline u64 get_u64(const struct canfd_frame *cp, int offset)
100 {
101 	return *(u64 *)(cp->data + offset);
102 }
103 
104 struct bcm_op {
105 	struct list_head list;
106 	struct rcu_head rcu;
107 	int ifindex;
108 	canid_t can_id;
109 	u32 flags;
110 	unsigned long frames_abs, frames_filtered;
111 	struct bcm_timeval ival1, ival2;
112 	struct hrtimer timer, thrtimer;
113 	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
114 	int rx_ifindex;
115 	int cfsiz;
116 	u32 count;
117 	u32 nframes;
118 	u32 currframe;
119 	/* void pointers to arrays of struct can[fd]_frame */
120 	void *frames;
121 	void *last_frames;
122 	struct canfd_frame sframe;
123 	struct canfd_frame last_sframe;
124 	struct sock *sk;
125 	struct net_device *rx_reg_dev;
126 	spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
127 };
128 
129 struct bcm_sock {
130 	struct sock sk;
131 	int bound;
132 	int ifindex;
133 	struct list_head notifier;
134 	struct list_head rx_ops;
135 	struct list_head tx_ops;
136 	unsigned long dropped_usr_msgs;
137 	struct proc_dir_entry *bcm_proc_read;
138 	char procname [32]; /* inode number in decimal with \0 */
139 };
140 
141 static LIST_HEAD(bcm_notifier_list);
142 static DEFINE_SPINLOCK(bcm_notifier_lock);
143 static struct bcm_sock *bcm_busy_notifier;
144 
145 /* Return pointer to store the extra msg flags for bcm_recvmsg().
146  * We use the space of one unsigned int beyond the 'struct sockaddr_can'
147  * in skb->cb.
148  */
149 static inline unsigned int *bcm_flags(struct sk_buff *skb)
150 {
151 	/* return pointer after struct sockaddr_can */
152 	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
153 }
154 
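/*
 * Layout sketch of the skb->cb scratch area as used by this module (the
 * size is checked at build time by sock_skb_cb_check_size() in
 * bcm_send_to_user()):
 *
 *   skb->cb:  | struct sockaddr_can (originator) | unsigned int msg flags |
 *
 * bcm_recvmsg() copies the sockaddr_can part into msg_name and ORs the
 * flags word into msg->msg_flags.
 */
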
155 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
156 {
157 	return (struct bcm_sock *)sk;
158 }
159 
160 static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
161 {
162 	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
163 }
164 
165 /* check limitations for timeval provided by user */
166 static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
167 {
168 	if ((msg_head->ival1.tv_sec < 0) ||
169 	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
170 	    (msg_head->ival1.tv_usec < 0) ||
171 	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
172 	    (msg_head->ival2.tv_sec < 0) ||
173 	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
174 	    (msg_head->ival2.tv_usec < 0) ||
175 	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
176 		return true;
177 
178 	return false;
179 }
180 
181 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
182 #define OPSIZ sizeof(struct bcm_op)
183 #define MHSIZ sizeof(struct bcm_msg_head)
184 
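/*
 * For illustration: every message exchanged on a BCM socket consists of a
 * struct bcm_msg_head (MHSIZ) optionally followed by nframes CAN frames of
 * a fixed per-op size, so bcm_sendmsg() below requires
 *
 *   (size - MHSIZ) % CFSIZ(flags) == 0
 *
 * A classic CAN TX_SETUP with 3 frames is therefore MHSIZ + 3 * CAN_MTU
 * bytes (CAN_MTU = sizeof(struct can_frame) = 16), while a CAN FD op uses
 * CANFD_MTU (= 72) bytes per frame.
 */
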
185 /*
186  * procfs functions
187  */
188 #if IS_ENABLED(CONFIG_PROC_FS)
189 static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
190 {
191 	struct net_device *dev;
192 
193 	if (!ifindex)
194 		return "any";
195 
196 	rcu_read_lock();
197 	dev = dev_get_by_index_rcu(net, ifindex);
198 	if (dev)
199 		strcpy(result, dev->name);
200 	else
201 		strcpy(result, "???");
202 	rcu_read_unlock();
203 
204 	return result;
205 }
206 
207 static int bcm_proc_show(struct seq_file *m, void *v)
208 {
209 	char ifname[IFNAMSIZ];
210 	struct net *net = m->private;
211 	struct sock *sk = (struct sock *)pde_data(m->file->f_inode);
212 	struct bcm_sock *bo = bcm_sk(sk);
213 	struct bcm_op *op;
214 
215 	seq_printf(m, ">>> socket %pK", sk->sk_socket);
216 	seq_printf(m, " / sk %pK", sk);
217 	seq_printf(m, " / bo %pK", bo);
218 	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
219 	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
220 	seq_printf(m, " <<<\n");
221 
222 	rcu_read_lock();
223 
224 	list_for_each_entry_rcu(op, &bo->rx_ops, list) {
225 
226 		unsigned long reduction;
227 
228 		/* print only active entries & prevent division by zero */
229 		if (!op->frames_abs)
230 			continue;
231 
232 		seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
233 			   bcm_proc_getifname(net, ifname, op->ifindex));
234 
235 		if (op->flags & CAN_FD_FRAME)
236 			seq_printf(m, "(%u)", op->nframes);
237 		else
238 			seq_printf(m, "[%u]", op->nframes);
239 
240 		seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
241 
242 		if (op->kt_ival1)
243 			seq_printf(m, "timeo=%lld ",
244 				   (long long)ktime_to_us(op->kt_ival1));
245 
246 		if (op->kt_ival2)
247 			seq_printf(m, "thr=%lld ",
248 				   (long long)ktime_to_us(op->kt_ival2));
249 
250 		seq_printf(m, "# recv %ld (%ld) => reduction: ",
251 			   op->frames_filtered, op->frames_abs);
252 
253 		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
254 
255 		seq_printf(m, "%s%ld%%\n",
256 			   (reduction == 100) ? "near " : "", reduction);
257 	}
258 
259 	list_for_each_entry(op, &bo->tx_ops, list) {
260 
261 		seq_printf(m, "tx_op: %03X %s ", op->can_id,
262 			   bcm_proc_getifname(net, ifname, op->ifindex));
263 
264 		if (op->flags & CAN_FD_FRAME)
265 			seq_printf(m, "(%u) ", op->nframes);
266 		else
267 			seq_printf(m, "[%u] ", op->nframes);
268 
269 		if (op->kt_ival1)
270 			seq_printf(m, "t1=%lld ",
271 				   (long long)ktime_to_us(op->kt_ival1));
272 
273 		if (op->kt_ival2)
274 			seq_printf(m, "t2=%lld ",
275 				   (long long)ktime_to_us(op->kt_ival2));
276 
277 		seq_printf(m, "# sent %ld\n", op->frames_abs);
278 	}
279 	seq_putc(m, '\n');
280 
281 	rcu_read_unlock();
282 
283 	return 0;
284 }
285 #endif /* CONFIG_PROC_FS */
286 
287 /*
288  * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
289  *              of the given bcm tx op
290  */
291 static void bcm_can_tx(struct bcm_op *op)
292 {
293 	struct sk_buff *skb;
294 	struct net_device *dev;
295 	struct canfd_frame *cf;
296 	int err;
297 
298 	/* no target device? => exit */
299 	if (!op->ifindex)
300 		return;
301 
302 	/* read currframe under lock protection */
303 	spin_lock_bh(&op->bcm_tx_lock);
304 	cf = op->frames + op->cfsiz * op->currframe;
305 	spin_unlock_bh(&op->bcm_tx_lock);
306 
307 	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
308 	if (!dev) {
309 		/* RFC: should this bcm_op remove itself here? */
310 		return;
311 	}
312 
313 	skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
314 	if (!skb)
315 		goto out;
316 
317 	can_skb_reserve(skb);
318 	can_skb_prv(skb)->ifindex = dev->ifindex;
319 	can_skb_prv(skb)->skbcnt = 0;
320 
321 	skb_put_data(skb, cf, op->cfsiz);
322 
323 	/* send with loopback */
324 	skb->dev = dev;
325 	can_skb_set_owner(skb, op->sk);
326 	err = can_send(skb, 1);
327 
328 	/* update currframe and count under lock protection */
329 	spin_lock_bh(&op->bcm_tx_lock);
330 
331 	if (!err)
332 		op->frames_abs++;
333 
334 	op->currframe++;
335 
336 	/* reached last frame? */
337 	if (op->currframe >= op->nframes)
338 		op->currframe = 0;
339 
340 	if (op->count > 0)
341 		op->count--;
342 
343 	spin_unlock_bh(&op->bcm_tx_lock);
344 out:
345 	dev_put(dev);
346 }
347 
348 /*
349  * bcm_send_to_user - send a BCM message to userspace
350  *                    (consisting of bcm_msg_head + x CAN frames)
351  */
352 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
353 			     struct canfd_frame *frames, int has_timestamp)
354 {
355 	struct sk_buff *skb;
356 	struct canfd_frame *firstframe;
357 	struct sockaddr_can *addr;
358 	struct sock *sk = op->sk;
359 	unsigned int datalen = head->nframes * op->cfsiz;
360 	int err;
361 	unsigned int *pflags;
362 
363 	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
364 	if (!skb)
365 		return;
366 
367 	skb_put_data(skb, head, sizeof(*head));
368 
369 	/* ensure space for sockaddr_can and msg flags */
370 	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
371 			       sizeof(unsigned int));
372 
373 	/* initialize msg flags */
374 	pflags = bcm_flags(skb);
375 	*pflags = 0;
376 
377 	if (head->nframes) {
378 		/* CAN frames starting here */
379 		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
380 
381 		skb_put_data(skb, frames, datalen);
382 
383 		/*
384 		 * the BCM uses the flags-element of the canfd_frame
385 		 * structure for internal purposes. This is only
386 		 * relevant for updates that are generated by the
387 		 * BCM, where nframes is 1
388 		 */
389 		if (head->nframes == 1) {
390 			if (firstframe->flags & RX_LOCAL)
391 				*pflags |= MSG_DONTROUTE;
392 			if (firstframe->flags & RX_OWN)
393 				*pflags |= MSG_CONFIRM;
394 
395 			firstframe->flags &= BCM_CAN_FLAGS_MASK;
396 		}
397 	}
398 
399 	if (has_timestamp) {
400 		/* restore rx timestamp */
401 		skb->tstamp = op->rx_stamp;
402 	}
403 
404 	/*
405 	 *  Put the datagram into the receive queue so that bcm_recvmsg() can
406 	 *  get it from there.  We need to pass the interface index to
407 	 *  bcm_recvmsg(), so we pass a whole struct sockaddr_can in skb->cb
408 	 *  containing the interface index.
409 	 */
410 
411 	addr = (struct sockaddr_can *)skb->cb;
412 	memset(addr, 0, sizeof(*addr));
413 	addr->can_family  = AF_CAN;
414 	addr->can_ifindex = op->rx_ifindex;
415 
416 	err = sock_queue_rcv_skb(sk, skb);
417 	if (err < 0) {
418 		struct bcm_sock *bo = bcm_sk(sk);
419 
420 		kfree_skb(skb);
421 		/* don't care about overflows in this statistic */
422 		bo->dropped_usr_msgs++;
423 	}
424 }
425 
426 static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
427 {
428 	ktime_t ival;
429 
430 	if (op->kt_ival1 && op->count)
431 		ival = op->kt_ival1;
432 	else if (op->kt_ival2)
433 		ival = op->kt_ival2;
434 	else
435 		return false;
436 
437 	hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
438 	return true;
439 }
440 
441 static void bcm_tx_start_timer(struct bcm_op *op)
442 {
443 	if (bcm_tx_set_expiry(op, &op->timer))
444 		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
445 }
446 
447 /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
448 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
449 {
450 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
451 	struct bcm_msg_head msg_head;
452 
453 	if (op->kt_ival1 && (op->count > 0)) {
454 		bcm_can_tx(op);
455 		if (!op->count && (op->flags & TX_COUNTEVT)) {
456 
457 			/* create notification to user */
458 			memset(&msg_head, 0, sizeof(msg_head));
459 			msg_head.opcode  = TX_EXPIRED;
460 			msg_head.flags   = op->flags;
461 			msg_head.count   = op->count;
462 			msg_head.ival1   = op->ival1;
463 			msg_head.ival2   = op->ival2;
464 			msg_head.can_id  = op->can_id;
465 			msg_head.nframes = 0;
466 
467 			bcm_send_to_user(op, &msg_head, NULL, 0);
468 		}
469 
470 	} else if (op->kt_ival2) {
471 		bcm_can_tx(op);
472 	}
473 
474 	return bcm_tx_set_expiry(op, &op->timer) ?
475 		HRTIMER_RESTART : HRTIMER_NORESTART;
476 }
477 
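/*
 * Worked example: a TX_SETUP with count = 3, ival1 = 10 ms and ival2 = 1 s
 * lets this handler send the frame(s) three times with a 10 ms gap and then
 * continue with a 1 s period; with TX_COUNTEVT set, a TX_EXPIRED message is
 * queued to the socket once count has reached zero.  A zero ival2 stops the
 * transmission after the initial count.
 */
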
478 /*
479  * bcm_rx_changed - create a RX_CHANGED notification due to changed content
480  */
481 static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
482 {
483 	struct bcm_msg_head head;
484 
485 	/* update statistics */
486 	op->frames_filtered++;
487 
488 	/* prevent statistics overflow */
489 	if (op->frames_filtered > ULONG_MAX/100)
490 		op->frames_filtered = op->frames_abs = 0;
491 
492 	/* this element is not throttled anymore */
493 	data->flags &= ~RX_THR;
494 
495 	memset(&head, 0, sizeof(head));
496 	head.opcode  = RX_CHANGED;
497 	head.flags   = op->flags;
498 	head.count   = op->count;
499 	head.ival1   = op->ival1;
500 	head.ival2   = op->ival2;
501 	head.can_id  = op->can_id;
502 	head.nframes = 1;
503 
504 	bcm_send_to_user(op, &head, data, 1);
505 }
506 
507 /*
508  * bcm_rx_update_and_send - process a detected relevant receive content change
509  *                          1. update the last received data
510  *                          2. send a notification to the user (if possible)
511  */
512 static void bcm_rx_update_and_send(struct bcm_op *op,
513 				   struct canfd_frame *lastdata,
514 				   const struct canfd_frame *rxdata,
515 				   unsigned char traffic_flags)
516 {
517 	memcpy(lastdata, rxdata, op->cfsiz);
518 
519 	/* mark as used and throttled by default */
520 	lastdata->flags |= (RX_RECV|RX_THR);
521 
522 	/* add own/local/remote traffic flags */
523 	lastdata->flags |= traffic_flags;
524 
525 	/* throttling mode inactive ? */
526 	if (!op->kt_ival2) {
527 		/* send RX_CHANGED to the user immediately */
528 		bcm_rx_changed(op, lastdata);
529 		return;
530 	}
531 
532 	/* with active throttling timer we are just done here */
533 	if (hrtimer_active(&op->thrtimer))
534 		return;
535 
536 	/* first reception with enabled throttling mode */
537 	if (!op->kt_lastmsg)
538 		goto rx_changed_settime;
539 
540 	/* got a second frame inside a potential throttle period? */
541 	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
542 	    ktime_to_us(op->kt_ival2)) {
543 		/* do not send the saved data - only start throttle timer */
544 		hrtimer_start(&op->thrtimer,
545 			      ktime_add(op->kt_lastmsg, op->kt_ival2),
546 			      HRTIMER_MODE_ABS_SOFT);
547 		return;
548 	}
549 
550 	/* the gap was big enough that throttling was not needed here */
551 rx_changed_settime:
552 	bcm_rx_changed(op, lastdata);
553 	op->kt_lastmsg = ktime_get();
554 }
555 
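/*
 * Worked example of the throttling above: with kt_ival2 = 100 ms and
 * matching frames whose content changes every 10 ms, the first change is
 * delivered immediately, later changes only (re)arm thrtimer, and
 * bcm_rx_thr_handler()/bcm_rx_thr_flush() deliver the newest content once
 * per 100 ms - at most ~10 RX_CHANGED messages per second instead of 100.
 */
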
556 /*
557  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
558  *                       received data stored in op->last_frames[]
559  */
560 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
561 				const struct canfd_frame *rxdata,
562 				unsigned char traffic_flags)
563 {
564 	struct canfd_frame *cf = op->frames + op->cfsiz * index;
565 	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
566 	int i;
567 
568 	/*
569 	 * no one uses the MSBs of flags for comparison,
570 	 * so we use them here to detect the first reception
571 	 */
572 
573 	if (!(lcf->flags & RX_RECV)) {
574 		/* received data for the first time => send update to user */
575 		bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
576 		return;
577 	}
578 
579 	/* do a real check in CAN frame data section */
580 	for (i = 0; i < rxdata->len; i += 8) {
581 		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
582 		    (get_u64(cf, i) & get_u64(lcf, i))) {
583 			bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
584 			return;
585 		}
586 	}
587 
588 	if (op->flags & RX_CHECK_DLC) {
589 		/* do a real check in CAN frame length */
590 		if (rxdata->len != lcf->len) {
591 			bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
592 			return;
593 		}
594 	}
595 }
596 
597 /*
598  * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
599  */
600 static void bcm_rx_starttimer(struct bcm_op *op)
601 {
602 	if (op->flags & RX_NO_AUTOTIMER)
603 		return;
604 
605 	if (op->kt_ival1)
606 		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
607 }
608 
609 /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
610 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
611 {
612 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
613 	struct bcm_msg_head msg_head;
614 
615 	/* if user wants to be informed, when cyclic CAN-Messages come back */
616 	/* if the user wants to be informed when cyclic CAN messages come back */
617 		/* clear received CAN frames to indicate 'nothing received' */
618 		memset(op->last_frames, 0, op->nframes * op->cfsiz);
619 	}
620 
621 	/* create notification to user */
622 	memset(&msg_head, 0, sizeof(msg_head));
623 	msg_head.opcode  = RX_TIMEOUT;
624 	msg_head.flags   = op->flags;
625 	msg_head.count   = op->count;
626 	msg_head.ival1   = op->ival1;
627 	msg_head.ival2   = op->ival2;
628 	msg_head.can_id  = op->can_id;
629 	msg_head.nframes = 0;
630 
631 	bcm_send_to_user(op, &msg_head, NULL, 0);
632 
633 	return HRTIMER_NORESTART;
634 }
635 
636 /*
637  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
638  */
639 static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
640 {
641 	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
642 
643 	if ((op->last_frames) && (lcf->flags & RX_THR)) {
644 		bcm_rx_changed(op, lcf);
645 		return 1;
646 	}
647 	return 0;
648 }
649 
650 /*
651  * bcm_rx_thr_flush - check for throttled data and send it to userspace
652  */
653 static int bcm_rx_thr_flush(struct bcm_op *op)
654 {
655 	int updated = 0;
656 
657 	if (op->nframes > 1) {
658 		unsigned int i;
659 
660 		/* for MUX filter we start at index 1 */
661 		for (i = 1; i < op->nframes; i++)
662 			updated += bcm_rx_do_flush(op, i);
663 
664 	} else {
665 		/* for RX_FILTER_ID and simple filter */
666 		updated += bcm_rx_do_flush(op, 0);
667 	}
668 
669 	return updated;
670 }
671 
672 /*
673  * bcm_rx_thr_handler - the time for blocked content updates is over now:
674  *                      check for throttled data and send it to userspace
675  */
676 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
677 {
678 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
679 
680 	if (bcm_rx_thr_flush(op)) {
681 		hrtimer_forward_now(hrtimer, op->kt_ival2);
682 		return HRTIMER_RESTART;
683 	} else {
684 		/* rearm throttle handling */
685 		op->kt_lastmsg = 0;
686 		return HRTIMER_NORESTART;
687 	}
688 }
689 
690 /*
691  * bcm_rx_handler - handle a CAN frame reception
692  */
693 static void bcm_rx_handler(struct sk_buff *skb, void *data)
694 {
695 	struct bcm_op *op = (struct bcm_op *)data;
696 	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
697 	unsigned int i;
698 	unsigned char traffic_flags;
699 
700 	if (op->can_id != rxframe->can_id)
701 		return;
702 
703 	/* make sure to handle the correct frame type (CAN / CAN FD) */
704 	if (op->flags & CAN_FD_FRAME) {
705 		if (!can_is_canfd_skb(skb))
706 			return;
707 	} else {
708 		if (!can_is_can_skb(skb))
709 			return;
710 	}
711 
712 	/* disable timeout */
713 	hrtimer_cancel(&op->timer);
714 
715 	/* save rx timestamp */
716 	op->rx_stamp = skb->tstamp;
717 	/* save originator for recvfrom() */
718 	op->rx_ifindex = skb->dev->ifindex;
719 	/* update statistics */
720 	op->frames_abs++;
721 
722 	if (op->flags & RX_RTR_FRAME) {
723 		/* send reply for RTR-request (placed in op->frames[0]) */
724 		bcm_can_tx(op);
725 		return;
726 	}
727 
728 	/* compute flags to distinguish between own/local/remote CAN traffic */
729 	traffic_flags = 0;
730 	if (skb->sk) {
731 		traffic_flags |= RX_LOCAL;
732 		if (skb->sk == op->sk)
733 			traffic_flags |= RX_OWN;
734 	}
735 
736 	if (op->flags & RX_FILTER_ID) {
737 		/* the easiest case */
738 		bcm_rx_update_and_send(op, op->last_frames, rxframe,
739 				       traffic_flags);
740 		goto rx_starttimer;
741 	}
742 
743 	if (op->nframes == 1) {
744 		/* simple compare with index 0 */
745 		bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
746 		goto rx_starttimer;
747 	}
748 
749 	if (op->nframes > 1) {
750 		/*
751 		 * multiplex compare
752 		 *
753 		 * find the first multiplex mask that fits.
754 		 * Remark: The MUX-mask is stored in index 0 - but only the
755 		 * first 64 bits of the frame data[] are relevant (CAN FD)
756 		 */
757 
758 		for (i = 1; i < op->nframes; i++) {
759 			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
760 			    (get_u64(op->frames, 0) &
761 			     get_u64(op->frames + op->cfsiz * i, 0))) {
762 				bcm_rx_cmp_to_index(op, i, rxframe,
763 						    traffic_flags);
764 				break;
765 			}
766 		}
767 	}
768 
769 rx_starttimer:
770 	bcm_rx_starttimer(op);
771 }
772 
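/*
 * Worked example for the multiplex compare above: for a message whose
 * data[0] carries the mux index, an RX_SETUP with nframes = 3 could use
 *
 *   frames[0].data[0] = 0xFF;   // mux mask: only byte 0 selects the index
 *   frames[1].data[0] = 0x01;   // expected mux value for index 1
 *   frames[2].data[0] = 0x02;   // expected mux value for index 2
 *
 * A received frame with data[0] == 0x02 is then matched against index 2,
 * and RX_CHANGED is generated only when payload bits that are set in
 * frames[2].data[] change (see bcm_rx_cmp_to_index()).
 */
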
773 /*
774  * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
775  */
776 static struct bcm_op *bcm_find_op(struct list_head *ops,
777 				  struct bcm_msg_head *mh, int ifindex)
778 {
779 	struct bcm_op *op;
780 
781 	list_for_each_entry(op, ops, list) {
782 		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
783 		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
784 			return op;
785 	}
786 
787 	return NULL;
788 }
789 
790 static void bcm_free_op_rcu(struct rcu_head *rcu_head)
791 {
792 	struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);
793 
794 	if ((op->frames) && (op->frames != &op->sframe))
795 		kfree(op->frames);
796 
797 	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
798 		kfree(op->last_frames);
799 
800 	kfree(op);
801 }
802 
803 static void bcm_remove_op(struct bcm_op *op)
804 {
805 	hrtimer_cancel(&op->timer);
806 	hrtimer_cancel(&op->thrtimer);
807 
808 	call_rcu(&op->rcu, bcm_free_op_rcu);
809 }
810 
811 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
812 {
813 	if (op->rx_reg_dev == dev) {
814 		can_rx_unregister(dev_net(dev), dev, op->can_id,
815 				  REGMASK(op->can_id), bcm_rx_handler, op);
816 
817 		/* mark as removed subscription */
818 		op->rx_reg_dev = NULL;
819 	} else
820 		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
821 		       "mismatch %p %p\n", op->rx_reg_dev, dev);
822 }
823 
824 /*
825  * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
826  */
827 static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
828 			    int ifindex)
829 {
830 	struct bcm_op *op, *n;
831 
832 	list_for_each_entry_safe(op, n, ops, list) {
833 		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
834 		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
835 
836 			/* disable automatic timer on frame reception */
837 			op->flags |= RX_NO_AUTOTIMER;
838 
839 			/*
840 			 * Whether we are bound or not (e.g. due to netdev
841 			 * problems), can_rx_unregister() is always a safe
842 			 * thing to do here.
843 			 */
844 			if (op->ifindex) {
845 				/*
846 				 * Only remove subscriptions that had not
847 				 * been removed due to NETDEV_UNREGISTER
848 				 * in bcm_notifier()
849 				 */
850 				if (op->rx_reg_dev) {
851 					struct net_device *dev;
852 
853 					dev = dev_get_by_index(sock_net(op->sk),
854 							       op->ifindex);
855 					if (dev) {
856 						bcm_rx_unreg(dev, op);
857 						dev_put(dev);
858 					}
859 				}
860 			} else
861 				can_rx_unregister(sock_net(op->sk), NULL,
862 						  op->can_id,
863 						  REGMASK(op->can_id),
864 						  bcm_rx_handler, op);
865 
866 			list_del_rcu(&op->list);
867 			bcm_remove_op(op);
868 			return 1; /* done */
869 		}
870 	}
871 
872 	return 0; /* not found */
873 }
874 
875 /*
876  * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
877  */
878 static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
879 			    int ifindex)
880 {
881 	struct bcm_op *op, *n;
882 
883 	list_for_each_entry_safe(op, n, ops, list) {
884 		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
885 		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
886 			list_del_rcu(&op->list);
887 			bcm_remove_op(op);
888 			return 1; /* done */
889 		}
890 	}
891 
892 	return 0; /* not found */
893 }
894 
895 /*
896  * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
897  */
898 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
899 		       int ifindex)
900 {
901 	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
902 
903 	if (!op)
904 		return -EINVAL;
905 
906 	/* put current values into msg_head */
907 	msg_head->flags   = op->flags;
908 	msg_head->count   = op->count;
909 	msg_head->ival1   = op->ival1;
910 	msg_head->ival2   = op->ival2;
911 	msg_head->nframes = op->nframes;
912 
913 	bcm_send_to_user(op, msg_head, op->frames, 0);
914 
915 	return MHSIZ;
916 }
917 
918 /*
919  * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
920  */
921 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
922 			int ifindex, struct sock *sk)
923 {
924 	struct bcm_sock *bo = bcm_sk(sk);
925 	struct bcm_op *op;
926 	struct canfd_frame *cf;
927 	unsigned int i;
928 	int err;
929 
930 	/* we need a real device to send frames */
931 	if (!ifindex)
932 		return -ENODEV;
933 
934 	/* check nframes boundaries - we need at least one CAN frame */
935 	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
936 		return -EINVAL;
937 
938 	/* check timeval limitations */
939 	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
940 		return -EINVAL;
941 
942 	/* check the given can_id */
943 	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
944 	if (op) {
945 		/* update existing BCM operation */
946 
947 		/*
948 		 * Do we need more space for the CAN frames than currently
949 		 * allocated? -> This is a _really_ unusual use-case and
950 		 * therefore (complexity / locking) it is not supported.
951 		 */
952 		if (msg_head->nframes > op->nframes)
953 			return -E2BIG;
954 
955 		/* update CAN frames content */
956 		for (i = 0; i < msg_head->nframes; i++) {
957 
958 			cf = op->frames + op->cfsiz * i;
959 			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
960 
961 			if (op->flags & CAN_FD_FRAME) {
962 				if (cf->len > 64)
963 					err = -EINVAL;
964 			} else {
965 				if (cf->len > 8)
966 					err = -EINVAL;
967 			}
968 
969 			if (err < 0)
970 				return err;
971 
972 			if (msg_head->flags & TX_CP_CAN_ID) {
973 				/* copy can_id into frame */
974 				cf->can_id = msg_head->can_id;
975 			}
976 		}
977 		op->flags = msg_head->flags;
978 
979 		/* only lock for unlikely count/nframes/currframe changes */
980 		if (op->nframes != msg_head->nframes ||
981 		    op->flags & TX_RESET_MULTI_IDX ||
982 		    op->flags & SETTIMER) {
983 
984 			spin_lock_bh(&op->bcm_tx_lock);
985 
986 			if (op->nframes != msg_head->nframes ||
987 			    op->flags & TX_RESET_MULTI_IDX) {
988 				/* potentially update changed nframes */
989 				op->nframes = msg_head->nframes;
990 				/* restart multiple frame transmission */
991 				op->currframe = 0;
992 			}
993 
994 			if (op->flags & SETTIMER)
995 				op->count = msg_head->count;
996 
997 			spin_unlock_bh(&op->bcm_tx_lock);
998 		}
999 
1000 	} else {
1001 		/* insert new BCM operation for the given can_id */
1002 
1003 		op = kzalloc(OPSIZ, GFP_KERNEL);
1004 		if (!op)
1005 			return -ENOMEM;
1006 
1007 		spin_lock_init(&op->bcm_tx_lock);
1008 		op->can_id = msg_head->can_id;
1009 		op->cfsiz = CFSIZ(msg_head->flags);
1010 		op->flags = msg_head->flags;
1011 		op->nframes = msg_head->nframes;
1012 
1013 		if (op->flags & SETTIMER)
1014 			op->count = msg_head->count;
1015 
1016 		/* create array for CAN frames and copy the data */
1017 		if (msg_head->nframes > 1) {
1018 			op->frames = kmalloc_array(msg_head->nframes,
1019 						   op->cfsiz,
1020 						   GFP_KERNEL);
1021 			if (!op->frames) {
1022 				kfree(op);
1023 				return -ENOMEM;
1024 			}
1025 		} else
1026 			op->frames = &op->sframe;
1027 
1028 		for (i = 0; i < msg_head->nframes; i++) {
1029 
1030 			cf = op->frames + op->cfsiz * i;
1031 			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
1032 			if (err < 0)
1033 				goto free_op;
1034 
1035 			if (op->flags & CAN_FD_FRAME) {
1036 				if (cf->len > 64)
1037 					err = -EINVAL;
1038 			} else {
1039 				if (cf->len > 8)
1040 					err = -EINVAL;
1041 			}
1042 
1043 			if (err < 0)
1044 				goto free_op;
1045 
1046 			if (msg_head->flags & TX_CP_CAN_ID) {
1047 				/* copy can_id into frame */
1048 				cf->can_id = msg_head->can_id;
1049 			}
1050 		}
1051 
1052 		/* tx_ops never compare with previously received messages */
1053 		op->last_frames = NULL;
1054 
1055 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
1056 		op->sk = sk;
1057 		op->ifindex = ifindex;
1058 
1059 		/* initialize uninitialized (kzalloc) structure */
1060 		hrtimer_setup(&op->timer, bcm_tx_timeout_handler, CLOCK_MONOTONIC,
1061 			      HRTIMER_MODE_REL_SOFT);
1062 
1063 		/* currently unused in tx_ops */
1064 		hrtimer_setup(&op->thrtimer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
1065 			      HRTIMER_MODE_REL_SOFT);
1066 
1067 		/* add this bcm_op to the list of the tx_ops */
1068 		list_add(&op->list, &bo->tx_ops);
1069 
1070 	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
1071 
1072 	if (op->flags & SETTIMER) {
1073 		/* set timer values */
1074 		op->ival1 = msg_head->ival1;
1075 		op->ival2 = msg_head->ival2;
1076 		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1077 		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1078 
1079 		/* disable an active timer due to zero values? */
1080 		if (!op->kt_ival1 && !op->kt_ival2)
1081 			hrtimer_cancel(&op->timer);
1082 	}
1083 
1084 	if (op->flags & STARTTIMER) {
1085 		hrtimer_cancel(&op->timer);
1086 		/* spec: send CAN frame when starting timer */
1087 		op->flags |= TX_ANNOUNCE;
1088 	}
1089 
1090 	if (op->flags & TX_ANNOUNCE)
1091 		bcm_can_tx(op);
1092 
1093 	if (op->flags & STARTTIMER)
1094 		bcm_tx_start_timer(op);
1095 
1096 	return msg_head->nframes * op->cfsiz + MHSIZ;
1097 
1098 free_op:
1099 	if (op->frames != &op->sframe)
1100 		kfree(op->frames);
1101 	kfree(op);
1102 	return err;
1103 }
1104 
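/*
 * Illustrative userspace sketch (an assumed example, not part of the build;
 * the helper name, "can0" and the minimal error handling are placeholders,
 * .len is called can_dlc in older headers): cyclic transmission of one
 * classic CAN frame every 100 ms, handled by bcm_tx_setup() above.
 *
 *	#include <linux/can.h>
 *	#include <linux/can/bcm.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static int bcm_tx_100ms_example(void)
 *	{
 *		struct {
 *			struct bcm_msg_head msg_head;
 *			struct can_frame frame[1];
 *		} tx = {
 *			.msg_head = {
 *				.opcode  = TX_SETUP,
 *				.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID,
 *				.can_id  = 0x123,
 *				.nframes = 1,
 *				.ival2   = { .tv_sec = 0, .tv_usec = 100000 },
 *			},
 *		};
 *		struct sockaddr_can addr = {
 *			.can_family  = AF_CAN,
 *			.can_ifindex = if_nametoindex("can0"),
 *		};
 *		int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *
 *		if (s < 0 || connect(s, (struct sockaddr *)&addr, sizeof(addr)))
 *			return -1;
 *
 *		tx.frame[0].len = 2;	// payload length, data[] stays zeroed
 *		return write(s, &tx, sizeof(tx)) < 0 ? -1 : 0;
 *	}
 */
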
1105 /*
1106  * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
1107  */
1108 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1109 			int ifindex, struct sock *sk)
1110 {
1111 	struct bcm_sock *bo = bcm_sk(sk);
1112 	struct bcm_op *op;
1113 	int do_rx_register;
1114 	int err = 0;
1115 
1116 	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
1117 		/* be robust against wrong usage ... */
1118 		msg_head->flags |= RX_FILTER_ID;
1119 		/* ignore trailing garbage */
1120 		msg_head->nframes = 0;
1121 	}
1122 
1123 	/* the first element contains the mux-mask => MAX_NFRAMES + 1  */
1124 	if (msg_head->nframes > MAX_NFRAMES + 1)
1125 		return -EINVAL;
1126 
1127 	if ((msg_head->flags & RX_RTR_FRAME) &&
1128 	    ((msg_head->nframes != 1) ||
1129 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
1130 		return -EINVAL;
1131 
1132 	/* check timeval limitations */
1133 	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1134 		return -EINVAL;
1135 
1136 	/* check the given can_id */
1137 	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1138 	if (op) {
1139 		/* update existing BCM operation */
1140 
1141 		/*
1142 		 * Do we need more space for the CAN frames than currently
1143 		 * allocated? -> This is a _really_ unusual use-case and
1144 		 * therefore (complexity / locking) it is not supported.
1145 		 */
1146 		if (msg_head->nframes > op->nframes)
1147 			return -E2BIG;
1148 
1149 		if (msg_head->nframes) {
1150 			/* update CAN frames content */
1151 			err = memcpy_from_msg(op->frames, msg,
1152 					      msg_head->nframes * op->cfsiz);
1153 			if (err < 0)
1154 				return err;
1155 
1156 			/* clear last_frames to indicate 'nothing received' */
1157 			memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
1158 		}
1159 
1160 		op->nframes = msg_head->nframes;
1161 		op->flags = msg_head->flags;
1162 
1163 		/* Only an update -> do not call can_rx_register() */
1164 		do_rx_register = 0;
1165 
1166 	} else {
1167 		/* insert new BCM operation for the given can_id */
1168 		op = kzalloc(OPSIZ, GFP_KERNEL);
1169 		if (!op)
1170 			return -ENOMEM;
1171 
1172 		op->can_id = msg_head->can_id;
1173 		op->nframes = msg_head->nframes;
1174 		op->cfsiz = CFSIZ(msg_head->flags);
1175 		op->flags = msg_head->flags;
1176 
1177 		if (msg_head->nframes > 1) {
1178 			/* create array for CAN frames and copy the data */
1179 			op->frames = kmalloc_array(msg_head->nframes,
1180 						   op->cfsiz,
1181 						   GFP_KERNEL);
1182 			if (!op->frames) {
1183 				kfree(op);
1184 				return -ENOMEM;
1185 			}
1186 
1187 			/* create and init array for received CAN frames */
1188 			op->last_frames = kcalloc(msg_head->nframes,
1189 						  op->cfsiz,
1190 						  GFP_KERNEL);
1191 			if (!op->last_frames) {
1192 				kfree(op->frames);
1193 				kfree(op);
1194 				return -ENOMEM;
1195 			}
1196 
1197 		} else {
1198 			op->frames = &op->sframe;
1199 			op->last_frames = &op->last_sframe;
1200 		}
1201 
1202 		if (msg_head->nframes) {
1203 			err = memcpy_from_msg(op->frames, msg,
1204 					      msg_head->nframes * op->cfsiz);
1205 			if (err < 0) {
1206 				if (op->frames != &op->sframe)
1207 					kfree(op->frames);
1208 				if (op->last_frames != &op->last_sframe)
1209 					kfree(op->last_frames);
1210 				kfree(op);
1211 				return err;
1212 			}
1213 		}
1214 
1215 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
1216 		op->sk = sk;
1217 		op->ifindex = ifindex;
1218 
1219 		/* ifindex for timeout events w/o previous frame reception */
1220 		op->rx_ifindex = ifindex;
1221 
1222 		/* initialize uninitialized (kzalloc) structure */
1223 		hrtimer_setup(&op->timer, bcm_rx_timeout_handler, CLOCK_MONOTONIC,
1224 			      HRTIMER_MODE_REL_SOFT);
1225 		hrtimer_setup(&op->thrtimer, bcm_rx_thr_handler, CLOCK_MONOTONIC,
1226 			      HRTIMER_MODE_REL_SOFT);
1227 
1228 		/* add this bcm_op to the list of the rx_ops */
1229 		list_add(&op->list, &bo->rx_ops);
1230 
1231 		/* call can_rx_register() */
1232 		do_rx_register = 1;
1233 
1234 	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1235 
1236 	/* check flags */
1237 
1238 	if (op->flags & RX_RTR_FRAME) {
1239 		struct canfd_frame *frame0 = op->frames;
1240 
1241 		/* no timers in RTR-mode */
1242 		hrtimer_cancel(&op->thrtimer);
1243 		hrtimer_cancel(&op->timer);
1244 
1245 		/*
1246 		 * funny feature in RX(!)_SETUP only for RTR-mode:
1247 		 * copy can_id into frame BUT without RTR-flag to
1248 		 * prevent a full-load-loopback-test ... ;-]
1249 		 */
1250 		if ((op->flags & TX_CP_CAN_ID) ||
1251 		    (frame0->can_id == op->can_id))
1252 			frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1253 
1254 	} else {
1255 		if (op->flags & SETTIMER) {
1256 
1257 			/* set timer value */
1258 			op->ival1 = msg_head->ival1;
1259 			op->ival2 = msg_head->ival2;
1260 			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1261 			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1262 
1263 			/* disable an active timer due to zero value? */
1264 			if (!op->kt_ival1)
1265 				hrtimer_cancel(&op->timer);
1266 
1267 			/*
1268 			 * In any case cancel the throttle timer, flush
1269 			 * potentially blocked msgs and reset throttle handling
1270 			 */
1271 			op->kt_lastmsg = 0;
1272 			hrtimer_cancel(&op->thrtimer);
1273 			bcm_rx_thr_flush(op);
1274 		}
1275 
1276 		if ((op->flags & STARTTIMER) && op->kt_ival1)
1277 			hrtimer_start(&op->timer, op->kt_ival1,
1278 				      HRTIMER_MODE_REL_SOFT);
1279 	}
1280 
1281 	/* now we can register for can_ids, if we added a new bcm_op */
1282 	if (do_rx_register) {
1283 		if (ifindex) {
1284 			struct net_device *dev;
1285 
1286 			dev = dev_get_by_index(sock_net(sk), ifindex);
1287 			if (dev) {
1288 				err = can_rx_register(sock_net(sk), dev,
1289 						      op->can_id,
1290 						      REGMASK(op->can_id),
1291 						      bcm_rx_handler, op,
1292 						      "bcm", sk);
1293 
1294 				op->rx_reg_dev = dev;
1295 				dev_put(dev);
1296 			}
1297 
1298 		} else
1299 			err = can_rx_register(sock_net(sk), NULL, op->can_id,
1300 					      REGMASK(op->can_id),
1301 					      bcm_rx_handler, op, "bcm", sk);
1302 		if (err) {
1303 			/* this bcm rx op is broken -> remove it */
1304 			list_del_rcu(&op->list);
1305 			bcm_remove_op(op);
1306 			return err;
1307 		}
1308 	}
1309 
1310 	return msg_head->nframes * op->cfsiz + MHSIZ;
1311 }
1312 
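/*
 * Illustrative userspace sketch (an assumed example, not part of the build;
 * the helper name is a placeholder, the socket 's' is assumed to be
 * connect()ed as in the TX_SETUP sketch above, includes as above plus
 * <string.h>): content filtering for can_id 0x123 with a 1 s timeout.
 *
 *	static int bcm_rx_filter_example(int s)
 *	{
 *		struct {
 *			struct bcm_msg_head msg_head;
 *			struct can_frame frame[1];
 *		} rx = {
 *			.msg_head = {
 *				.opcode  = RX_SETUP,
 *				.flags   = SETTIMER | STARTTIMER,
 *				.can_id  = 0x123,
 *				.nframes = 1,
 *				.ival1   = { .tv_sec = 1, .tv_usec = 0 },
 *			},
 *		};
 *
 *		memset(rx.frame[0].data, 0xff, 8);	// every payload bit is relevant
 *		return write(s, &rx, sizeof(rx)) < 0 ? -1 : 0;
 *	}
 *
 * The socket then receives RX_CHANGED whenever the selected payload bits
 * change and RX_TIMEOUT when no matching frame arrives within one second.
 */
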
1313 /*
1314  * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1315  */
1316 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
1317 		       int cfsiz)
1318 {
1319 	struct sk_buff *skb;
1320 	struct net_device *dev;
1321 	int err;
1322 
1323 	/* we need a real device to send frames */
1324 	if (!ifindex)
1325 		return -ENODEV;
1326 
1327 	skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
1328 	if (!skb)
1329 		return -ENOMEM;
1330 
1331 	can_skb_reserve(skb);
1332 
1333 	err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
1334 	if (err < 0) {
1335 		kfree_skb(skb);
1336 		return err;
1337 	}
1338 
1339 	dev = dev_get_by_index(sock_net(sk), ifindex);
1340 	if (!dev) {
1341 		kfree_skb(skb);
1342 		return -ENODEV;
1343 	}
1344 
1345 	can_skb_prv(skb)->ifindex = dev->ifindex;
1346 	can_skb_prv(skb)->skbcnt = 0;
1347 	skb->dev = dev;
1348 	can_skb_set_owner(skb, sk);
1349 	err = can_send(skb, 1); /* send with loopback */
1350 	dev_put(dev);
1351 
1352 	if (err)
1353 		return err;
1354 
1355 	return cfsiz + MHSIZ;
1356 }
1357 
1358 /*
1359  * bcm_sendmsg - process BCM commands (opcodes) from userspace
1360  */
1361 static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1362 {
1363 	struct sock *sk = sock->sk;
1364 	struct bcm_sock *bo = bcm_sk(sk);
1365 	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1366 	struct bcm_msg_head msg_head;
1367 	int cfsiz;
1368 	int ret; /* read bytes or error codes as return value */
1369 
1370 	if (!bo->bound)
1371 		return -ENOTCONN;
1372 
1373 	/* check for valid message length from userspace */
1374 	if (size < MHSIZ)
1375 		return -EINVAL;
1376 
1377 	/* read message head information */
1378 	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1379 	if (ret < 0)
1380 		return ret;
1381 
1382 	cfsiz = CFSIZ(msg_head.flags);
1383 	if ((size - MHSIZ) % cfsiz)
1384 		return -EINVAL;
1385 
1386 	/* check for alternative ifindex for this bcm_op */
1387 
1388 	if (!ifindex && msg->msg_name) {
1389 		/* no bound device as default => check msg_name */
1390 		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1391 
1392 		if (msg->msg_namelen < BCM_MIN_NAMELEN)
1393 			return -EINVAL;
1394 
1395 		if (addr->can_family != AF_CAN)
1396 			return -EINVAL;
1397 
1398 		/* ifindex from sendto() */
1399 		ifindex = addr->can_ifindex;
1400 
1401 		if (ifindex) {
1402 			struct net_device *dev;
1403 
1404 			dev = dev_get_by_index(sock_net(sk), ifindex);
1405 			if (!dev)
1406 				return -ENODEV;
1407 
1408 			if (dev->type != ARPHRD_CAN) {
1409 				dev_put(dev);
1410 				return -ENODEV;
1411 			}
1412 
1413 			dev_put(dev);
1414 		}
1415 	}
1416 
1417 	lock_sock(sk);
1418 
1419 	switch (msg_head.opcode) {
1420 
1421 	case TX_SETUP:
1422 		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1423 		break;
1424 
1425 	case RX_SETUP:
1426 		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1427 		break;
1428 
1429 	case TX_DELETE:
1430 		if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
1431 			ret = MHSIZ;
1432 		else
1433 			ret = -EINVAL;
1434 		break;
1435 
1436 	case RX_DELETE:
1437 		if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
1438 			ret = MHSIZ;
1439 		else
1440 			ret = -EINVAL;
1441 		break;
1442 
1443 	case TX_READ:
1444 		/* reuse msg_head for the reply to TX_READ */
1445 		msg_head.opcode  = TX_STATUS;
1446 		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1447 		break;
1448 
1449 	case RX_READ:
1450 		/* reuse msg_head for the reply to RX_READ */
1451 		msg_head.opcode  = RX_STATUS;
1452 		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1453 		break;
1454 
1455 	case TX_SEND:
1456 		/* we need exactly one CAN frame behind the msg head */
1457 		if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
1458 			ret = -EINVAL;
1459 		else
1460 			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
1461 		break;
1462 
1463 	default:
1464 		ret = -EINVAL;
1465 		break;
1466 	}
1467 
1468 	release_sock(sk);
1469 
1470 	return ret;
1471 }
1472 
1473 /*
1474  * notification handler for netdevice status changes
1475  */
1476 static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
1477 		       struct net_device *dev)
1478 {
1479 	struct sock *sk = &bo->sk;
1480 	struct bcm_op *op;
1481 	int notify_enodev = 0;
1482 
1483 	if (!net_eq(dev_net(dev), sock_net(sk)))
1484 		return;
1485 
1486 	switch (msg) {
1487 
1488 	case NETDEV_UNREGISTER:
1489 		lock_sock(sk);
1490 
1491 		/* remove device specific receive entries */
1492 		list_for_each_entry(op, &bo->rx_ops, list)
1493 			if (op->rx_reg_dev == dev)
1494 				bcm_rx_unreg(dev, op);
1495 
1496 		/* remove device reference, if this is our bound device */
1497 		if (bo->bound && bo->ifindex == dev->ifindex) {
1498 #if IS_ENABLED(CONFIG_PROC_FS)
1499 			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
1500 				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
1501 				bo->bcm_proc_read = NULL;
1502 			}
1503 #endif
1504 			bo->bound   = 0;
1505 			bo->ifindex = 0;
1506 			notify_enodev = 1;
1507 		}
1508 
1509 		release_sock(sk);
1510 
1511 		if (notify_enodev) {
1512 			sk->sk_err = ENODEV;
1513 			if (!sock_flag(sk, SOCK_DEAD))
1514 				sk_error_report(sk);
1515 		}
1516 		break;
1517 
1518 	case NETDEV_DOWN:
1519 		if (bo->bound && bo->ifindex == dev->ifindex) {
1520 			sk->sk_err = ENETDOWN;
1521 			if (!sock_flag(sk, SOCK_DEAD))
1522 				sk_error_report(sk);
1523 		}
1524 	}
1525 }
1526 
1527 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1528 			void *ptr)
1529 {
1530 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1531 
1532 	if (dev->type != ARPHRD_CAN)
1533 		return NOTIFY_DONE;
1534 	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1535 		return NOTIFY_DONE;
1536 	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
1537 		return NOTIFY_DONE;
1538 
1539 	spin_lock(&bcm_notifier_lock);
1540 	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
1541 		spin_unlock(&bcm_notifier_lock);
1542 		bcm_notify(bcm_busy_notifier, msg, dev);
1543 		spin_lock(&bcm_notifier_lock);
1544 	}
1545 	bcm_busy_notifier = NULL;
1546 	spin_unlock(&bcm_notifier_lock);
1547 	return NOTIFY_DONE;
1548 }
1549 
1550 /*
1551  * initial settings for all BCM sockets to be set at socket creation time
1552  */
1553 static int bcm_init(struct sock *sk)
1554 {
1555 	struct bcm_sock *bo = bcm_sk(sk);
1556 
1557 	bo->bound            = 0;
1558 	bo->ifindex          = 0;
1559 	bo->dropped_usr_msgs = 0;
1560 	bo->bcm_proc_read    = NULL;
1561 
1562 	INIT_LIST_HEAD(&bo->tx_ops);
1563 	INIT_LIST_HEAD(&bo->rx_ops);
1564 
1565 	/* set notifier */
1566 	spin_lock(&bcm_notifier_lock);
1567 	list_add_tail(&bo->notifier, &bcm_notifier_list);
1568 	spin_unlock(&bcm_notifier_lock);
1569 
1570 	return 0;
1571 }
1572 
1573 /*
1574  * standard socket functions
1575  */
1576 static int bcm_release(struct socket *sock)
1577 {
1578 	struct sock *sk = sock->sk;
1579 	struct net *net;
1580 	struct bcm_sock *bo;
1581 	struct bcm_op *op, *next;
1582 
1583 	if (!sk)
1584 		return 0;
1585 
1586 	net = sock_net(sk);
1587 	bo = bcm_sk(sk);
1588 
1589 	/* remove bcm_ops, timer, rx_unregister(), etc. */
1590 
1591 	spin_lock(&bcm_notifier_lock);
1592 	while (bcm_busy_notifier == bo) {
1593 		spin_unlock(&bcm_notifier_lock);
1594 		schedule_timeout_uninterruptible(1);
1595 		spin_lock(&bcm_notifier_lock);
1596 	}
1597 	list_del(&bo->notifier);
1598 	spin_unlock(&bcm_notifier_lock);
1599 
1600 	lock_sock(sk);
1601 
1602 #if IS_ENABLED(CONFIG_PROC_FS)
1603 	/* remove procfs entry */
1604 	if (net->can.bcmproc_dir && bo->bcm_proc_read)
1605 		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
1606 #endif /* CONFIG_PROC_FS */
1607 
1608 	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1609 		bcm_remove_op(op);
1610 
1611 	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1612 		/*
1613 		 * Whether we are bound or not (due to netdev problems),
1614 		 * can_rx_unregister() is always a safe thing to do here.
1615 		 */
1616 		if (op->ifindex) {
1617 			/*
1618 			 * Only remove subscriptions that had not
1619 			 * been removed due to NETDEV_UNREGISTER
1620 			 * in bcm_notifier()
1621 			 */
1622 			if (op->rx_reg_dev) {
1623 				struct net_device *dev;
1624 
1625 				dev = dev_get_by_index(net, op->ifindex);
1626 				if (dev) {
1627 					bcm_rx_unreg(dev, op);
1628 					dev_put(dev);
1629 				}
1630 			}
1631 		} else
1632 			can_rx_unregister(net, NULL, op->can_id,
1633 					  REGMASK(op->can_id),
1634 					  bcm_rx_handler, op);
1635 
1636 	}
1637 
1638 	synchronize_rcu();
1639 
1640 	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
1641 		bcm_remove_op(op);
1642 
1643 	/* remove device reference */
1644 	if (bo->bound) {
1645 		bo->bound   = 0;
1646 		bo->ifindex = 0;
1647 	}
1648 
1649 	sock_orphan(sk);
1650 	sock->sk = NULL;
1651 
1652 	release_sock(sk);
1653 	sock_prot_inuse_add(net, sk->sk_prot, -1);
1654 	sock_put(sk);
1655 
1656 	return 0;
1657 }
1658 
1659 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1660 		       int flags)
1661 {
1662 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1663 	struct sock *sk = sock->sk;
1664 	struct bcm_sock *bo = bcm_sk(sk);
1665 	struct net *net = sock_net(sk);
1666 	int ret = 0;
1667 
1668 	if (len < BCM_MIN_NAMELEN)
1669 		return -EINVAL;
1670 
1671 	lock_sock(sk);
1672 
1673 	if (bo->bound) {
1674 		ret = -EISCONN;
1675 		goto fail;
1676 	}
1677 
1678 	/* bind a device to this socket */
1679 	if (addr->can_ifindex) {
1680 		struct net_device *dev;
1681 
1682 		dev = dev_get_by_index(net, addr->can_ifindex);
1683 		if (!dev) {
1684 			ret = -ENODEV;
1685 			goto fail;
1686 		}
1687 		if (dev->type != ARPHRD_CAN) {
1688 			dev_put(dev);
1689 			ret = -ENODEV;
1690 			goto fail;
1691 		}
1692 
1693 		bo->ifindex = dev->ifindex;
1694 		dev_put(dev);
1695 
1696 	} else {
1697 		/* no interface reference for ifindex = 0 ('any' CAN device) */
1698 		bo->ifindex = 0;
1699 	}
1700 
1701 #if IS_ENABLED(CONFIG_PROC_FS)
1702 	if (net->can.bcmproc_dir) {
1703 		/* unique socket address as filename */
1704 		sprintf(bo->procname, "%lu", sock_i_ino(sk));
1705 		bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
1706 						     net->can.bcmproc_dir,
1707 						     bcm_proc_show, sk);
1708 		if (!bo->bcm_proc_read) {
1709 			ret = -ENOMEM;
1710 			goto fail;
1711 		}
1712 	}
1713 #endif /* CONFIG_PROC_FS */
1714 
1715 	bo->bound = 1;
1716 
1717 fail:
1718 	release_sock(sk);
1719 
1720 	return ret;
1721 }
1722 
1723 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1724 		       int flags)
1725 {
1726 	struct sock *sk = sock->sk;
1727 	struct sk_buff *skb;
1728 	int error = 0;
1729 	int err;
1730 
1731 	skb = skb_recv_datagram(sk, flags, &error);
1732 	if (!skb)
1733 		return error;
1734 
1735 	if (skb->len < size)
1736 		size = skb->len;
1737 
1738 	err = memcpy_to_msg(msg, skb->data, size);
1739 	if (err < 0) {
1740 		skb_free_datagram(sk, skb);
1741 		return err;
1742 	}
1743 
1744 	sock_recv_cmsgs(msg, sk, skb);
1745 
1746 	if (msg->msg_name) {
1747 		__sockaddr_check_size(BCM_MIN_NAMELEN);
1748 		msg->msg_namelen = BCM_MIN_NAMELEN;
1749 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1750 	}
1751 
1752 	/* assign the flags that have been recorded in bcm_send_to_user() */
1753 	msg->msg_flags |= *(bcm_flags(skb));
1754 
1755 	skb_free_datagram(sk, skb);
1756 
1757 	return size;
1758 }
1759 
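/*
 * Illustrative userspace sketch (an assumed example, not part of the build;
 * the helper name is a placeholder): every message read from a CAN_BCM
 * socket starts with a struct bcm_msg_head, optionally followed by frames.
 *
 *	static void bcm_read_example(int s)	// s: connected CAN_BCM socket
 *	{
 *		struct {
 *			struct bcm_msg_head msg_head;
 *			struct can_frame frame[1];
 *		} msg;
 *		ssize_t n = read(s, &msg, sizeof(msg));
 *
 *		if (n < (ssize_t)sizeof(struct bcm_msg_head))
 *			return;
 *
 *		switch (msg.msg_head.opcode) {
 *		case RX_CHANGED:	// filtered content changed, frame[0] valid
 *		case RX_TIMEOUT:	// a monitored cyclic message went missing
 *		case TX_EXPIRED:	// a counted transmission has finished
 *		default:
 *			break;
 *		}
 *	}
 *
 * When recvmsg() is used instead of read(), msg_flags may additionally carry
 * MSG_DONTROUTE (frame originated on this host) and MSG_CONFIRM (frame was
 * sent via this very socket), as prepared in bcm_send_to_user() above.
 */
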
1760 static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1761 				unsigned long arg)
1762 {
1763 	/* no ioctls for socket layer -> hand it down to NIC layer */
1764 	return -ENOIOCTLCMD;
1765 }
1766 
1767 static const struct proto_ops bcm_ops = {
1768 	.family        = PF_CAN,
1769 	.release       = bcm_release,
1770 	.bind          = sock_no_bind,
1771 	.connect       = bcm_connect,
1772 	.socketpair    = sock_no_socketpair,
1773 	.accept        = sock_no_accept,
1774 	.getname       = sock_no_getname,
1775 	.poll          = datagram_poll,
1776 	.ioctl         = bcm_sock_no_ioctlcmd,
1777 	.gettstamp     = sock_gettstamp,
1778 	.listen        = sock_no_listen,
1779 	.shutdown      = sock_no_shutdown,
1780 	.sendmsg       = bcm_sendmsg,
1781 	.recvmsg       = bcm_recvmsg,
1782 	.mmap          = sock_no_mmap,
1783 };
1784 
1785 static struct proto bcm_proto __read_mostly = {
1786 	.name       = "CAN_BCM",
1787 	.owner      = THIS_MODULE,
1788 	.obj_size   = sizeof(struct bcm_sock),
1789 	.init       = bcm_init,
1790 };
1791 
1792 static const struct can_proto bcm_can_proto = {
1793 	.type       = SOCK_DGRAM,
1794 	.protocol   = CAN_BCM,
1795 	.ops        = &bcm_ops,
1796 	.prot       = &bcm_proto,
1797 };
1798 
1799 static int canbcm_pernet_init(struct net *net)
1800 {
1801 #if IS_ENABLED(CONFIG_PROC_FS)
1802 	/* create /proc/net/can-bcm directory */
1803 	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
1804 #endif /* CONFIG_PROC_FS */
1805 
1806 	return 0;
1807 }
1808 
1809 static void canbcm_pernet_exit(struct net *net)
1810 {
1811 #if IS_ENABLED(CONFIG_PROC_FS)
1812 	/* remove /proc/net/can-bcm directory */
1813 	if (net->can.bcmproc_dir)
1814 		remove_proc_entry("can-bcm", net->proc_net);
1815 #endif /* CONFIG_PROC_FS */
1816 }
1817 
1818 static struct pernet_operations canbcm_pernet_ops __read_mostly = {
1819 	.init = canbcm_pernet_init,
1820 	.exit = canbcm_pernet_exit,
1821 };
1822 
1823 static struct notifier_block canbcm_notifier = {
1824 	.notifier_call = bcm_notifier
1825 };
1826 
1827 static int __init bcm_module_init(void)
1828 {
1829 	int err;
1830 
1831 	pr_info("can: broadcast manager protocol\n");
1832 
1833 	err = register_pernet_subsys(&canbcm_pernet_ops);
1834 	if (err)
1835 		return err;
1836 
1837 	err = register_netdevice_notifier(&canbcm_notifier);
1838 	if (err)
1839 		goto register_notifier_failed;
1840 
1841 	err = can_proto_register(&bcm_can_proto);
1842 	if (err < 0) {
1843 		printk(KERN_ERR "can: registration of bcm protocol failed\n");
1844 		goto register_proto_failed;
1845 	}
1846 
1847 	return 0;
1848 
1849 register_proto_failed:
1850 	unregister_netdevice_notifier(&canbcm_notifier);
1851 register_notifier_failed:
1852 	unregister_pernet_subsys(&canbcm_pernet_ops);
1853 	return err;
1854 }
1855 
1856 static void __exit bcm_module_exit(void)
1857 {
1858 	can_proto_unregister(&bcm_can_proto);
1859 	unregister_netdevice_notifier(&canbcm_notifier);
1860 	unregister_pernet_subsys(&canbcm_pernet_ops);
1861 }
1862 
1863 module_init(bcm_module_init);
1864 module_exit(bcm_module_exit);
1865