1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
3 *
4 * Copyright (c) 2019 Volkswagen Group Electronic Research
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Volkswagen nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 */
41
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/list.h>
47 #include <linux/spinlock.h>
48 #include <linux/rcupdate.h>
49 #include <linux/rculist.h>
50 #include <linux/net.h>
51 #include <linux/netdevice.h>
52 #include <linux/if_arp.h>
53 #include <linux/skbuff.h>
54 #include <linux/can.h>
55 #include <linux/can/core.h>
56 #include <linux/can/skb.h>
57 #include <linux/can/gw.h>
58 #include <net/can.h>
59 #include <net/rtnetlink.h>
60 #include <net/net_namespace.h>
61 #include <net/sock.h>
62
#define CAN_GW_NAME "can-gw"

MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS(CAN_GW_NAME);

/* bounds and default for the module-wide routing hop limit */
#define CGW_MIN_HOPS 1
#define CGW_MAX_HOPS 6
#define CGW_DEFAULT_HOPS 1

/* read-only module parameter (0444): max re-routings of a single CAN frame */
static unsigned char max_hops __read_mostly = CGW_DEFAULT_HOPS;
module_param(max_hops, byte, 0444);
MODULE_PARM_DESC(max_hops,
		 "maximum " CAN_GW_NAME " routing hops for CAN frames "
		 "(valid values: " __stringify(CGW_MIN_HOPS) "-"
		 __stringify(CGW_MAX_HOPS) " hops, "
		 "default: " __stringify(CGW_DEFAULT_HOPS) ")");

/* netdevice notifier block - registered at module init (not visible here) */
static struct notifier_block notifier;
/* slab cache for struct cgw_job allocations */
static struct kmem_cache *cgw_cache __read_mostly;
84
/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
	/* operand frames for the AND/OR/XOR/SET operations */
	struct {
		struct canfd_frame and;
		struct canfd_frame or;
		struct canfd_frame xor;
		struct canfd_frame set;
	} modframe;
	/* CGW_MOD_* bit sets describing which frame members each op touches */
	struct {
		u8 and;
		u8 or;
		u8 xor;
		u8 set;
	} modtype;
	/* preselected modification handlers, executed in array order in the
	 * receive hot path until the first NULL entry (see can_can_gw_rcv)
	 */
	void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf,
					  struct cf_mod *mod);

	/* CAN frame checksum calculation after CAN frame modifications */
	struct {
		struct cgw_csum_xor xor;
		struct cgw_csum_crc8 crc8;
	} csum;
	/* optional checksum update handlers (NULL when not configured) */
	struct {
		void (*xor)(struct canfd_frame *cf,
			    struct cgw_csum_xor *xor);
		void (*crc8)(struct canfd_frame *cf,
			     struct cgw_csum_crc8 *crc8);
	} csumfunc;
	/* user-supplied identifier (CGW_MOD_UID attribute), 0 when unset */
	u32 uid;
};
115
/* So far we just support CAN -> CAN routing and frame modifications.
 *
 * The internal can_can_gw structure contains data and attributes for
 * a CAN -> CAN gateway job.
 */
struct can_can_gw {
	struct can_filter filter;	/* can_id/can_mask receive filter */
	int src_idx;			/* ifindex of the source CAN netdevice */
	int dst_idx;			/* ifindex of the destination CAN netdevice */
};
126
/* list entry for CAN gateways jobs */
struct cgw_job {
	struct hlist_node list;		/* entry in net->can.cgw_list */
	struct rcu_head rcu;		/* deferred free via cgw_job_free_rcu */
	u32 handled_frames;		/* frames successfully routed */
	u32 dropped_frames;		/* frames lost (alloc failure, dev down, send error) */
	u32 deleted_frames;		/* frames discarded (hop limit, bad length after mod) */
	struct cf_mod __rcu *cf_mod;	/* frame modification set, RCU protected */
	union {
		/* CAN frame data source */
		struct net_device *dev;
	} src;
	union {
		/* CAN frame data destination */
		struct net_device *dev;
	} dst;
	union {
		struct can_can_gw ccgw;	/* CAN -> CAN specific attributes */
		/* tbc */
	};
	u8 gwtype;		/* CGW_TYPE_* gateway type */
	u8 limit_hops;		/* per-job hop limit (0 = use module max_hops) */
	u16 flags;		/* CGW_FLAGS_* behaviour flags */
};
151
/* modification functions that are invoked in the hot path in can_can_gw_rcv */

/* generate a one-statement modification handler with the common signature */
#define MODFUNC(func, op) static void func(struct canfd_frame *cf, \
					   struct cf_mod *mod) { op ; }

/* AND/OR/XOR/SET of can_id, len, flags and the first 8 data bytes.
 * The data variants access exactly one u64, i.e. the Classic CAN payload.
 */
MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len)
MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len)
MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len)
MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_len, cf->len = mod->modframe.set.len)
MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
173
mod_and_fddata(struct canfd_frame * cf,struct cf_mod * mod)174 static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod)
175 {
176 int i;
177
178 for (i = 0; i < CANFD_MAX_DLEN; i += 8)
179 *(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i);
180 }
181
mod_or_fddata(struct canfd_frame * cf,struct cf_mod * mod)182 static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod)
183 {
184 int i;
185
186 for (i = 0; i < CANFD_MAX_DLEN; i += 8)
187 *(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i);
188 }
189
mod_xor_fddata(struct canfd_frame * cf,struct cf_mod * mod)190 static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod)
191 {
192 int i;
193
194 for (i = 0; i < CANFD_MAX_DLEN; i += 8)
195 *(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i);
196 }
197
mod_set_fddata(struct canfd_frame * cf,struct cf_mod * mod)198 static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod)
199 {
200 memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN);
201 }
202
203 /* retrieve valid CC DLC value and store it into 'len' */
mod_retrieve_ccdlc(struct canfd_frame * cf)204 static void mod_retrieve_ccdlc(struct canfd_frame *cf)
205 {
206 struct can_frame *ccf = (struct can_frame *)cf;
207
208 /* len8_dlc is only valid if len == CAN_MAX_DLEN */
209 if (ccf->len != CAN_MAX_DLEN)
210 return;
211
212 /* do we have a valid len8_dlc value from 9 .. 15 ? */
213 if (ccf->len8_dlc > CAN_MAX_DLEN && ccf->len8_dlc <= CAN_MAX_RAW_DLC)
214 ccf->len = ccf->len8_dlc;
215 }
216
217 /* convert valid CC DLC value in 'len' into struct can_frame elements */
mod_store_ccdlc(struct canfd_frame * cf)218 static void mod_store_ccdlc(struct canfd_frame *cf)
219 {
220 struct can_frame *ccf = (struct can_frame *)cf;
221
222 /* clear potential leftovers */
223 ccf->len8_dlc = 0;
224
225 /* plain data length 0 .. 8 - that was easy */
226 if (ccf->len <= CAN_MAX_DLEN)
227 return;
228
229 /* potentially broken values are caught in can_can_gw_rcv() */
230 if (ccf->len > CAN_MAX_RAW_DLC)
231 return;
232
233 /* we have a valid dlc value from 9 .. 15 in ccf->len */
234 ccf->len8_dlc = ccf->len;
235 ccf->len = CAN_MAX_DLEN;
236 }
237
/* AND the len element with raw DLC (9..15) awareness */
static void mod_and_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);	/* expose a raw DLC 9..15 in cf->len */
	mod_and_len(cf, mod);
	mod_store_ccdlc(cf);	/* move an out-of-range len back to len8_dlc */
}
244
/* OR the len element with raw DLC (9..15) awareness */
static void mod_or_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);	/* expose a raw DLC 9..15 in cf->len */
	mod_or_len(cf, mod);
	mod_store_ccdlc(cf);	/* move an out-of-range len back to len8_dlc */
}
251
/* XOR the len element with raw DLC (9..15) awareness */
static void mod_xor_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);	/* expose a raw DLC 9..15 in cf->len */
	mod_xor_len(cf, mod);
	mod_store_ccdlc(cf);	/* move an out-of-range len back to len8_dlc */
}
258
/* SET the len element with raw DLC (9..15) awareness - no retrieve needed
 * as the previous len content is completely overwritten anyway
 */
static void mod_set_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_set_len(cf, mod);
	mod_store_ccdlc(cf);	/* move an out-of-range len back to len8_dlc */
}
264
/* copy a Classic CAN frame into the canfd_frame shaped modframe storage */
static void canframecpy(struct canfd_frame *dst, struct can_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 3 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->len = src->len;
	/* Classic CAN payload is at most 8 bytes - one u64 copy suffices */
	*(u64 *)dst->data = *(u64 *)src->data;
}
276
/* copy a CAN FD frame into the modframe storage */
static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 2 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->flags = src->flags;
	dst->len = src->len;
	memcpy(dst->data, src->data, CANFD_MAX_DLEN);
}
289
/* validate the from/to/result checksum indices against the frame type
 * selected by rtcanmsg::flags - returns 0 on success, -EINVAL otherwise
 */
static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r)
{
	const s8 dlen = (r->flags & CGW_FLAGS_CAN_FD) ?
		CANFD_MAX_DLEN : CAN_MAX_DLEN;

	/* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
	 * relative to received dlc -1 .. -8 :
	 * e.g. for received dlc = 8
	 * -1 => index = 7 (data[7])
	 * -3 => index = 5 (data[5])
	 * -8 => index = 0 (data[0])
	 */

	if (fr < -dlen || fr >= dlen ||
	    to < -dlen || to >= dlen ||
	    re < -dlen || re >= dlen)
		return -EINVAL;

	return 0;
}
312
/* translate a possibly negative (relative-to-length) index into an
 * absolute data[] offset - may yield a negative value for an invalid
 * relative index which has to be checked by the caller
 */
static inline int calc_idx(int idx, int rx_len)
{
	return (idx < 0) ? rx_len + idx : idx;
}
320
cgw_csum_xor_rel(struct canfd_frame * cf,struct cgw_csum_xor * xor)321 static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor)
322 {
323 int from = calc_idx(xor->from_idx, cf->len);
324 int to = calc_idx(xor->to_idx, cf->len);
325 int res = calc_idx(xor->result_idx, cf->len);
326 u8 val = xor->init_xor_val;
327 int i;
328
329 if (from < 0 || to < 0 || res < 0)
330 return;
331
332 if (from <= to) {
333 for (i = from; i <= to; i++)
334 val ^= cf->data[i];
335 } else {
336 for (i = from; i >= to; i--)
337 val ^= cf->data[i];
338 }
339
340 cf->data[res] = val;
341 }
342
cgw_csum_xor_pos(struct canfd_frame * cf,struct cgw_csum_xor * xor)343 static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor)
344 {
345 u8 val = xor->init_xor_val;
346 int i;
347
348 for (i = xor->from_idx; i <= xor->to_idx; i++)
349 val ^= cf->data[i];
350
351 cf->data[xor->result_idx] = val;
352 }
353
cgw_csum_xor_neg(struct canfd_frame * cf,struct cgw_csum_xor * xor)354 static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor)
355 {
356 u8 val = xor->init_xor_val;
357 int i;
358
359 for (i = xor->from_idx; i >= xor->to_idx; i--)
360 val ^= cf->data[i];
361
362 cf->data[xor->result_idx] = val;
363 }
364
cgw_csum_crc8_rel(struct canfd_frame * cf,struct cgw_csum_crc8 * crc8)365 static void cgw_csum_crc8_rel(struct canfd_frame *cf,
366 struct cgw_csum_crc8 *crc8)
367 {
368 int from = calc_idx(crc8->from_idx, cf->len);
369 int to = calc_idx(crc8->to_idx, cf->len);
370 int res = calc_idx(crc8->result_idx, cf->len);
371 u8 crc = crc8->init_crc_val;
372 int i;
373
374 if (from < 0 || to < 0 || res < 0)
375 return;
376
377 if (from <= to) {
378 for (i = crc8->from_idx; i <= crc8->to_idx; i++)
379 crc = crc8->crctab[crc ^ cf->data[i]];
380 } else {
381 for (i = crc8->from_idx; i >= crc8->to_idx; i--)
382 crc = crc8->crctab[crc ^ cf->data[i]];
383 }
384
385 switch (crc8->profile) {
386 case CGW_CRC8PRF_1U8:
387 crc = crc8->crctab[crc ^ crc8->profile_data[0]];
388 break;
389
390 case CGW_CRC8PRF_16U8:
391 crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
392 break;
393
394 case CGW_CRC8PRF_SFFID_XOR:
395 crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
396 (cf->can_id >> 8 & 0xFF)];
397 break;
398 }
399
400 cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
401 }
402
cgw_csum_crc8_pos(struct canfd_frame * cf,struct cgw_csum_crc8 * crc8)403 static void cgw_csum_crc8_pos(struct canfd_frame *cf,
404 struct cgw_csum_crc8 *crc8)
405 {
406 u8 crc = crc8->init_crc_val;
407 int i;
408
409 for (i = crc8->from_idx; i <= crc8->to_idx; i++)
410 crc = crc8->crctab[crc ^ cf->data[i]];
411
412 switch (crc8->profile) {
413 case CGW_CRC8PRF_1U8:
414 crc = crc8->crctab[crc ^ crc8->profile_data[0]];
415 break;
416
417 case CGW_CRC8PRF_16U8:
418 crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
419 break;
420
421 case CGW_CRC8PRF_SFFID_XOR:
422 crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
423 (cf->can_id >> 8 & 0xFF)];
424 break;
425 }
426
427 cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
428 }
429
cgw_csum_crc8_neg(struct canfd_frame * cf,struct cgw_csum_crc8 * crc8)430 static void cgw_csum_crc8_neg(struct canfd_frame *cf,
431 struct cgw_csum_crc8 *crc8)
432 {
433 u8 crc = crc8->init_crc_val;
434 int i;
435
436 for (i = crc8->from_idx; i >= crc8->to_idx; i--)
437 crc = crc8->crctab[crc ^ cf->data[i]];
438
439 switch (crc8->profile) {
440 case CGW_CRC8PRF_1U8:
441 crc = crc8->crctab[crc ^ crc8->profile_data[0]];
442 break;
443
444 case CGW_CRC8PRF_16U8:
445 crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
446 break;
447
448 case CGW_CRC8PRF_SFFID_XOR:
449 crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
450 (cf->can_id >> 8 & 0xFF)];
451 break;
452 }
453
454 cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
455 }
456
/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
	struct cgw_job *gwj = (struct cgw_job *)data;
	struct canfd_frame *cf;
	struct sk_buff *nskb;
	struct can_skb_ext *csx, *ncsx;
	struct cf_mod *mod;
	int modidx = 0;

	/* process strictly Classic CAN or CAN FD frames */
	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		if (!can_is_canfd_skb(skb))
			return;
	} else {
		if (!can_is_can_skb(skb))
			return;
	}

	/* the CAN specific skb extension carries can_iif and can_gw_hops */
	csx = can_skb_ext_find(skb);
	if (!csx)
		return;

	/* Do not handle CAN frames routed more than 'max_hops' times.
	 * In general we should never catch this delimiter which is intended
	 * to cover a misconfiguration protection (e.g. circular CAN routes).
	 */
	if (csx->can_gw_hops >= max_hops) {
		/* indicate deleted frames due to misconfiguration */
		gwj->deleted_frames++;
		return;
	}

	/* destination interface is administratively down */
	if (!(gwj->dst.dev->flags & IFF_UP)) {
		gwj->dropped_frames++;
		return;
	}

	/* is sending the skb back to the incoming interface not allowed? */
	if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
	    csx->can_iif == gwj->dst.dev->ifindex)
		return;

	/* clone the given skb, which has not been done in can_rcv()
	 *
	 * When there is at least one modification function activated,
	 * we need to copy the skb as we want to modify skb->data.
	 */
	mod = rcu_dereference(gwj->cf_mod);
	if (mod->modfunc[0])
		nskb = skb_copy(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb) {
		gwj->dropped_frames++;
		return;
	}

	/* the cloned/copied nskb points to the skb extension of the original
	 * skb with an increased refcount. skb_ext_add() creates a copy to
	 * separate the skb extension data to modify the can_gw_hops.
	 */
	ncsx = skb_ext_add(nskb, SKB_EXT_CAN);
	if (!ncsx) {
		kfree_skb(nskb);
		gwj->dropped_frames++;
		return;
	}

	/* put the incremented hop counter in the cloned skb */
	ncsx->can_gw_hops = csx->can_gw_hops + 1;

	/* first processing of this CAN frame -> adjust to private hop limit */
	if (gwj->limit_hops && ncsx->can_gw_hops == 1)
		ncsx->can_gw_hops = max_hops - gwj->limit_hops + 1;

	nskb->dev = gwj->dst.dev;

	/* pointer to modifiable CAN frame */
	cf = (struct canfd_frame *)nskb->data;

	/* perform preprocessed modification functions if there are any */
	while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
		(*mod->modfunc[modidx++])(cf, mod);

	/* Has the CAN frame been modified? */
	if (modidx) {
		/* get available space for the processed CAN frame type */
		int max_len = nskb->len - offsetof(struct canfd_frame, data);

		/* dlc may have changed, make sure it fits to the CAN frame */
		if (cf->len > max_len) {
			/* delete frame due to misconfiguration */
			gwj->deleted_frames++;
			kfree_skb(nskb);
			return;
		}

		/* check for checksum updates */
		if (mod->csumfunc.crc8)
			(*mod->csumfunc.crc8)(cf, &mod->csum.crc8);

		if (mod->csumfunc.xor)
			(*mod->csumfunc.xor)(cf, &mod->csum.xor);
	}

	/* clear the skb timestamp if not configured the other way */
	if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
		nskb->tstamp = 0;

	/* send to netdevice */
	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
		gwj->dropped_frames++;
	else
		gwj->handled_frames++;
}
574
/* subscribe can_can_gw_rcv() for the job's filter on the source device */
static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
{
	return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
			       gwj, "gw", NULL);
}
581
/* remove the job's receive subscription - must match cgw_register_filter() */
static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj)
{
	can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			  gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}
587
/* RCU callback: free a cgw_job and its modification set after grace period */
static void cgw_job_free_rcu(struct rcu_head *rcu_head)
{
	struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);

	/* cgw_job::cf_mod is always accessed from the same cgw_job object within
	 * the same RCU read section. Once cgw_job is scheduled for removal,
	 * cf_mod can also be removed without mandating an additional grace period.
	 */
	kfree(rcu_access_pointer(gwj->cf_mod));
	kmem_cache_free(cgw_cache, gwj);
}
599
/* Return cgw_job::cf_mod with RTNL protected section */
static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj)
{
	/* callers hold the RTNL lock, so no RCU read lock is required */
	return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
}
605
cgw_notifier(struct notifier_block * nb,unsigned long msg,void * ptr)606 static int cgw_notifier(struct notifier_block *nb,
607 unsigned long msg, void *ptr)
608 {
609 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
610 struct net *net = dev_net(dev);
611
612 if (dev->type != ARPHRD_CAN)
613 return NOTIFY_DONE;
614
615 if (msg == NETDEV_UNREGISTER) {
616 struct cgw_job *gwj = NULL;
617 struct hlist_node *nx;
618
619 ASSERT_RTNL();
620
621 hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
622 if (gwj->src.dev == dev || gwj->dst.dev == dev) {
623 hlist_del(&gwj->list);
624 cgw_unregister_filter(net, gwj);
625 call_rcu(&gwj->rcu, cgw_job_free_rcu);
626 }
627 }
628 }
629
630 return NOTIFY_DONE;
631 }
632
/* fill one netlink message describing a gateway job - returns 0 on
 * success or -EMSGSIZE when the skb tail room is exhausted (the partly
 * built message is cancelled in that case)
 */
static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
		       u32 pid, u32 seq, int flags)
{
	struct rtcanmsg *rtcan;
	struct nlmsghdr *nlh;
	struct cf_mod *mod;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtcan = nlmsg_data(nlh);
	rtcan->can_family = AF_CAN;
	rtcan->gwtype = gwj->gwtype;
	rtcan->flags = gwj->flags;

	/* add statistics if available */

	if (gwj->handled_frames) {
		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
			goto cancel;
	}

	if (gwj->dropped_frames) {
		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
			goto cancel;
	}

	if (gwj->deleted_frames) {
		if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
			goto cancel;
	}

	/* check non default settings of attributes */

	if (gwj->limit_hops) {
		if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
			goto cancel;
	}

	/* dump the configured modifications - FD and Classic CAN jobs use
	 * differently sized attribute types (cgw_fdframe_mod vs cgw_frame_mod)
	 */
	mod = cgw_job_cf_mod(gwj);
	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (mod->modtype.and) {
			memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
			mb.modtype = mod->modtype.and;
			if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.or) {
			memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
			mb.modtype = mod->modtype.or;
			if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.xor) {
			memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
			mb.modtype = mod->modtype.xor;
			if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.set) {
			memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
			mb.modtype = mod->modtype.set;
			if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	} else {
		struct cgw_frame_mod mb;

		if (mod->modtype.and) {
			memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
			mb.modtype = mod->modtype.and;
			if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.or) {
			memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
			mb.modtype = mod->modtype.or;
			if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.xor) {
			memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
			mb.modtype = mod->modtype.xor;
			if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.set) {
			memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
			mb.modtype = mod->modtype.set;
			if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	}

	if (mod->uid) {
		if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0)
			goto cancel;
	}

	/* dump checksum configuration only when a handler is installed */
	if (mod->csumfunc.crc8) {
		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
			    &mod->csum.crc8) < 0)
			goto cancel;
	}

	if (mod->csumfunc.xor) {
		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
			    &mod->csum.xor) < 0)
			goto cancel;
	}

	/* CAN -> CAN specific attributes: filter and interface indices */
	if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
		if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
				    &gwj->ccgw.filter) < 0)
				goto cancel;
		}

		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
			goto cancel;

		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
			goto cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
774
775 /* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
cgw_dump_jobs(struct sk_buff * skb,struct netlink_callback * cb)776 static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
777 {
778 struct net *net = sock_net(skb->sk);
779 struct cgw_job *gwj = NULL;
780 int idx = 0;
781 int s_idx = cb->args[0];
782
783 rcu_read_lock();
784 hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) {
785 if (idx < s_idx)
786 goto cont;
787
788 if (cgw_put_job(skb, gwj, RTM_NEWROUTE,
789 NETLINK_CB(cb->skb).portid,
790 cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
791 break;
792 cont:
793 idx++;
794 }
795 rcu_read_unlock();
796
797 cb->args[0] = idx;
798
799 return skb->len;
800 }
801
/* netlink attribute validation policy for RTM_NEWROUTE/RTM_DELROUTE */
static const struct nla_policy cgw_policy[CGW_MAX + 1] = {
	[CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_SET] = { .len = sizeof(struct cgw_frame_mod) },
	[CGW_CS_XOR] = { .len = sizeof(struct cgw_csum_xor) },
	[CGW_CS_CRC8] = { .len = sizeof(struct cgw_csum_crc8) },
	[CGW_SRC_IF] = { .type = NLA_U32 },
	[CGW_DST_IF] = { .type = NLA_U32 },
	[CGW_FILTER] = { .len = sizeof(struct can_filter) },
	[CGW_LIM_HOPS] = { .type = NLA_U8 },
	[CGW_MOD_UID] = { .type = NLA_U32 },
	[CGW_FDMOD_AND] = { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_OR] = { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_XOR] = { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_SET] = { .len = sizeof(struct cgw_fdframe_mod) },
};
819
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			  u8 gwtype, void *gwtypeattr, u8 *limhops)
{
	struct nlattr *tb[CGW_MAX + 1];
	struct rtcanmsg *r = nlmsg_data(nlh);
	int modidx = 0;		/* next free slot in mod->modfunc[] */
	int err = 0;

	/* initialize modification & checksum data space */
	memset(mod, 0, sizeof(*mod));

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtcanmsg), tb,
				     CGW_MAX, cgw_policy, NULL);
	if (err < 0)
		return err;

	if (tb[CGW_LIM_HOPS]) {
		*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);

		/* the private limit must stay within the module-wide limit */
		if (*limhops < 1 || *limhops > max_hops)
			return -EINVAL;
	}

	/* check for AND/OR/XOR/SET modifications */
	if (r->flags & CGW_FLAGS_CAN_FD) {
		/* CAN FD jobs use the larger cgw_fdframe_mod attributes
		 * and install the 64 byte payload handlers
		 */
		struct cgw_fdframe_mod mb;

		if (tb[CGW_FDMOD_AND]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_and_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_and_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_fddata;
		}

		if (tb[CGW_FDMOD_OR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_or_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_or_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_fddata;
		}

		if (tb[CGW_FDMOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_xor_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_xor_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_fddata;
		}

		if (tb[CGW_FDMOD_SET]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_set_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_set_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_fddata;
		}
	} else {
		/* Classic CAN jobs use cgw_frame_mod attributes and the
		 * raw DLC aware ccdlc length handlers
		 */
		struct cgw_frame_mod mb;

		if (tb[CGW_MOD_AND]) {
			nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_and_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_data;
		}

		if (tb[CGW_MOD_OR]) {
			nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_or_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_data;
		}

		if (tb[CGW_MOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_xor_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_data;
		}

		if (tb[CGW_MOD_SET]) {
			nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_set_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_data;
		}
	}

	/* check for checksum operations after CAN frame modifications -
	 * checksum attributes are only accepted together with at least one
	 * modification (modidx > 0)
	 */
	if (modidx) {
		if (tb[CGW_CS_CRC8]) {
			struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
				   CGW_CS_CRC8_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.crc8 = cgw_csum_crc8_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.crc8 = cgw_csum_crc8_pos;
			else
				mod->csumfunc.crc8 = cgw_csum_crc8_neg;
		}

		if (tb[CGW_CS_XOR]) {
			struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
				   CGW_CS_XOR_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.xor = cgw_csum_xor_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.xor = cgw_csum_xor_pos;
			else
				mod->csumfunc.xor = cgw_csum_xor_neg;
		}

		if (tb[CGW_MOD_UID])
			nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
	}

	if (gwtype == CGW_TYPE_CAN_CAN) {
		/* check CGW_TYPE_CAN_CAN specific attributes */
		struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;

		memset(ccgw, 0, sizeof(*ccgw));

		/* check for can_filter in attributes */
		if (tb[CGW_FILTER])
			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
				   sizeof(struct can_filter));

		err = -ENODEV;

		/* specifying two interfaces is mandatory */
		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
			return err;

		ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
		ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);

		/* both indices set to 0 for flushing all routing entries */
		if (!ccgw->src_idx && !ccgw->dst_idx)
			return 0;

		/* only one index set to 0 is an error */
		if (!ccgw->src_idx || !ccgw->dst_idx)
			return err;
	}

	/* add the checks for other gwtypes here */

	return 0;
}
1076
/* RTM_NEWROUTE handler: create a new CAN->CAN gateway job or, when a
 * modification uid is given and matches an existing job, replace that
 * job's frame modification set in place via an RCU pointer swap.
 */
static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtcanmsg *r;
	struct cgw_job *gwj;
	struct cf_mod *mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	/* configuring gateway routes requires CAP_NET_ADMIN */
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	mod = kmalloc_obj(*mod);
	if (!mod)
		return -ENOMEM;

	err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		goto out_free_cf;

	if (mod->uid) {
		ASSERT_RTNL();

		/* check for updating an existing job with identical uid */
		hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
			struct cf_mod *old_cf;

			old_cf = cgw_job_cf_mod(gwj);
			if (old_cf->uid != mod->uid)
				continue;

			/* interfaces & filters must be identical */
			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
				err = -EINVAL;
				goto out_free_cf;
			}

			/* publish the new modification set; RCU readers may
			 * still hold the old one, so it is freed only after
			 * a grace period
			 */
			rcu_assign_pointer(gwj->cf_mod, mod);
			kfree_rcu_mightsleep(old_cf);
			return 0;
		}
	}

	/* ifindex == 0 is not allowed for job creation */
	if (!ccgw.src_idx || !ccgw.dst_idx) {
		err = -ENODEV;
		goto out_free_cf;
	}

	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
	if (!gwj) {
		err = -ENOMEM;
		goto out_free_cf;
	}

	gwj->handled_frames = 0;
	gwj->dropped_frames = 0;
	gwj->deleted_frames = 0;
	gwj->flags = r->flags;
	gwj->gwtype = r->gwtype;
	gwj->limit_hops = limhops;

	/* insert already parsed information */
	RCU_INIT_POINTER(gwj->cf_mod, mod);
	memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));

	err = -ENODEV;

	/* resolve and validate both interfaces; both must be CAN devices */
	gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx);

	if (!gwj->src.dev)
		goto out;

	if (gwj->src.dev->type != ARPHRD_CAN)
		goto out;

	gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx);

	if (!gwj->dst.dev)
		goto out;

	if (gwj->dst.dev->type != ARPHRD_CAN)
		goto out;

	/* is sending the skb back to the incoming interface intended? */
	if (gwj->src.dev == gwj->dst.dev &&
	    !(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK)) {
		err = -EINVAL;
		goto out;
	}

	ASSERT_RTNL();

	err = cgw_register_filter(net, gwj);
	if (!err)
		hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
out:
	/* error unwind: "out" additionally frees the job; "out_free_cf"
	 * (also jumped to directly before gwj exists) frees only mod
	 */
	if (err) {
		kmem_cache_free(cgw_cache, gwj);
out_free_cf:
		kfree(mod);
	}
	return err;
}
1194
cgw_remove_all_jobs(struct net * net)1195 static void cgw_remove_all_jobs(struct net *net)
1196 {
1197 struct cgw_job *gwj = NULL;
1198 struct hlist_node *nx;
1199
1200 ASSERT_RTNL();
1201
1202 hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
1203 hlist_del(&gwj->list);
1204 cgw_unregister_filter(net, gwj);
1205 call_rcu(&gwj->rcu, cgw_job_free_rcu);
1206 }
1207 }
1208
/* RTM_DELROUTE handler: remove the first gateway job matching the given
 * attributes, or flush all jobs when both interface indices are zero.
 * Returns 0 on success, -EINVAL when no job matched.
 */
static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;
	struct rtcanmsg *r;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	/* removing gateway routes requires CAP_NET_ADMIN */
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	/* two interface indices both set to 0 => remove all entries */
	if (!ccgw.src_idx && !ccgw.dst_idx) {
		cgw_remove_all_jobs(net);
		return 0;
	}

	err = -EINVAL;

	ASSERT_RTNL();

	/* remove only the first matching entry */
	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		struct cf_mod *cf_mod;

		if (gwj->flags != r->flags)
			continue;

		if (gwj->limit_hops != limhops)
			continue;

		cf_mod = cgw_job_cf_mod(gwj);
		/* we have a match when uid is enabled and identical */
		if (cf_mod->uid || mod.uid) {
			if (cf_mod->uid != mod.uid)
				continue;
		} else {
			/* no uid => check for identical modifications */
			if (memcmp(cf_mod, &mod, sizeof(mod)))
				continue;
		}

		/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
		if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
			continue;

		/* unlink, drop the filter, and free after a grace period */
		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		call_rcu(&gwj->rcu, cgw_job_free_rcu);
		err = 0;
		break;
	}

	return err;
}
1283
cangw_pernet_init(struct net * net)1284 static int __net_init cangw_pernet_init(struct net *net)
1285 {
1286 INIT_HLIST_HEAD(&net->can.cgw_list);
1287 return 0;
1288 }
1289
cangw_pernet_exit_batch(struct list_head * net_list)1290 static void __net_exit cangw_pernet_exit_batch(struct list_head *net_list)
1291 {
1292 struct net *net;
1293
1294 rtnl_lock();
1295 list_for_each_entry(net, net_list, exit_list)
1296 cgw_remove_all_jobs(net);
1297 rtnl_unlock();
1298 }
1299
/* network-namespace hooks: init creates the empty per-namespace job list,
 * exit_batch flushes all jobs when namespaces are torn down
 */
static struct pernet_operations cangw_pernet_ops = {
	.init = cangw_pernet_init,
	.exit_batch = cangw_pernet_exit_batch,
};
1304
/* rtnetlink handlers for PF_CAN routes: NEWROUTE creates/updates a job,
 * DELROUTE removes one (or flushes all), GETROUTE dumps the job list
 */
static const struct rtnl_msg_handler cgw_rtnl_msg_handlers[] __initconst_or_module = {
	{.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_NEWROUTE,
	 .doit = cgw_create_job},
	{.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_DELROUTE,
	 .doit = cgw_remove_job},
	{.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_GETROUTE,
	 .dumpit = cgw_dump_jobs},
};
1313
cgw_module_init(void)1314 static __init int cgw_module_init(void)
1315 {
1316 int ret;
1317
1318 /* sanitize given module parameter */
1319 max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);
1320
1321 pr_info("can: netlink gateway - max_hops=%d\n", max_hops);
1322
1323 ret = register_pernet_subsys(&cangw_pernet_ops);
1324 if (ret)
1325 return ret;
1326
1327 ret = -ENOMEM;
1328 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
1329 0, 0, NULL);
1330 if (!cgw_cache)
1331 goto out_cache_create;
1332
1333 /* set notifier */
1334 notifier.notifier_call = cgw_notifier;
1335 ret = register_netdevice_notifier(¬ifier);
1336 if (ret)
1337 goto out_register_notifier;
1338
1339 ret = rtnl_register_many(cgw_rtnl_msg_handlers);
1340 if (ret)
1341 goto out_rtnl_register;
1342
1343 return 0;
1344
1345 out_rtnl_register:
1346 unregister_netdevice_notifier(¬ifier);
1347 out_register_notifier:
1348 kmem_cache_destroy(cgw_cache);
1349 out_cache_create:
1350 unregister_pernet_subsys(&cangw_pernet_ops);
1351
1352 return ret;
1353 }
1354
cgw_module_exit(void)1355 static __exit void cgw_module_exit(void)
1356 {
1357 rtnl_unregister_all(PF_CAN);
1358
1359 unregister_netdevice_notifier(¬ifier);
1360
1361 unregister_pernet_subsys(&cangw_pernet_ops);
1362 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1363
1364 kmem_cache_destroy(cgw_cache);
1365 }
1366
1367 module_init(cgw_module_init);
1368 module_exit(cgw_module_exit);
1369