1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2010-2011 EIA Electronics,
3 // Pieter Beyens <pieter.beyens@eia.be>
4 // Copyright (c) 2010-2011 EIA Electronics,
5 // Kurt Van Dijck <kurt.van.dijck@eia.be>
6 // Copyright (c) 2018 Protonic,
7 // Robin van der Gracht <robin@protonic.nl>
8 // Copyright (c) 2017-2019 Pengutronix,
9 // Marc Kleine-Budde <kernel@pengutronix.de>
10 // Copyright (c) 2017-2019 Pengutronix,
11 // Oleksij Rempel <kernel@pengutronix.de>
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/can/can-ml.h>
16 #include <linux/can/core.h>
17 #include <linux/can/skb.h>
18 #include <linux/errqueue.h>
19 #include <linux/if_arp.h>
20 #include <net/can.h>
21
22 #include "j1939-priv.h"
23
24 #define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)
25
26 /* conversion function between struct sock::sk_priority from linux and
27 * j1939 priority field
28 */
j1939_prio(u32 sk_priority)29 static inline priority_t j1939_prio(u32 sk_priority)
30 {
31 sk_priority = min(sk_priority, 7U);
32
33 return 7 - sk_priority;
34 }
35
j1939_to_sk_priority(priority_t prio)36 static inline u32 j1939_to_sk_priority(priority_t prio)
37 {
38 return 7 - prio;
39 }
40
41 /* function to see if pgn is to be evaluated */
j1939_pgn_is_valid(pgn_t pgn)42 static inline bool j1939_pgn_is_valid(pgn_t pgn)
43 {
44 return pgn <= J1939_PGN_MAX;
45 }
46
47 /* test function to avoid non-zero DA placeholder for pdu1 pgn's */
j1939_pgn_is_clean_pdu(pgn_t pgn)48 static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
49 {
50 if (j1939_pgn_is_pdu1(pgn))
51 return !(pgn & 0xff);
52 else
53 return true;
54 }
55
j1939_sock_pending_add(struct sock * sk)56 static inline void j1939_sock_pending_add(struct sock *sk)
57 {
58 struct j1939_sock *jsk = j1939_sk(sk);
59
60 atomic_inc(&jsk->skb_pending);
61 }
62
j1939_sock_pending_get(struct sock * sk)63 static int j1939_sock_pending_get(struct sock *sk)
64 {
65 struct j1939_sock *jsk = j1939_sk(sk);
66
67 return atomic_read(&jsk->skb_pending);
68 }
69
j1939_sock_pending_del(struct sock * sk)70 void j1939_sock_pending_del(struct sock *sk)
71 {
72 struct j1939_sock *jsk = j1939_sk(sk);
73
74 /* atomic_dec_return returns the new value */
75 if (!atomic_dec_return(&jsk->skb_pending))
76 wake_up(&jsk->waitq); /* no pending SKB's */
77 }
78
/* Mark the socket bound and publish it on the per-interface socket
 * list so incoming traffic can be matched against it.
 */
static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	jsk->state |= J1939_SOCK_BOUND;
	/* reference is dropped again in j1939_jsk_del() */
	j1939_priv_get(priv);

	write_lock_bh(&priv->j1939_socks_lock);
	list_add_tail(&jsk->list, &priv->j1939_socks);
	write_unlock_bh(&priv->j1939_socks_lock);
}
88
/* Remove the socket from the per-interface list and clear its bound
 * state; drops the priv reference taken in j1939_jsk_add().
 */
static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	write_lock_bh(&priv->j1939_socks_lock);
	list_del_init(&jsk->list);
	write_unlock_bh(&priv->j1939_socks_lock);

	j1939_priv_put(priv);
	jsk->state &= ~J1939_SOCK_BOUND;
}
98
/* Append a TX session to its socket's session queue.
 * Returns true when the queue was empty beforehand, i.e. the caller
 * should activate this session immediately.
 */
static bool j1939_sk_queue_session(struct j1939_session *session)
{
	struct j1939_sock *jsk = j1939_sk(session->sk);
	bool empty;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	empty = list_empty(&jsk->sk_session_queue);
	/* the queue holds its own reference on the session */
	j1939_session_get(session);
	list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
	j1939_sock_pending_add(&jsk->sk);

	return empty;
}
113
/* Return the newest session on the socket's TX queue if user space has
 * not yet supplied all of its data, or NULL when the tail session is
 * already fully queued (or the queue is empty). On success a session
 * reference is taken for the caller.
 */
static struct
j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
{
	struct j1939_session *session = NULL;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	if (!list_empty(&jsk->sk_session_queue)) {
		session = list_last_entry(&jsk->sk_session_queue,
					  struct j1939_session,
					  sk_session_queue_entry);
		if (session->total_queued_size == session->total_message_size)
			session = NULL; /* complete; nothing to append */
		else
			j1939_session_get(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);

	return session;
}
133
/* Drop every queued session of this socket with error @err. Each
 * session loses the reference the queue held on it.
 */
static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
				    struct j1939_sock *jsk, int err)
{
	struct j1939_session *session, *tmp;

	netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
	spin_lock_bh(&jsk->sk_session_queue_lock);
	list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
				 sk_session_queue_entry) {
		list_del_init(&session->sk_session_queue_entry);
		session->err = err;
		j1939_session_put(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}
149
/* Retire @session from the head of its socket's TX queue and activate
 * the next queued session, if any. Sessions that fail to activate are
 * marked -EBUSY and skipped. Caller holds sk_session_queue_lock.
 */
static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
{
	struct j1939_sock *jsk;
	struct j1939_session *first;
	int err;

	/* RX-Session don't have a socket (yet) */
	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);
	lockdep_assert_held(&jsk->sk_session_queue_lock);

	err = session->err;

	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);

	/* Someone else has already activated the next session */
	if (first != session)
		return;

activate_next:
	/* drop the queue's reference on the retired session */
	list_del_init(&first->sk_session_queue_entry);
	j1939_session_put(first);
	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);
	if (!first)
		return;

	if (j1939_session_activate(first)) {
		netdev_warn_once(first->priv->ndev,
				 "%s: 0x%p: Identical session is already activated.\n",
				 __func__, first);
		first->err = -EBUSY;
		goto activate_next; /* skip it and try the next one */
	} else {
		/* Give receiver some time (arbitrary chosen) to recover */
		int time_ms = 0;

		if (err)
			time_ms = 10 + get_random_u32_below(16);

		j1939_tp_schedule_txtimer(first, time_ms);
	}
}
198
/* Locked wrapper around j1939_sk_queue_activate_next_locked(). */
void j1939_sk_queue_activate_next(struct j1939_session *session)
{
	struct j1939_sock *jsk;

	/* RX sessions have no socket; nothing queued to activate */
	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);

	spin_lock_bh(&jsk->sk_session_queue_lock);
	j1939_sk_queue_activate_next_locked(session);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}
212
/* Decide whether the skb described by @skcb is addressed to @jsk,
 * based on destination, source and PGN. Promiscuous sockets accept
 * everything.
 */
static bool j1939_sk_match_dst(struct j1939_sock *jsk,
			       const struct j1939_sk_buff_cb *skcb)
{
	if ((jsk->state & J1939_SOCK_PROMISC))
		return true;

	/* Destination address filter */
	if (jsk->addr.src_name && skcb->addr.dst_name) {
		/* NAME-addressed: must match our bound NAME exactly */
		if (jsk->addr.src_name != skcb->addr.dst_name)
			return false;
	} else {
		/* receive (all sockets) if
		 * - all packages that match our bind() address
		 * - all broadcast on a socket if SO_BROADCAST
		 *   is set
		 */
		if (j1939_address_is_unicast(skcb->addr.da)) {
			if (jsk->addr.sa != skcb->addr.da)
				return false;
		} else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
			/* receiving broadcast without SO_BROADCAST
			 * flag is not allowed
			 */
			return false;
		}
	}

	/* Source address filter */
	if (jsk->state & J1939_SOCK_CONNECTED) {
		/* receive (all sockets) if
		 * - all packages that match our connect() name or address
		 */
		if (jsk->addr.dst_name && skcb->addr.src_name) {
			if (jsk->addr.dst_name != skcb->addr.src_name)
				return false;
		} else {
			if (jsk->addr.da != skcb->addr.sa)
				return false;
		}
	}

	/* PGN filter */
	if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
	    jsk->pgn_rx_filter != skcb->addr.pgn)
		return false;

	return true;
}
261
262 /* matches skb control buffer (addr) with a j1939 filter */
j1939_sk_match_filter(struct j1939_sock * jsk,const struct j1939_sk_buff_cb * skcb)263 static bool j1939_sk_match_filter(struct j1939_sock *jsk,
264 const struct j1939_sk_buff_cb *skcb)
265 {
266 const struct j1939_filter *f;
267 int nfilter;
268
269 spin_lock_bh(&jsk->filters_lock);
270
271 f = jsk->filters;
272 nfilter = jsk->nfilters;
273
274 if (!nfilter)
275 /* receive all when no filters are assigned */
276 goto filter_match_found;
277
278 for (; nfilter; ++f, --nfilter) {
279 if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
280 continue;
281 if ((skcb->addr.sa & f->addr_mask) != f->addr)
282 continue;
283 if ((skcb->addr.src_name & f->name_mask) != f->name)
284 continue;
285 goto filter_match_found;
286 }
287
288 spin_unlock_bh(&jsk->filters_lock);
289 return false;
290
291 filter_match_found:
292 spin_unlock_bh(&jsk->filters_lock);
293 return true;
294 }
295
j1939_sk_recv_match_one(struct j1939_sock * jsk,const struct j1939_sk_buff_cb * skcb)296 static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
297 const struct j1939_sk_buff_cb *skcb)
298 {
299 if (!(jsk->state & J1939_SOCK_BOUND))
300 return false;
301
302 if (!j1939_sk_match_dst(jsk, skcb))
303 return false;
304
305 if (!j1939_sk_match_filter(jsk, skcb))
306 return false;
307
308 return true;
309 }
310
/* Deliver one received skb to a single matching socket by cloning it
 * onto that socket's receive queue.
 */
static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
	const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
	struct j1939_sk_buff_cb *skcb;
	enum skb_drop_reason reason;
	struct sk_buff *skb;

	/* never loop a message back to its sending socket */
	if (oskb->sk == &jsk->sk)
		return;

	if (!j1939_sk_recv_match_one(jsk, oskcb))
		return;

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb) {
		pr_warn("skb clone failed\n");
		return;
	}
	can_skb_set_owner(skb, oskb->sk);

	skcb = j1939_skb_to_cb(skb);
	/* locally originated traffic is flagged with MSG_DONTROUTE */
	skcb->msg_flags &= ~(MSG_DONTROUTE);
	if (skb->sk)
		skcb->msg_flags |= MSG_DONTROUTE;

	if (sock_queue_rcv_skb_reason(&jsk->sk, skb, &reason) < 0)
		sk_skb_reason_drop(&jsk->sk, skb, reason);
}
339
/* Return true when at least one socket on @priv would accept an skb
 * whose control buffer is @skcb.
 */
bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
{
	struct j1939_sock *jsk;
	bool match = false;

	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		match = j1939_sk_recv_match_one(jsk, skcb);
		if (match)
			break;
	}
	read_unlock_bh(&priv->j1939_socks_lock);

	return match;
}
355
/* Fan a received skb out to every matching socket on this interface. */
void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
	struct j1939_sock *jsk;

	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		j1939_sk_recv_one(jsk, skb);
	}
	read_unlock_bh(&priv->j1939_socks_lock);
}
366
/* Final (RCU-deferred) destructor of a CAN_J1939 socket; drops the
 * jsk->priv reference taken in j1939_sk_bind().
 */
static void j1939_sk_sock_destruct(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* This function will be called by the generic networking code, when
	 * the socket is ultimately closed (sk->sk_destruct).
	 *
	 * The race between
	 * - processing a received CAN frame
	 *   (can_receive -> j1939_can_recv)
	 *   and accessing j1939_priv
	 * ... and ...
	 * - closing a socket
	 *   (j1939_can_rx_unregister -> can_rx_unregister)
	 *   and calling the final j1939_priv_put()
	 *
	 * is avoided by calling the final j1939_priv_put() from this
	 * RCU deferred cleanup call.
	 */
	if (jsk->priv) {
		j1939_priv_put(jsk->priv);
		jsk->priv = NULL;
	}

	/* call generic CAN sock destruct */
	can_sock_destruct(sk);
}
394
/* Initialize a freshly created CAN_J1939 socket; always returns 0. */
static int j1939_sk_init(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* Ensure that "sk" is first member in "struct j1939_sock", so that we
	 * can skip it during memset().
	 */
	BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
	memset((void *)jsk + sizeof(jsk->sk), 0x0,
	       sizeof(*jsk) - sizeof(jsk->sk));

	INIT_LIST_HEAD(&jsk->list);
	init_waitqueue_head(&jsk->waitq);
	/* J1939 default priority is 6 */
	jsk->sk.sk_priority = j1939_to_sk_priority(6);
	jsk->sk.sk_reuse = 1; /* per default */
	jsk->addr.sa = J1939_NO_ADDR;
	jsk->addr.da = J1939_NO_ADDR;
	jsk->addr.pgn = J1939_NO_PGN;
	jsk->pgn_rx_filter = J1939_NO_PGN;
	atomic_set(&jsk->skb_pending, 0);
	spin_lock_init(&jsk->sk_session_queue_lock);
	INIT_LIST_HEAD(&jsk->sk_session_queue);
	spin_lock_init(&jsk->filters_lock);

	/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_destruct = j1939_sk_sock_destruct;
	sk->sk_protocol = CAN_J1939;

	return 0;
}
426
/* Validate a user-supplied sockaddr for bind()/connect().
 * Returns 0 on success or a negative errno (the check order is part
 * of the user-visible behavior).
 */
static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
{
	if (!addr)
		return -EDESTADDRREQ;
	if (len < J1939_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;
	if (!addr->can_ifindex)
		return -ENODEV;
	/* a valid PDU1 PGN must not carry an address in its low byte */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
	    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
		return -EINVAL;

	return 0;
}
443
/* bind() handler: attach the socket to a CAN interface and claim the
 * requested NAME/address. A re-bind() on the same interface updates
 * the address; binding to a different interface is rejected.
 */
static int j1939_sk_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	struct j1939_priv *priv;
	struct sock *sk;
	struct net *net;
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	priv = jsk->priv;
	sk = sock->sk;
	net = sock_net(sk);

	/* Already bound to an interface? */
	if (jsk->state & J1939_SOCK_BOUND) {
		/* A re-bind() to a different interface is not
		 * supported.
		 */
		if (jsk->ifindex != addr->can_ifindex) {
			ret = -EINVAL;
			goto out_release_sock;
		}

		/* drop old references */
		j1939_jsk_del(priv, jsk);
		j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
	} else {
		struct can_ml_priv *can_ml;
		struct net_device *ndev;

		ndev = dev_get_by_index(net, addr->can_ifindex);
		if (!ndev) {
			ret = -ENODEV;
			goto out_release_sock;
		}

		/* reject devices that are being torn down */
		if (ndev->reg_state != NETREG_REGISTERED) {
			dev_put(ndev);
			ret = -ENODEV;
			goto out_release_sock;
		}

		/* only CAN devices carry can_ml_priv */
		can_ml = can_get_ml_priv(ndev);
		if (!can_ml) {
			dev_put(ndev);
			ret = -ENODEV;
			goto out_release_sock;
		}

		if (!(ndev->flags & IFF_UP)) {
			dev_put(ndev);
			ret = -ENETDOWN;
			goto out_release_sock;
		}

		/* j1939_netdev_start() holds its own netdev reference */
		priv = j1939_netdev_start(ndev);
		dev_put(ndev);
		if (IS_ERR(priv)) {
			ret = PTR_ERR(priv);
			goto out_release_sock;
		}

		jsk->ifindex = addr->can_ifindex;

		/* the corresponding j1939_priv_put() is called via
		 * sk->sk_destruct, which points to j1939_sk_sock_destruct()
		 */
		j1939_priv_get(priv);
		jsk->priv = priv;
	}

	/* set default transmit pgn */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
	jsk->addr.src_name = addr->can_addr.j1939.name;
	jsk->addr.sa = addr->can_addr.j1939.addr;

	/* get new references */
	ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
	if (ret) {
		/* unwind: stop netdev and drop the priv reference taken
		 * above; synchronize_rcu() keeps the destructor race away
		 */
		j1939_netdev_stop(priv);
		jsk->priv = NULL;
		synchronize_rcu();
		j1939_priv_put(priv);
		goto out_release_sock;
	}

	j1939_jsk_add(priv, jsk);

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}
544
/* connect() handler: set the default destination NAME/address/PGN for
 * subsequent sends. Requires a prior bind() on the same interface.
 */
static int j1939_sk_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
			    int len, int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	/* bind() before connect() is mandatory */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	/* A connect() to a different interface is not supported. */
	if (jsk->ifindex != addr->can_ifindex) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	if (!addr->can_addr.j1939.name &&
	    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
	    !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
		/* broadcast, but SO_BROADCAST not set */
		ret = -EACCES;
		goto out_release_sock;
	}

	jsk->addr.dst_name = addr->can_addr.j1939.name;
	jsk->addr.da = addr->can_addr.j1939.addr;

	/* an invalid PGN leaves the previous default PGN in place */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->addr.pgn = addr->can_addr.j1939.pgn;

	jsk->state |= J1939_SOCK_CONNECTED;

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}
591
j1939_sk_sock2sockaddr_can(struct sockaddr_can * addr,const struct j1939_sock * jsk,int peer)592 static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
593 const struct j1939_sock *jsk, int peer)
594 {
595 /* There are two holes (2 bytes and 3 bytes) to clear to avoid
596 * leaking kernel information to user space.
597 */
598 memset(addr, 0, J1939_MIN_NAMELEN);
599
600 addr->can_family = AF_CAN;
601 addr->can_ifindex = jsk->ifindex;
602 addr->can_addr.j1939.pgn = jsk->addr.pgn;
603 if (peer) {
604 addr->can_addr.j1939.name = jsk->addr.dst_name;
605 addr->can_addr.j1939.addr = jsk->addr.da;
606 } else {
607 addr->can_addr.j1939.name = jsk->addr.src_name;
608 addr->can_addr.j1939.addr = jsk->addr.sa;
609 }
610 }
611
/* getsockname()/getpeername() handler; returns the address length on
 * success or a negative errno.
 */
static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret = 0;

	lock_sock(sk);

	/* a peer address only exists after connect() */
	if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
		ret = -EADDRNOTAVAIL;
		goto failure;
	}

	j1939_sk_sock2sockaddr_can(addr, jsk, peer);
	ret = J1939_MIN_NAMELEN;

 failure:
	release_sock(sk);

	return ret;
}
635
/* close() handler: wait for pending transmissions (aborting them when
 * interrupted by a signal), unbind and drop all references.
 */
static int j1939_sk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk;

	if (!sk)
		return 0;

	lock_sock(sk);
	jsk = j1939_sk(sk);

	if (jsk->state & J1939_SOCK_BOUND) {
		struct j1939_priv *priv = jsk->priv;

		/* interrupted by a signal: cancel instead of waiting */
		if (wait_event_interruptible(jsk->waitq,
					     !j1939_sock_pending_get(&jsk->sk))) {
			j1939_cancel_active_session(priv, sk);
			j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
		}

		j1939_jsk_del(priv, jsk);

		j1939_local_ecu_put(priv, jsk->addr.src_name,
				    jsk->addr.sa);

		j1939_netdev_stop(priv);
	}

	kfree(jsk->filters);
	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	sock_put(sk);

	return 0;
}
674
/* Set or clear a single jsk->state flag from a user-supplied 'int'
 * option. Returns a negative errno on size/copy failure, otherwise
 * the raw user value (note: not necessarily 0 on success; callers
 * only check for negative results).
 */
static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
				    unsigned int optlen, int flag)
{
	int tmp;

	if (optlen != sizeof(tmp))
		return -EINVAL;
	if (copy_from_sockptr(&tmp, optval, optlen))
		return -EFAULT;
	lock_sock(&jsk->sk);
	if (tmp)
		jsk->state |= flag;
	else
		jsk->state &= ~flag;
	release_sock(&jsk->sk);
	return tmp;
}
692
/* setsockopt() handler for SOL_CAN_J1939 options:
 * - SO_J1939_FILTER:    install (or clear) the receive filter list
 * - SO_J1939_PROMISC:   toggle promiscuous reception
 * - SO_J1939_ERRQUEUE:  toggle error-queue notifications
 * - SO_J1939_SEND_PRIO: set the default transmit priority
 */
static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int tmp, count = 0, ret = 0;
	struct j1939_filter *filters = NULL, *ofilters;

	if (level != SOL_CAN_J1939)
		return -EINVAL;

	switch (optname) {
	case SO_J1939_FILTER:
		if (!sockptr_is_null(optval) && optlen != 0) {
			struct j1939_filter *f;
			int c;

			/* must be a whole number of filter entries */
			if (optlen % sizeof(*filters) != 0)
				return -EINVAL;

			if (optlen > J1939_FILTER_MAX *
			    sizeof(struct j1939_filter))
				return -EINVAL;

			count = optlen / sizeof(*filters);
			filters = memdup_sockptr(optval, optlen);
			if (IS_ERR(filters))
				return PTR_ERR(filters);

			/* normalize: store values pre-masked */
			for (f = filters, c = count; c; f++, c--) {
				f->name &= f->name_mask;
				f->pgn &= f->pgn_mask;
				f->addr &= f->addr_mask;
			}
		}

		/* swap in the new list; free the old one outside the lock */
		lock_sock(&jsk->sk);
		spin_lock_bh(&jsk->filters_lock);
		ofilters = jsk->filters;
		jsk->filters = filters;
		jsk->nfilters = count;
		spin_unlock_bh(&jsk->filters_lock);
		release_sock(&jsk->sk);
		kfree(ofilters);
		return 0;
	case SO_J1939_PROMISC:
		return j1939_sk_setsockopt_flag(jsk, optval, optlen,
						J1939_SOCK_PROMISC);
	case SO_J1939_ERRQUEUE:
		ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
					       J1939_SOCK_ERRQUEUE);
		if (ret < 0)
			return ret;

		/* disabling the errqueue drops already-queued events */
		if (!(jsk->state & J1939_SOCK_ERRQUEUE))
			skb_queue_purge(&sk->sk_error_queue);
		return ret;
	case SO_J1939_SEND_PRIO:
		if (optlen != sizeof(tmp))
			return -EINVAL;
		if (copy_from_sockptr(&tmp, optval, optlen))
			return -EFAULT;
		if (tmp < 0 || tmp > 7)
			return -EDOM;
		/* priorities 0 and 1 are reserved for privileged users */
		if (tmp < 2 && !capable(CAP_NET_ADMIN))
			return -EPERM;
		lock_sock(&jsk->sk);
		jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
		release_sock(&jsk->sk);
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}
767
/* getsockopt() handler for SOL_CAN_J1939; all supported options are
 * plain 'int' values.
 */
static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret, ulen;
	/* set defaults for using 'int' properties */
	int tmp = 0;
	int len = sizeof(tmp);
	void *val = &tmp;

	if (level != SOL_CAN_J1939)
		return -EINVAL;
	if (get_user(ulen, optlen))
		return -EFAULT;
	if (ulen < 0)
		return -EINVAL;

	lock_sock(&jsk->sk);
	switch (optname) {
	case SO_J1939_PROMISC:
		tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
		break;
	case SO_J1939_ERRQUEUE:
		tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
		break;
	case SO_J1939_SEND_PRIO:
		tmp = j1939_prio(jsk->sk.sk_priority);
		break;
	default:
		ret = -ENOPROTOOPT;
		goto no_copy;
	}

	/* copy to user, based on 'len' & 'val'
	 * but most sockopt's are 'int' properties, and have 'len' & 'val'
	 * left unchanged, but instead modified 'tmp'
	 */
	if (len > ulen)
		ret = -EFAULT;
	else if (put_user(len, optlen))
		ret = -EFAULT;
	else if (copy_to_user(optval, val, len))
		ret = -EFAULT;
	else
		ret = 0;
 no_copy:
	release_sock(&jsk->sk);
	return ret;
}
818
/* recvmsg() handler: dequeue one J1939 message, attach ancillary data
 * (destination address/name, priority) and fill in the source address.
 * Returns the number of bytes copied or a negative errno.
 */
static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct j1939_sk_buff_cb *skcb;
	int ret = 0;

	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	/* errqueue reads are served from the socket error queue */
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
					  SCM_J1939_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &ret);
	if (!skb)
		return ret;

	/* a too-small buffer truncates; the remainder is discarded */
	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	ret = memcpy_to_msg(msg, skb->data, size);
	if (ret < 0) {
		skb_free_datagram(sk, skb);
		return ret;
	}

	skcb = j1939_skb_to_cb(skb);
	if (j1939_address_is_valid(skcb->addr.da))
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
			 sizeof(skcb->addr.da), &skcb->addr.da);

	if (skcb->addr.dst_name)
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
			 sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);

	put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
		 sizeof(skcb->priority), &skcb->priority);

	if (msg->msg_name) {
		struct sockaddr_can *paddr = msg->msg_name;

		msg->msg_namelen = J1939_MIN_NAMELEN;
		memset(msg->msg_name, 0, msg->msg_namelen);
		paddr->can_family = AF_CAN;
		paddr->can_ifindex = skb->skb_iif;
		paddr->can_addr.j1939.name = skcb->addr.src_name;
		paddr->can_addr.j1939.addr = skcb->addr.sa;
		paddr->can_addr.j1939.pgn = skcb->addr.pgn;
	}

	sock_recv_cmsgs(msg, sk, skb);
	msg->msg_flags |= skcb->msg_flags;
	skb_free_datagram(sk, skb);

	return size;
}
879
/* Allocate an skb for one outgoing segment, copy @size bytes of user
 * data into it and pre-fill its control buffer from the socket's
 * address, overridden by msg_name when given. Returns the skb and sets
 * *errcode (0 on success); returns NULL with *errcode < 0 on failure.
 */
static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
					  struct sock *sk,
					  struct msghdr *msg, size_t size,
					  int *errcode)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;
	struct can_skb_ext *csx;
	int ret;

	/* reserve headroom for a full can_frame minus its data area */
	skb = sock_alloc_send_skb(sk,
				  size +
				  sizeof(struct can_frame) -
				  sizeof(((struct can_frame *)NULL)->data),
				  msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		goto failure;

	csx = can_skb_ext_add(skb);
	if (!csx) {
		kfree_skb(skb);
		ret = -ENOMEM;
		goto failure;
	}

	csx->can_iif = ndev->ifindex;
	skb_reserve(skb, offsetof(struct can_frame, data));

	ret = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (ret < 0)
		goto free_skb;

	skb->dev = ndev;

	skcb = j1939_skb_to_cb(skb);
	memset(skcb, 0, sizeof(*skcb));
	skcb->addr = jsk->addr;
	skcb->priority = j1939_prio(READ_ONCE(sk->sk_priority));

	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		/* msg_name overrides the connect()ed destination */
		if (addr->can_addr.j1939.name ||
		    addr->can_addr.j1939.addr != J1939_NO_ADDR) {
			skcb->addr.dst_name = addr->can_addr.j1939.name;
			skcb->addr.da = addr->can_addr.j1939.addr;
		}
		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
			skcb->addr.pgn = addr->can_addr.j1939.pgn;
	}

	*errcode = ret;
	return skb;

free_skb:
	kfree_skb(skb);
failure:
	*errcode = ret;
	return NULL;
}
941
j1939_sk_opt_stats_get_size(enum j1939_sk_errqueue_type type)942 static size_t j1939_sk_opt_stats_get_size(enum j1939_sk_errqueue_type type)
943 {
944 switch (type) {
945 case J1939_ERRQUEUE_RX_RTS:
946 return
947 nla_total_size(sizeof(u32)) + /* J1939_NLA_TOTAL_SIZE */
948 nla_total_size(sizeof(u32)) + /* J1939_NLA_PGN */
949 nla_total_size(sizeof(u64)) + /* J1939_NLA_SRC_NAME */
950 nla_total_size(sizeof(u64)) + /* J1939_NLA_DEST_NAME */
951 nla_total_size(sizeof(u8)) + /* J1939_NLA_SRC_ADDR */
952 nla_total_size(sizeof(u8)) + /* J1939_NLA_DEST_ADDR */
953 0;
954 default:
955 return
956 nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
957 0;
958 }
959 }
960
/* Build the opt_stats skb attached to an errqueue notification.
 * The skb is sized exactly by j1939_sk_opt_stats_get_size(), so the
 * nla_put* return values can safely be ignored here.
 */
static struct sk_buff *
j1939_sk_get_timestamping_opt_stats(struct j1939_session *session,
				    enum j1939_sk_errqueue_type type)
{
	struct sk_buff *stats;
	u32 size;

	stats = alloc_skb(j1939_sk_opt_stats_get_size(type), GFP_ATOMIC);
	if (!stats)
		return NULL;

	/* acked bytes: full message for single-frame (J1939_SIMPLE),
	 * else 7 payload bytes per acked TP packet, capped at the total
	 */
	if (session->skcb.addr.type == J1939_SIMPLE)
		size = session->total_message_size;
	else
		size = min(session->pkt.tx_acked * 7,
			   session->total_message_size);

	switch (type) {
	case J1939_ERRQUEUE_RX_RTS:
		nla_put_u32(stats, J1939_NLA_TOTAL_SIZE,
			    session->total_message_size);
		nla_put_u32(stats, J1939_NLA_PGN,
			    session->skcb.addr.pgn);
		nla_put_u64_64bit(stats, J1939_NLA_SRC_NAME,
				  session->skcb.addr.src_name, J1939_NLA_PAD);
		nla_put_u64_64bit(stats, J1939_NLA_DEST_NAME,
				  session->skcb.addr.dst_name, J1939_NLA_PAD);
		nla_put_u8(stats, J1939_NLA_SRC_ADDR,
			   session->skcb.addr.sa);
		nla_put_u8(stats, J1939_NLA_DEST_ADDR,
			   session->skcb.addr.da);
		break;
	default:
		nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);
	}

	return stats;
}
999
/* Queue one errqueue notification of @type for @sk, honoring the
 * socket's SOF_TIMESTAMPING_* flags. No-op unless the socket enabled
 * SO_J1939_ERRQUEUE.
 */
static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
				enum j1939_sk_errqueue_type type)
{
	struct j1939_priv *priv = session->priv;
	struct j1939_sock *jsk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	char *state = "UNK";
	u32 tsflags;
	int err;

	jsk = j1939_sk(sk);

	if (!(jsk->state & J1939_SOCK_ERRQUEUE))
		return;

	/* filter by the timestamping flags the user asked for */
	tsflags = READ_ONCE(sk->sk_tsflags);
	switch (type) {
	case J1939_ERRQUEUE_TX_ACK:
		if (!(tsflags & SOF_TIMESTAMPING_TX_ACK))
			return;
		break;
	case J1939_ERRQUEUE_TX_SCHED:
		if (!(tsflags & SOF_TIMESTAMPING_TX_SCHED))
			return;
		break;
	case J1939_ERRQUEUE_TX_ABORT:
		break;
	case J1939_ERRQUEUE_RX_RTS:
		fallthrough;
	case J1939_ERRQUEUE_RX_DPO:
		fallthrough;
	case J1939_ERRQUEUE_RX_ABORT:
		if (!(tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
			return;
		break;
	default:
		/* unknown types are logged but still reported below */
		netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
	}

	skb = j1939_sk_get_timestamping_opt_stats(session, type);
	if (!skb)
		return;

	skb->tstamp = ktime_get_real();

	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));

	/* fill in the extended error info per notification type */
	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	switch (type) {
	case J1939_ERRQUEUE_TX_ACK:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
		serr->ee.ee_info = SCM_TSTAMP_ACK;
		state = "TX ACK";
		break;
	case J1939_ERRQUEUE_TX_SCHED:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
		serr->ee.ee_info = SCM_TSTAMP_SCHED;
		state = "TX SCH";
		break;
	case J1939_ERRQUEUE_TX_ABORT:
		serr->ee.ee_errno = session->err;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
		state = "TX ABT";
		break;
	case J1939_ERRQUEUE_RX_RTS:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_RTS;
		state = "RX RTS";
		break;
	case J1939_ERRQUEUE_RX_DPO:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_DPO;
		state = "RX DPO";
		break;
	case J1939_ERRQUEUE_RX_ABORT:
		serr->ee.ee_errno = session->err;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_ABORT;
		state = "RX ABT";
		break;
	}

	serr->opt_stats = true;
	if (tsflags & SOF_TIMESTAMPING_OPT_ID)
		serr->ee.ee_data = session->tskey;

	netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
		   __func__, session, session->tskey, state);
	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
};
1100
/* Dispatch an errqueue notification: TX events go to the originating
 * socket only; RX events fan out to all sockets matching the session.
 */
void j1939_sk_errqueue(struct j1939_session *session,
		       enum j1939_sk_errqueue_type type)
{
	struct j1939_priv *priv = session->priv;
	struct j1939_sock *jsk;

	if (session->sk) {
		/* send TX notifications to the socket of origin */
		__j1939_sk_errqueue(session, session->sk, type);
		return;
	}

	/* spread RX notifications to all sockets subscribed to this session */
	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		if (j1939_sk_recv_match_one(jsk, &session->skcb))
			__j1939_sk_errqueue(session, &jsk->sk, type);
	}
	read_unlock_bh(&priv->j1939_socks_lock);
};
1121
j1939_sk_send_loop_abort(struct sock * sk,int err)1122 void j1939_sk_send_loop_abort(struct sock *sk, int err)
1123 {
1124 struct j1939_sock *jsk = j1939_sk(sk);
1125
1126 if (jsk->state & J1939_SOCK_ERRQUEUE)
1127 return;
1128
1129 sk->sk_err = err;
1130
1131 sk_error_report(sk);
1132 }
1133
/* Split the user data in @msg into per-segment skbs and queue them on a
 * transport-protocol session.
 *
 * If the socket already holds an incomplete session (message announced
 * over several sendmsg() calls), the new data must exactly complete the
 * announced total size, otherwise -EIO is returned.  Without an existing
 * session, the first segment creates one via j1939_tp_send() and, if it
 * is first in the socket's queue, activates it.
 *
 * Returns the number of bytes queued (possibly fewer than @size on
 * -EINTR/-EAGAIN after partial progress) or a negative errno.
 */
static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
			      struct msghdr *msg, size_t size)

{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
	struct sk_buff *skb;
	size_t segment_size, todo_size;
	int ret = 0;

	/* continuation data must exactly fill up the announced message */
	if (session &&
	    session->total_message_size != session->total_queued_size + size) {
		j1939_session_put(session);
		return -EIO;
	}

	todo_size = size;

	do {
		struct j1939_sk_buff_cb *skcb;

		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
				     todo_size);

		/* Allocate skb for one segment */
		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
					 &ret);
		if (ret)
			break;

		skcb = j1939_skb_to_cb(skb);

		if (!session) {
			/* at this point the size should be full size
			 * of the session
			 */
			skcb->offset = 0;
			session = j1939_tp_send(priv, skb, size);
			if (IS_ERR(session)) {
				ret = PTR_ERR(session);
				goto kfree_skb;
			}
			if (j1939_sk_queue_session(session)) {
				/* try to activate session if we are
				 * first in the queue
				 */
				if (!j1939_session_activate(session)) {
					j1939_tp_schedule_txtimer(session, 0);
				} else {
					/* activation failed: drop everything
					 * queued on this socket with EBUSY
					 */
					ret = -EBUSY;
					session->err = ret;
					j1939_sk_queue_drop_all(priv, jsk,
								EBUSY);
					break;
				}
			}
		} else {
			/* later segments continue at the current offset */
			skcb->offset = session->total_queued_size;
			j1939_session_skb_queue(session, skb);
		}

		todo_size -= segment_size;
		session->total_queued_size += segment_size;
	} while (todo_size);

	switch (ret) {
	case 0: /* OK */
		if (todo_size)
			netdev_warn(priv->ndev,
				    "no error found and not completely queued?! %zu\n",
				    todo_size);
		ret = size;
		break;
	case -ERESTARTSYS:
		ret = -EINTR;
		fallthrough;
	case -EAGAIN: /* OK */
		/* report partial progress instead of the error, if any */
		if (todo_size != size)
			ret = size - todo_size;
		break;
	default: /* ERROR */
		break;
	}

	if (session)
		j1939_session_put(session);

	return ret;

 kfree_skb:
	kfree_skb(skb);
	return ret;
}
1227
j1939_sk_sendmsg(struct socket * sock,struct msghdr * msg,size_t size)1228 static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
1229 size_t size)
1230 {
1231 struct sock *sk = sock->sk;
1232 struct j1939_sock *jsk = j1939_sk(sk);
1233 struct j1939_priv *priv;
1234 int ifindex;
1235 int ret;
1236
1237 lock_sock(sock->sk);
1238 /* various socket state tests */
1239 if (!(jsk->state & J1939_SOCK_BOUND)) {
1240 ret = -EBADFD;
1241 goto sendmsg_done;
1242 }
1243
1244 priv = jsk->priv;
1245 ifindex = jsk->ifindex;
1246
1247 if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
1248 /* no source address assigned yet */
1249 ret = -EBADFD;
1250 goto sendmsg_done;
1251 }
1252
1253 /* deal with provided destination address info */
1254 if (msg->msg_name) {
1255 struct sockaddr_can *addr = msg->msg_name;
1256
1257 if (msg->msg_namelen < J1939_MIN_NAMELEN) {
1258 ret = -EINVAL;
1259 goto sendmsg_done;
1260 }
1261
1262 if (addr->can_family != AF_CAN) {
1263 ret = -EINVAL;
1264 goto sendmsg_done;
1265 }
1266
1267 if (addr->can_ifindex && addr->can_ifindex != ifindex) {
1268 ret = -EBADFD;
1269 goto sendmsg_done;
1270 }
1271
1272 if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
1273 !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
1274 ret = -EINVAL;
1275 goto sendmsg_done;
1276 }
1277
1278 if (!addr->can_addr.j1939.name &&
1279 addr->can_addr.j1939.addr == J1939_NO_ADDR &&
1280 !sock_flag(sk, SOCK_BROADCAST)) {
1281 /* broadcast, but SO_BROADCAST not set */
1282 ret = -EACCES;
1283 goto sendmsg_done;
1284 }
1285 } else {
1286 if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
1287 !sock_flag(sk, SOCK_BROADCAST)) {
1288 /* broadcast, but SO_BROADCAST not set */
1289 ret = -EACCES;
1290 goto sendmsg_done;
1291 }
1292 }
1293
1294 ret = j1939_sk_send_loop(priv, sk, msg, size);
1295
1296 sendmsg_done:
1297 release_sock(sock->sk);
1298
1299 return ret;
1300 }
1301
j1939_sk_netdev_event_netdown(struct j1939_priv * priv)1302 void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
1303 {
1304 struct j1939_sock *jsk;
1305 int error_code = ENETDOWN;
1306
1307 read_lock_bh(&priv->j1939_socks_lock);
1308 list_for_each_entry(jsk, &priv->j1939_socks, list) {
1309 jsk->sk.sk_err = error_code;
1310 if (!sock_flag(&jsk->sk, SOCK_DEAD))
1311 sk_error_report(&jsk->sk);
1312
1313 j1939_sk_queue_drop_all(priv, jsk, error_code);
1314 }
1315 read_unlock_bh(&priv->j1939_socks_lock);
1316 }
1317
/* Netdevice is being unregistered: forcibly unbind every still-bound
 * socket on @priv, dropping the per-socket references as if the socket
 * had rebound itself.  The socks list is rescanned from the start after
 * every drop of j1939_socks_lock, since the list may change while
 * unlocked.
 */
void j1939_sk_netdev_event_unregister(struct j1939_priv *priv)
{
	struct sock *sk;
	struct j1939_sock *jsk;
	bool wait_rcu = false;

rescan: /* The caller is holding a ref on this "priv" via j1939_priv_get_by_ndev(). */
	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		/* Skip if j1939_jsk_add() is not called on this socket. */
		if (!(jsk->state & J1939_SOCK_BOUND))
			continue;
		sk = &jsk->sk;
		sock_hold(sk);
		read_unlock_bh(&priv->j1939_socks_lock);
		/* Check if j1939_jsk_del() is not yet called on this socket after holding
		 * socket's lock, for both j1939_sk_bind() and j1939_sk_release() call
		 * j1939_jsk_del() with socket's lock held.
		 */
		lock_sock(sk);
		if (jsk->state & J1939_SOCK_BOUND) {
			/* Neither j1939_sk_bind() nor j1939_sk_release() called j1939_jsk_del().
			 * Make this socket no longer bound, by pretending as if j1939_sk_bind()
			 * dropped old references but did not get new references.
			 */
			j1939_jsk_del(priv, jsk);
			j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
			j1939_netdev_stop(priv);
			/* Call j1939_priv_put() now and prevent j1939_sk_sock_destruct() from
			 * calling the corresponding j1939_priv_put().
			 *
			 * j1939_sk_sock_destruct() is supposed to call j1939_priv_put() after
			 * an RCU grace period. But since the caller is holding a ref on this
			 * "priv", we can defer synchronize_rcu() until immediately before
			 * the caller calls j1939_priv_put().
			 */
			j1939_priv_put(priv);
			jsk->priv = NULL;
			wait_rcu = true;
		}
		release_sock(sk);
		sock_put(sk);
		/* restart: the list may have changed while unlocked */
		goto rescan;
	}
	read_unlock_bh(&priv->j1939_socks_lock);
	if (wait_rcu)
		synchronize_rcu();
}
1366
j1939_sk_no_ioctlcmd(struct socket * sock,unsigned int cmd,unsigned long arg)1367 static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1368 unsigned long arg)
1369 {
1370 /* no ioctls for socket layer -> hand it down to NIC layer */
1371 return -ENOIOCTLCMD;
1372 }
1373
/* proto_ops table for J1939 (SOCK_DGRAM) sockets; operations without a
 * J1939 implementation use the generic sock_no_*() stubs.
 */
static const struct proto_ops j1939_ops = {
	.family = PF_CAN,
	.release = j1939_sk_release,
	.bind = j1939_sk_bind,
	.connect = j1939_sk_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = j1939_sk_getname,
	.poll = datagram_poll,
	.ioctl = j1939_sk_no_ioctlcmd,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = j1939_sk_setsockopt,
	.getsockopt = j1939_sk_getsockopt,
	.sendmsg = j1939_sk_sendmsg,
	.recvmsg = j1939_sk_recvmsg,
	.mmap = sock_no_mmap,
};
1392
/* Protocol descriptor: sizes each socket as a struct j1939_sock and
 * initializes it via j1939_sk_init().
 */
static struct proto j1939_proto __read_mostly = {
	.name = "CAN_J1939",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct j1939_sock),
	.init = j1939_sk_init,
};
1399
/* Registration record binding protocol number CAN_J1939 (datagram type)
 * to the ops and proto defined above.
 */
const struct can_proto j1939_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_J1939,
	.ops = &j1939_ops,
	.prot = &j1939_proto,
};
1406