// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
//                         Pieter Beyens <pieter.beyens@eia.be>
// Copyright (c) 2010-2011 EIA Electronics,
//                         Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2018 Protonic,
//                         Robin van der Gracht <robin@protonic.nl>
// Copyright (c) 2017-2019 Pengutronix,
//                         Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
//                         Oleksij Rempel <kernel@pengutronix.de>

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/can/can-ml.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/errqueue.h>
#include <linux/if_arp.h>

#include "j1939-priv.h"

#define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)

/* conversion function between struct sock::sk_priority from linux and
 * j1939 priority field
 */
static inline priority_t j1939_prio(u32 sk_priority)
{
	sk_priority = min(sk_priority, 7U);

	return 7 - sk_priority;
}

static inline u32 j1939_to_sk_priority(priority_t prio)
{
	return 7 - prio;
}

/* function to see if pgn is to be evaluated */
static inline bool j1939_pgn_is_valid(pgn_t pgn)
{
	return pgn <= J1939_PGN_MAX;
}

/* test function to avoid non-zero DA placeholder for pdu1 pgn's */
static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
{
	if (j1939_pgn_is_pdu1(pgn))
		return !(pgn & 0xff);
	else
		return true;
}

static inline void j1939_sock_pending_add(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	atomic_inc(&jsk->skb_pending);
}

static int j1939_sock_pending_get(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	return atomic_read(&jsk->skb_pending);
}

void j1939_sock_pending_del(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* atomic_dec_return returns the new value */
	if (!atomic_dec_return(&jsk->skb_pending))
		wake_up(&jsk->waitq);	/* no pending SKB's */
}

static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	jsk->state |= J1939_SOCK_BOUND;
	j1939_priv_get(priv);

	spin_lock_bh(&priv->j1939_socks_lock);
	list_add_tail(&jsk->list, &priv->j1939_socks);
	spin_unlock_bh(&priv->j1939_socks_lock);
}

static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	spin_lock_bh(&priv->j1939_socks_lock);
	list_del_init(&jsk->list);
	spin_unlock_bh(&priv->j1939_socks_lock);

	j1939_priv_put(priv);
	jsk->state &= ~J1939_SOCK_BOUND;
}

static bool j1939_sk_queue_session(struct j1939_session *session)
{
	struct j1939_sock *jsk = j1939_sk(session->sk);
	bool empty;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	empty = list_empty(&jsk->sk_session_queue);
	j1939_session_get(session);
	list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
	j1939_sock_pending_add(&jsk->sk);

	return empty;
}

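/* Return the last session in the socket's transmit queue if it is still
 * incomplete (not all of its bytes have been queued yet). The caller
 * receives a reference and must drop it with j1939_session_put().
 */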
static struct
j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
{
	struct j1939_session *session = NULL;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	if (!list_empty(&jsk->sk_session_queue)) {
		session = list_last_entry(&jsk->sk_session_queue,
					  struct j1939_session,
					  sk_session_queue_entry);
		if (session->total_queued_size == session->total_message_size)
			session = NULL;
		else
			j1939_session_get(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);

	return session;
}

static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
				    struct j1939_sock *jsk, int err)
{
	struct j1939_session *session, *tmp;

	netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
	spin_lock_bh(&jsk->sk_session_queue_lock);
	list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
				 sk_session_queue_entry) {
		list_del_init(&session->sk_session_queue_entry);
		session->err = err;
		j1939_session_put(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}

static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
{
	struct j1939_sock *jsk;
	struct j1939_session *first;
	int err;

	/* RX sessions don't have a socket (yet) */
	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);
	lockdep_assert_held(&jsk->sk_session_queue_lock);

	err = session->err;

	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);

	/* Someone else has already activated the next session */
	if (first != session)
		return;

activate_next:
	list_del_init(&first->sk_session_queue_entry);
	j1939_session_put(first);
	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);
	if (!first)
		return;

	if (WARN_ON_ONCE(j1939_session_activate(first))) {
		first->err = -EBUSY;
		goto activate_next;
	} else {
		/* Give receiver some time (arbitrarily chosen) to recover */
		int time_ms = 0;

		if (err)
			time_ms = 10 + prandom_u32_max(16);

		j1939_tp_schedule_txtimer(first, time_ms);
	}
}

void j1939_sk_queue_activate_next(struct j1939_session *session)
{
	struct j1939_sock *jsk;

	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);

	spin_lock_bh(&jsk->sk_session_queue_lock);
	j1939_sk_queue_activate_next_locked(session);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}

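/* Check whether a received packet's destination matches this socket:
 * promiscuous sockets accept everything; otherwise the destination NAME
 * or address must match the bound source, broadcasts require SO_BROADCAST,
 * and connected sockets additionally filter on the configured peer and PGN.
 */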
static bool j1939_sk_match_dst(struct j1939_sock *jsk,
			       const struct j1939_sk_buff_cb *skcb)
{
	if ((jsk->state & J1939_SOCK_PROMISC))
		return true;

	/* Destination address filter */
	if (jsk->addr.src_name && skcb->addr.dst_name) {
		if (jsk->addr.src_name != skcb->addr.dst_name)
			return false;
	} else {
		/* receive (all sockets) if
		 * - all packets that match our bind() address
		 * - all broadcast on a socket if SO_BROADCAST
		 *   is set
		 */
		if (j1939_address_is_unicast(skcb->addr.da)) {
			if (jsk->addr.sa != skcb->addr.da)
				return false;
		} else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
			/* receiving broadcast without SO_BROADCAST
			 * flag is not allowed
			 */
			return false;
		}
	}

	/* Source address filter */
	if (jsk->state & J1939_SOCK_CONNECTED) {
		/* receive (all sockets) if
		 * - all packets that match our connect() name or address
		 */
		if (jsk->addr.dst_name && skcb->addr.src_name) {
			if (jsk->addr.dst_name != skcb->addr.src_name)
				return false;
		} else {
			if (jsk->addr.da != skcb->addr.sa)
				return false;
		}
	}

	/* PGN filter */
	if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
	    jsk->pgn_rx_filter != skcb->addr.pgn)
		return false;

	return true;
}

/* matches skb control buffer (addr) with a j1939 filter */
static bool
j1939_sk_match_filter(struct j1939_sock *jsk,
		      const struct j1939_sk_buff_cb *skcb)
{
	const struct j1939_filter *f = jsk->filters;
	int nfilter = jsk->nfilters;

	if (!nfilter)
		/* receive all when no filters are assigned */
		return true;

	for (; nfilter; ++f, --nfilter) {
		if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
			continue;
		if ((skcb->addr.sa & f->addr_mask) != f->addr)
			continue;
		if ((skcb->addr.src_name & f->name_mask) != f->name)
			continue;
		return true;
	}
	return false;
}

static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
				    const struct j1939_sk_buff_cb *skcb)
{
	if (!(jsk->state & J1939_SOCK_BOUND))
		return false;

	if (!j1939_sk_match_dst(jsk, skcb))
		return false;

	if (!j1939_sk_match_filter(jsk, skcb))
		return false;

	return true;
}

static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
	const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;

	if (oskb->sk == &jsk->sk)
		return;

	if (!j1939_sk_recv_match_one(jsk, oskcb))
		return;

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb) {
		pr_warn("skb clone failed\n");
		return;
	}
	can_skb_set_owner(skb, oskb->sk);

	skcb = j1939_skb_to_cb(skb);
	skcb->msg_flags &= ~(MSG_DONTROUTE);
	if (skb->sk)
		skcb->msg_flags |= MSG_DONTROUTE;

	if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
		kfree_skb(skb);
}

bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
{
	struct j1939_sock *jsk;
	bool match = false;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		match = j1939_sk_recv_match_one(jsk, skcb);
		if (match)
			break;
	}
	spin_unlock_bh(&priv->j1939_socks_lock);

	return match;
}

void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
	struct j1939_sock *jsk;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		j1939_sk_recv_one(jsk, skb);
	}
	spin_unlock_bh(&priv->j1939_socks_lock);
}

static void j1939_sk_sock_destruct(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* This function will be called by the generic networking code, when
	 * the socket is ultimately closed (sk->sk_destruct).
	 *
	 * The race between
	 * - processing a received CAN frame
	 *   (can_receive -> j1939_can_recv)
	 *   and accessing j1939_priv
	 * ... and ...
	 * - closing a socket
	 *   (j1939_can_rx_unregister -> can_rx_unregister)
	 *   and calling the final j1939_priv_put()
	 *
	 * is avoided by calling the final j1939_priv_put() from this
	 * RCU deferred cleanup call.
	 */
	if (jsk->priv) {
		j1939_priv_put(jsk->priv);
		jsk->priv = NULL;
	}

	/* call generic CAN sock destruct */
	can_sock_destruct(sk);
}

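/* Initialize a freshly created socket: no interface, no addresses and no RX
 * PGN filter yet; the default send priority corresponds to J1939 priority 6.
 */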
static int j1939_sk_init(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* Ensure that "sk" is first member in "struct j1939_sock", so that we
	 * can skip it during memset().
	 */
	BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
	memset((void *)jsk + sizeof(jsk->sk), 0x0,
	       sizeof(*jsk) - sizeof(jsk->sk));

	INIT_LIST_HEAD(&jsk->list);
	init_waitqueue_head(&jsk->waitq);
	jsk->sk.sk_priority = j1939_to_sk_priority(6);
	jsk->sk.sk_reuse = 1; /* by default */
	jsk->addr.sa = J1939_NO_ADDR;
	jsk->addr.da = J1939_NO_ADDR;
	jsk->addr.pgn = J1939_NO_PGN;
	jsk->pgn_rx_filter = J1939_NO_PGN;
	atomic_set(&jsk->skb_pending, 0);
	spin_lock_init(&jsk->sk_session_queue_lock);
	INIT_LIST_HEAD(&jsk->sk_session_queue);

	/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_destruct = j1939_sk_sock_destruct;
	sk->sk_protocol = CAN_J1939;

	return 0;
}

static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
{
	if (!addr)
		return -EDESTADDRREQ;
	if (len < J1939_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;
	if (!addr->can_ifindex)
		return -ENODEV;
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
	    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
		return -EINVAL;

	return 0;
}

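/* Typical userspace usage, as an illustrative sketch only (not compiled
 * here; interface name "can0" and source address 0x20 are example values,
 * see Documentation/networking/j1939.rst for the authoritative examples):
 *
 *	int sock = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
 *	struct sockaddr_can baddr = {
 *		.can_family = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *		.can_addr.j1939 = {
 *			.name = J1939_NO_NAME,
 *			.addr = 0x20,
 *			.pgn = J1939_NO_PGN,
 *		},
 *	};
 *
 *	bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
 */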
static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	struct j1939_priv *priv;
	struct sock *sk;
	struct net *net;
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	priv = jsk->priv;
	sk = sock->sk;
	net = sock_net(sk);

	/* Already bound to an interface? */
	if (jsk->state & J1939_SOCK_BOUND) {
		/* A re-bind() to a different interface is not
		 * supported.
		 */
		if (jsk->ifindex != addr->can_ifindex) {
			ret = -EINVAL;
			goto out_release_sock;
		}

		/* drop old references */
		j1939_jsk_del(priv, jsk);
		j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
	} else {
		struct can_ml_priv *can_ml;
		struct net_device *ndev;

		ndev = dev_get_by_index(net, addr->can_ifindex);
		if (!ndev) {
			ret = -ENODEV;
			goto out_release_sock;
		}

		can_ml = can_get_ml_priv(ndev);
		if (!can_ml) {
			dev_put(ndev);
			ret = -ENODEV;
			goto out_release_sock;
		}

		if (!(ndev->flags & IFF_UP)) {
			dev_put(ndev);
			ret = -ENETDOWN;
			goto out_release_sock;
		}

		priv = j1939_netdev_start(ndev);
		dev_put(ndev);
		if (IS_ERR(priv)) {
			ret = PTR_ERR(priv);
			goto out_release_sock;
		}

		jsk->ifindex = addr->can_ifindex;

		/* the corresponding j1939_priv_put() is called via
		 * sk->sk_destruct, which points to j1939_sk_sock_destruct()
		 */
		j1939_priv_get(priv);
		jsk->priv = priv;
	}

	/* set default receive pgn filter */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
	jsk->addr.src_name = addr->can_addr.j1939.name;
	jsk->addr.sa = addr->can_addr.j1939.addr;

	/* get new references */
	ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
	if (ret) {
		j1939_netdev_stop(priv);
		goto out_release_sock;
	}

	j1939_jsk_add(priv, jsk);

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}

static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
			    int len, int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	/* bind() before connect() is mandatory */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	/* A connect() to a different interface is not supported. */
	if (jsk->ifindex != addr->can_ifindex) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	if (!addr->can_addr.j1939.name &&
	    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
	    !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
		/* broadcast, but SO_BROADCAST not set */
		ret = -EACCES;
		goto out_release_sock;
	}

	jsk->addr.dst_name = addr->can_addr.j1939.name;
	jsk->addr.da = addr->can_addr.j1939.addr;

	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->addr.pgn = addr->can_addr.j1939.pgn;

	jsk->state |= J1939_SOCK_CONNECTED;

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}

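/* Fill a sockaddr_can with either the local (bound) or the peer (connected)
 * J1939 address; used by getsockname() and getpeername().
 */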
static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
				       const struct j1939_sock *jsk, int peer)
{
	/* There are two holes (2 bytes and 3 bytes) to clear to avoid
	 * leaking kernel information to user space.
	 */
	memset(addr, 0, J1939_MIN_NAMELEN);

	addr->can_family = AF_CAN;
	addr->can_ifindex = jsk->ifindex;
	addr->can_addr.j1939.pgn = jsk->addr.pgn;
	if (peer) {
		addr->can_addr.j1939.name = jsk->addr.dst_name;
		addr->can_addr.j1939.addr = jsk->addr.da;
	} else {
		addr->can_addr.j1939.name = jsk->addr.src_name;
		addr->can_addr.j1939.addr = jsk->addr.sa;
	}
}

static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret = 0;

	lock_sock(sk);

	if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
		ret = -EADDRNOTAVAIL;
		goto failure;
	}

	j1939_sk_sock2sockaddr_can(addr, jsk, peer);
	ret = J1939_MIN_NAMELEN;

failure:
	release_sock(sk);

	return ret;
}

static int j1939_sk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk;

	if (!sk)
		return 0;

	lock_sock(sk);
	jsk = j1939_sk(sk);

	if (jsk->state & J1939_SOCK_BOUND) {
		struct j1939_priv *priv = jsk->priv;

		if (wait_event_interruptible(jsk->waitq,
					     !j1939_sock_pending_get(&jsk->sk))) {
			j1939_cancel_active_session(priv, sk);
			j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
		}

		j1939_jsk_del(priv, jsk);

		j1939_local_ecu_put(priv, jsk->addr.src_name,
				    jsk->addr.sa);

		j1939_netdev_stop(priv);
	}

	kfree(jsk->filters);
	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
				    unsigned int optlen, int flag)
{
	int tmp;

	if (optlen != sizeof(tmp))
		return -EINVAL;
	if (copy_from_sockptr(&tmp, optval, optlen))
		return -EFAULT;
	lock_sock(&jsk->sk);
	if (tmp)
		jsk->state |= flag;
	else
		jsk->state &= ~flag;
	release_sock(&jsk->sk);
	return tmp;
}

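/* Illustrative sketch of installing a receive filter from userspace (not
 * compiled here; the constants come from <linux/can/j1939.h>):
 *
 *	struct j1939_filter filt[] = { {
 *		.pgn = J1939_PGN_ADDRESS_CLAIMED,
 *		.pgn_mask = J1939_PGN_PDU1_MAX,
 *	} };
 *
 *	setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, &filt, sizeof(filt));
 */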
static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int tmp, count = 0, ret = 0;
	struct j1939_filter *filters = NULL, *ofilters;

	if (level != SOL_CAN_J1939)
		return -EINVAL;

	switch (optname) {
	case SO_J1939_FILTER:
		if (!sockptr_is_null(optval) && optlen != 0) {
			struct j1939_filter *f;
			int c;

			if (optlen % sizeof(*filters) != 0)
				return -EINVAL;

			if (optlen > J1939_FILTER_MAX *
			    sizeof(struct j1939_filter))
				return -EINVAL;

			count = optlen / sizeof(*filters);
			filters = memdup_sockptr(optval, optlen);
			if (IS_ERR(filters))
				return PTR_ERR(filters);

			for (f = filters, c = count; c; f++, c--) {
				f->name &= f->name_mask;
				f->pgn &= f->pgn_mask;
				f->addr &= f->addr_mask;
			}
		}

		lock_sock(&jsk->sk);
		ofilters = jsk->filters;
		jsk->filters = filters;
		jsk->nfilters = count;
		release_sock(&jsk->sk);
		kfree(ofilters);
		return 0;
	case SO_J1939_PROMISC:
		return j1939_sk_setsockopt_flag(jsk, optval, optlen,
						J1939_SOCK_PROMISC);
	case SO_J1939_ERRQUEUE:
		ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
					       J1939_SOCK_ERRQUEUE);
		if (ret < 0)
			return ret;

		if (!(jsk->state & J1939_SOCK_ERRQUEUE))
			skb_queue_purge(&sk->sk_error_queue);
		return ret;
	case SO_J1939_SEND_PRIO:
		if (optlen != sizeof(tmp))
			return -EINVAL;
		if (copy_from_sockptr(&tmp, optval, optlen))
			return -EFAULT;
		if (tmp < 0 || tmp > 7)
			return -EDOM;
		if (tmp < 2 && !capable(CAP_NET_ADMIN))
			return -EPERM;
		lock_sock(&jsk->sk);
		jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
		release_sock(&jsk->sk);
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}

static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret, ulen;
	/* set defaults for using 'int' properties */
	int tmp = 0;
	int len = sizeof(tmp);
	void *val = &tmp;

	if (level != SOL_CAN_J1939)
		return -EINVAL;
	if (get_user(ulen, optlen))
		return -EFAULT;
	if (ulen < 0)
		return -EINVAL;

	lock_sock(&jsk->sk);
	switch (optname) {
	case SO_J1939_PROMISC:
		tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
		break;
	case SO_J1939_ERRQUEUE:
		tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
		break;
	case SO_J1939_SEND_PRIO:
		tmp = j1939_prio(jsk->sk.sk_priority);
		break;
	default:
		ret = -ENOPROTOOPT;
		goto no_copy;
	}

	/* copy to user, based on 'len' & 'val';
	 * most sockopts are 'int' properties and leave 'len' & 'val'
	 * unchanged, modifying only 'tmp'
	 */
	if (len > ulen)
		ret = -EFAULT;
	else if (put_user(len, optlen))
		ret = -EFAULT;
	else if (copy_to_user(optval, val, len))
		ret = -EFAULT;
	else
		ret = 0;
 no_copy:
	release_sock(&jsk->sk);
	return ret;
}

static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct j1939_sk_buff_cb *skcb;
	int ret = 0;

	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
		return -EINVAL;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
					  SCM_J1939_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, 0, &ret);
	if (!skb)
		return ret;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	ret = memcpy_to_msg(msg, skb->data, size);
	if (ret < 0) {
		skb_free_datagram(sk, skb);
		return ret;
	}

	skcb = j1939_skb_to_cb(skb);
	if (j1939_address_is_valid(skcb->addr.da))
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
			 sizeof(skcb->addr.da), &skcb->addr.da);

	if (skcb->addr.dst_name)
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
			 sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);

	put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
		 sizeof(skcb->priority), &skcb->priority);

	if (msg->msg_name) {
		struct sockaddr_can *paddr = msg->msg_name;

		msg->msg_namelen = J1939_MIN_NAMELEN;
		memset(msg->msg_name, 0, msg->msg_namelen);
		paddr->can_family = AF_CAN;
		paddr->can_ifindex = skb->skb_iif;
		paddr->can_addr.j1939.name = skcb->addr.src_name;
		paddr->can_addr.j1939.addr = skcb->addr.sa;
		paddr->can_addr.j1939.pgn = skcb->addr.pgn;
	}

	sock_recv_ts_and_drops(msg, sk, skb);
	msg->msg_flags |= skcb->msg_flags;
	skb_free_datagram(sk, skb);

	return size;
}

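/* Allocate an skb for (a segment of) an outgoing message: reserve CAN
 * headroom, copy the payload from the msghdr, seed the control block from
 * the socket defaults and override destination/PGN from msg_name when one
 * is supplied.
 */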
static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
					   struct sock *sk,
					   struct msghdr *msg, size_t size,
					   int *errcode)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;
	int ret;

	skb = sock_alloc_send_skb(sk,
				  size +
				  sizeof(struct can_frame) -
				  sizeof(((struct can_frame *)NULL)->data) +
				  sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		goto failure;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = ndev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb_reserve(skb, offsetof(struct can_frame, data));

	ret = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (ret < 0)
		goto free_skb;

	skb->dev = ndev;

	skcb = j1939_skb_to_cb(skb);
	memset(skcb, 0, sizeof(*skcb));
	skcb->addr = jsk->addr;
	skcb->priority = j1939_prio(sk->sk_priority);

	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		if (addr->can_addr.j1939.name ||
		    addr->can_addr.j1939.addr != J1939_NO_ADDR) {
			skcb->addr.dst_name = addr->can_addr.j1939.name;
			skcb->addr.da = addr->can_addr.j1939.addr;
		}
		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
			skcb->addr.pgn = addr->can_addr.j1939.pgn;
	}

	*errcode = ret;
	return skb;

free_skb:
	kfree_skb(skb);
failure:
	*errcode = ret;
	return NULL;
}

static size_t j1939_sk_opt_stats_get_size(enum j1939_sk_errqueue_type type)
{
	switch (type) {
	case J1939_ERRQUEUE_RX_RTS:
		return
			nla_total_size(sizeof(u32)) + /* J1939_NLA_TOTAL_SIZE */
			nla_total_size(sizeof(u32)) + /* J1939_NLA_PGN */
			nla_total_size(sizeof(u64)) + /* J1939_NLA_SRC_NAME */
			nla_total_size(sizeof(u64)) + /* J1939_NLA_DEST_NAME */
			nla_total_size(sizeof(u8)) +  /* J1939_NLA_SRC_ADDR */
			nla_total_size(sizeof(u8)) +  /* J1939_NLA_DEST_ADDR */
			0;
	default:
		return
			nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
			0;
	}
}

static struct sk_buff *
j1939_sk_get_timestamping_opt_stats(struct j1939_session *session,
				    enum j1939_sk_errqueue_type type)
{
	struct sk_buff *stats;
	u32 size;

	stats = alloc_skb(j1939_sk_opt_stats_get_size(type), GFP_ATOMIC);
	if (!stats)
		return NULL;

	if (session->skcb.addr.type == J1939_SIMPLE)
		size = session->total_message_size;
	else
		size = min(session->pkt.tx_acked * 7,
			   session->total_message_size);

	switch (type) {
	case J1939_ERRQUEUE_RX_RTS:
		nla_put_u32(stats, J1939_NLA_TOTAL_SIZE,
			    session->total_message_size);
		nla_put_u32(stats, J1939_NLA_PGN,
			    session->skcb.addr.pgn);
		nla_put_u64_64bit(stats, J1939_NLA_SRC_NAME,
				  session->skcb.addr.src_name, J1939_NLA_PAD);
		nla_put_u64_64bit(stats, J1939_NLA_DEST_NAME,
				  session->skcb.addr.dst_name, J1939_NLA_PAD);
		nla_put_u8(stats, J1939_NLA_SRC_ADDR,
			   session->skcb.addr.sa);
		nla_put_u8(stats, J1939_NLA_DEST_ADDR,
			   session->skcb.addr.da);
		break;
	default:
		nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);
	}

	return stats;
}

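/* Queue a notification skb on the socket's error queue, honouring the
 * socket's SOF_TIMESTAMPING_* flags; TX notifications report ACK/SCHED/ABORT
 * of the socket's own sessions, RX notifications carry RTS/DPO/ABORT session
 * metadata as opt_stats.
 */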
static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
				enum j1939_sk_errqueue_type type)
{
	struct j1939_priv *priv = session->priv;
	struct j1939_sock *jsk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	char *state = "UNK";
	int err;

	jsk = j1939_sk(sk);

	if (!(jsk->state & J1939_SOCK_ERRQUEUE))
		return;

	switch (type) {
	case J1939_ERRQUEUE_TX_ACK:
		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
			return;
		break;
	case J1939_ERRQUEUE_TX_SCHED:
		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
			return;
		break;
	case J1939_ERRQUEUE_TX_ABORT:
		break;
	case J1939_ERRQUEUE_RX_RTS:
		fallthrough;
	case J1939_ERRQUEUE_RX_DPO:
		fallthrough;
	case J1939_ERRQUEUE_RX_ABORT:
		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
			return;
		break;
	default:
		netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
	}

	skb = j1939_sk_get_timestamping_opt_stats(session, type);
	if (!skb)
		return;

	skb->tstamp = ktime_get_real();

	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	switch (type) {
	case J1939_ERRQUEUE_TX_ACK:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
		serr->ee.ee_info = SCM_TSTAMP_ACK;
		state = "TX ACK";
		break;
	case J1939_ERRQUEUE_TX_SCHED:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
		serr->ee.ee_info = SCM_TSTAMP_SCHED;
		state = "TX SCH";
		break;
	case J1939_ERRQUEUE_TX_ABORT:
		serr->ee.ee_errno = session->err;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
		state = "TX ABT";
		break;
	case J1939_ERRQUEUE_RX_RTS:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_RTS;
		state = "RX RTS";
		break;
	case J1939_ERRQUEUE_RX_DPO:
		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_DPO;
		state = "RX DPO";
		break;
	case J1939_ERRQUEUE_RX_ABORT:
		serr->ee.ee_errno = session->err;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_RX_ABORT;
		state = "RX ABT";
		break;
	}

	serr->opt_stats = true;
	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		serr->ee.ee_data = session->tskey;

	netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
		   __func__, session, session->tskey, state);
	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}

void j1939_sk_errqueue(struct j1939_session *session,
		       enum j1939_sk_errqueue_type type)
{
	struct j1939_priv *priv = session->priv;
	struct j1939_sock *jsk;

	if (session->sk) {
		/* send TX notifications to the socket of origin */
		__j1939_sk_errqueue(session, session->sk, type);
		return;
	}

	/* spread RX notifications to all sockets subscribed to this session */
	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		if (j1939_sk_recv_match_one(jsk, &session->skcb))
			__j1939_sk_errqueue(session, &jsk->sk, type);
	}
	spin_unlock_bh(&priv->j1939_socks_lock);
}

void j1939_sk_send_loop_abort(struct sock *sk, int err)
{
	sk->sk_err = err;

	sk_error_report(sk);
}

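/* Split the user buffer into segments of at most J1939_MAX_TP_PACKET_SIZE,
 * append them to an existing incomplete session or start a new one, and
 * activate the session if it is the first in the socket's queue. Returns
 * the number of bytes queued or a negative error.
 */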
static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
			      struct msghdr *msg, size_t size)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
	struct sk_buff *skb;
	size_t segment_size, todo_size;
	int ret = 0;

	if (session &&
	    session->total_message_size != session->total_queued_size + size) {
		j1939_session_put(session);
		return -EIO;
	}

	todo_size = size;

	while (todo_size) {
		struct j1939_sk_buff_cb *skcb;

		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
				     todo_size);

		/* Allocate skb for one segment */
		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
					 &ret);
		if (ret)
			break;

		skcb = j1939_skb_to_cb(skb);

		if (!session) {
			/* at this point the size should be the full size
			 * of the session
			 */
			skcb->offset = 0;
			session = j1939_tp_send(priv, skb, size);
			if (IS_ERR(session)) {
				ret = PTR_ERR(session);
				goto kfree_skb;
			}
			if (j1939_sk_queue_session(session)) {
				/* try to activate the session if we are
				 * first in the queue
				 */
				if (!j1939_session_activate(session)) {
					j1939_tp_schedule_txtimer(session, 0);
				} else {
					ret = -EBUSY;
					session->err = ret;
					j1939_sk_queue_drop_all(priv, jsk,
								EBUSY);
					break;
				}
			}
		} else {
			skcb->offset = session->total_queued_size;
			j1939_session_skb_queue(session, skb);
		}

		todo_size -= segment_size;
		session->total_queued_size += segment_size;
	}

	switch (ret) {
	case 0: /* OK */
		if (todo_size)
			netdev_warn(priv->ndev,
				    "no error found and not completely queued?! %zu\n",
				    todo_size);
		ret = size;
		break;
	case -ERESTARTSYS:
		ret = -EINTR;
		fallthrough;
	case -EAGAIN: /* OK */
		if (todo_size != size)
			ret = size - todo_size;
		break;
	default: /* ERROR */
		break;
	}

	if (session)
		j1939_session_put(session);

	return ret;

 kfree_skb:
	kfree_skb(skb);
	return ret;
}

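/* Illustrative sendto() sketch from userspace (not compiled here; the
 * destination address 0x30 and PGN 0x12300 are arbitrary example values):
 *
 *	struct sockaddr_can daddr = {
 *		.can_family = AF_CAN,
 *		.can_addr.j1939 = {
 *			.name = J1939_NO_NAME,
 *			.addr = 0x30,
 *			.pgn = 0x12300,
 *		},
 *	};
 *
 *	sendto(sock, data, sizeof(data), 0,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 */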
static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_priv *priv;
	int ifindex;
	int ret;

	lock_sock(sock->sk);
	/* various socket state tests */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EBADFD;
		goto sendmsg_done;
	}

	priv = jsk->priv;
	ifindex = jsk->ifindex;

	if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
		/* no source address assigned yet */
		ret = -EBADFD;
		goto sendmsg_done;
	}

	/* deal with provided destination address info */
	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		if (msg->msg_namelen < J1939_MIN_NAMELEN) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (addr->can_family != AF_CAN) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (addr->can_ifindex && addr->can_ifindex != ifindex) {
			ret = -EBADFD;
			goto sendmsg_done;
		}

		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
		    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (!addr->can_addr.j1939.name &&
		    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			/* broadcast, but SO_BROADCAST not set */
			ret = -EACCES;
			goto sendmsg_done;
		}
	} else {
		if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			/* broadcast, but SO_BROADCAST not set */
			ret = -EACCES;
			goto sendmsg_done;
		}
	}

	ret = j1939_sk_send_loop(priv, sk, msg, size);

 sendmsg_done:
	release_sock(sock->sk);

	return ret;
}

void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
{
	struct j1939_sock *jsk;
	int error_code = ENETDOWN;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		jsk->sk.sk_err = error_code;
		if (!sock_flag(&jsk->sk, SOCK_DEAD))
			sk_error_report(&jsk->sk);

		j1939_sk_queue_drop_all(priv, jsk, error_code);
	}
	spin_unlock_bh(&priv->j1939_socks_lock);
}

static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops j1939_ops = {
	.family = PF_CAN,
	.release = j1939_sk_release,
	.bind = j1939_sk_bind,
	.connect = j1939_sk_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = j1939_sk_getname,
	.poll = datagram_poll,
	.ioctl = j1939_sk_no_ioctlcmd,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = j1939_sk_setsockopt,
	.getsockopt = j1939_sk_getsockopt,
	.sendmsg = j1939_sk_sendmsg,
	.recvmsg = j1939_sk_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct proto j1939_proto __read_mostly = {
	.name = "CAN_J1939",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct j1939_sock),
	.init = j1939_sk_init,
};

const struct can_proto j1939_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_J1939,
	.ops = &j1939_ops,
	.prot = &j1939_proto,
};