// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
//                         Pieter Beyens <pieter.beyens@eia.be>
// Copyright (c) 2010-2011 EIA Electronics,
//                         Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2018 Protonic,
//                         Robin van der Gracht <robin@protonic.nl>
// Copyright (c) 2017-2019 Pengutronix,
//                         Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
//                         Oleksij Rempel <kernel@pengutronix.de>

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/can/can-ml.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/errqueue.h>
#include <linux/if_arp.h>

#include "j1939-priv.h"

#define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)

/* conversion function between struct sock::sk_priority from linux and
 * j1939 priority field
 */
static inline priority_t j1939_prio(u32 sk_priority)
{
	sk_priority = min(sk_priority, 7U);

	return 7 - sk_priority;
}

static inline u32 j1939_to_sk_priority(priority_t prio)
{
	return 7 - prio;
}

/* function to see if pgn is to be evaluated */
static inline bool j1939_pgn_is_valid(pgn_t pgn)
{
	return pgn <= J1939_PGN_MAX;
}

/* test function to avoid non-zero DA placeholder for pdu1 pgn's */
static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
{
	if (j1939_pgn_is_pdu1(pgn))
		return !(pgn & 0xff);
	else
		return true;
}

static inline void j1939_sock_pending_add(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	atomic_inc(&jsk->skb_pending);
}

static int j1939_sock_pending_get(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	return atomic_read(&jsk->skb_pending);
}

void j1939_sock_pending_del(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* atomic_dec_return returns the new value */
	if (!atomic_dec_return(&jsk->skb_pending))
		wake_up(&jsk->waitq); /* no pending SKB's */
}

static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	jsk->state |= J1939_SOCK_BOUND;
	j1939_priv_get(priv);

	spin_lock_bh(&priv->j1939_socks_lock);
	list_add_tail(&jsk->list, &priv->j1939_socks);
	spin_unlock_bh(&priv->j1939_socks_lock);
}

static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	spin_lock_bh(&priv->j1939_socks_lock);
	list_del_init(&jsk->list);
	spin_unlock_bh(&priv->j1939_socks_lock);

	j1939_priv_put(priv);
	jsk->state &= ~J1939_SOCK_BOUND;
}

static bool j1939_sk_queue_session(struct j1939_session *session)
{
	struct j1939_sock *jsk = j1939_sk(session->sk);
	bool empty;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	empty = list_empty(&jsk->sk_session_queue);
	j1939_session_get(session);
	list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
	j1939_sock_pending_add(&jsk->sk);

	return empty;
}

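/* Return the last queued session if it is not yet completely queued
 * (total_queued_size < total_message_size), with a reference held that
 * the caller must drop via j1939_session_put(); otherwise return NULL.
 */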
static struct
j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
{
	struct j1939_session *session = NULL;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	if (!list_empty(&jsk->sk_session_queue)) {
		session = list_last_entry(&jsk->sk_session_queue,
					  struct j1939_session,
					  sk_session_queue_entry);
		if (session->total_queued_size == session->total_message_size)
			session = NULL;
		else
			j1939_session_get(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);

	return session;
}

static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
				    struct j1939_sock *jsk, int err)
{
	struct j1939_session *session, *tmp;

	netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
	spin_lock_bh(&jsk->sk_session_queue_lock);
	list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
				 sk_session_queue_entry) {
		list_del_init(&session->sk_session_queue_entry);
		session->err = err;
		j1939_session_put(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}

static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
{
	struct j1939_sock *jsk;
	struct j1939_session *first;
	int err;

	/* RX sessions don't have a socket (yet) */
	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);
	lockdep_assert_held(&jsk->sk_session_queue_lock);

	err = session->err;

	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);

	/* Someone else has already activated the next session */
	if (first != session)
		return;

activate_next:
	list_del_init(&first->sk_session_queue_entry);
	j1939_session_put(first);
	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);
	if (!first)
		return;

	if (WARN_ON_ONCE(j1939_session_activate(first))) {
		first->err = -EBUSY;
		goto activate_next;
	} else {
		/* Give receiver some time (arbitrarily chosen) to recover */
		int time_ms = 0;

		if (err)
			time_ms = 10 + prandom_u32_max(16);

		j1939_tp_schedule_txtimer(first, time_ms);
	}
}

void j1939_sk_queue_activate_next(struct j1939_session *session)
{
	struct j1939_sock *jsk;

	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);

	spin_lock_bh(&jsk->sk_session_queue_lock);
	j1939_sk_queue_activate_next_locked(session);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}

static bool j1939_sk_match_dst(struct j1939_sock *jsk,
			       const struct j1939_sk_buff_cb *skcb)
{
	if ((jsk->state & J1939_SOCK_PROMISC))
		return true;

	/* Destination address filter */
	if (jsk->addr.src_name && skcb->addr.dst_name) {
		if (jsk->addr.src_name != skcb->addr.dst_name)
			return false;
	} else {
		/* receive (all sockets) if
		 * - all packets that match our bind() address
		 * - all broadcast on a socket if SO_BROADCAST
		 *   is set
		 */
		if (j1939_address_is_unicast(skcb->addr.da)) {
			if (jsk->addr.sa != skcb->addr.da)
				return false;
		} else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
			/* receiving broadcast without SO_BROADCAST
			 * flag is not allowed
			 */
			return false;
		}
	}

	/* Source address filter */
	if (jsk->state & J1939_SOCK_CONNECTED) {
		/* receive (all sockets) if
		 * - all packets that match our connect() name or address
		 */
		if (jsk->addr.dst_name && skcb->addr.src_name) {
			if (jsk->addr.dst_name != skcb->addr.src_name)
				return false;
		} else {
			if (jsk->addr.da != skcb->addr.sa)
				return false;
		}
	}

	/* PGN filter */
	if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
	    jsk->pgn_rx_filter != skcb->addr.pgn)
		return false;

	return true;
}

/* matches skb control buffer (addr) with a j1939 filter */
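/* A received skb is accepted when at least one filter entry matches;
 * within an entry, pgn, addr and name are compared after applying the
 * respective masks.
 */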
static bool j1939_sk_match_filter(struct j1939_sock *jsk,
				  const struct j1939_sk_buff_cb *skcb)
{
	const struct j1939_filter *f = jsk->filters;
	int nfilter = jsk->nfilters;

	if (!nfilter)
		/* receive all when no filters are assigned */
		return true;

	for (; nfilter; ++f, --nfilter) {
		if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
			continue;
		if ((skcb->addr.sa & f->addr_mask) != f->addr)
			continue;
		if ((skcb->addr.src_name & f->name_mask) != f->name)
			continue;
		return true;
	}
	return false;
}

static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
				    const struct j1939_sk_buff_cb *skcb)
{
	if (!(jsk->state & J1939_SOCK_BOUND))
		return false;

	if (!j1939_sk_match_dst(jsk, skcb))
		return false;

	if (!j1939_sk_match_filter(jsk, skcb))
		return false;

	return true;
}

static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
	const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;

	if (oskb->sk == &jsk->sk)
		return;

	if (!j1939_sk_recv_match_one(jsk, oskcb))
		return;

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb) {
		pr_warn("skb clone failed\n");
		return;
	}
	can_skb_set_owner(skb, oskb->sk);

	skcb = j1939_skb_to_cb(skb);
	skcb->msg_flags &= ~(MSG_DONTROUTE);
	if (skb->sk)
		skcb->msg_flags |= MSG_DONTROUTE;

	if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
		kfree_skb(skb);
}

bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
{
	struct j1939_sock *jsk;
	bool match = false;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		match = j1939_sk_recv_match_one(jsk, skcb);
		if (match)
			break;
	}
	spin_unlock_bh(&priv->j1939_socks_lock);

	return match;
}

void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
	struct j1939_sock *jsk;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		j1939_sk_recv_one(jsk, skb);
	}
	spin_unlock_bh(&priv->j1939_socks_lock);
}

static void j1939_sk_sock_destruct(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* This function will be called by the generic networking code, when
	 * the socket is ultimately closed (sk->sk_destruct).
	 *
	 * The race between
	 * - processing a received CAN frame
	 *   (can_receive -> j1939_can_recv)
	 *   and accessing j1939_priv
	 * ... and ...
	 * - closing a socket
	 *   (j1939_can_rx_unregister -> can_rx_unregister)
	 *   and calling the final j1939_priv_put()
	 *
	 * is avoided by calling the final j1939_priv_put() from this
	 * RCU deferred cleanup call.
	 */
	if (jsk->priv) {
		j1939_priv_put(jsk->priv);
		jsk->priv = NULL;
	}

	/* call generic CAN sock destruct */
	can_sock_destruct(sk);
}

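/* Runs once per socket(PF_CAN, SOCK_DGRAM, CAN_J1939) call: set the
 * defaults (send priority 6, no source/destination address, no RX PGN
 * filter) and install the J1939 specific destructor.
 */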
static int j1939_sk_init(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* Ensure that "sk" is first member in "struct j1939_sock", so that we
	 * can skip it during memset().
	 */
	BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
	memset((void *)jsk + sizeof(jsk->sk), 0x0,
	       sizeof(*jsk) - sizeof(jsk->sk));

	INIT_LIST_HEAD(&jsk->list);
	init_waitqueue_head(&jsk->waitq);
	jsk->sk.sk_priority = j1939_to_sk_priority(6);
	jsk->sk.sk_reuse = 1; /* per default */
	jsk->addr.sa = J1939_NO_ADDR;
	jsk->addr.da = J1939_NO_ADDR;
	jsk->addr.pgn = J1939_NO_PGN;
	jsk->pgn_rx_filter = J1939_NO_PGN;
	atomic_set(&jsk->skb_pending, 0);
	spin_lock_init(&jsk->sk_session_queue_lock);
	INIT_LIST_HEAD(&jsk->sk_session_queue);
	sk->sk_destruct = j1939_sk_sock_destruct;
	sk->sk_protocol = CAN_J1939;

	return 0;
}

static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
{
	if (!addr)
		return -EDESTADDRREQ;
	if (len < J1939_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;
	if (!addr->can_ifindex)
		return -ENODEV;
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
	    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
		return -EINVAL;

	return 0;
}

static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	struct j1939_priv *priv;
	struct sock *sk;
	struct net *net;
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	priv = jsk->priv;
	sk = sock->sk;
	net = sock_net(sk);

	/* Already bound to an interface? */
	if (jsk->state & J1939_SOCK_BOUND) {
		/* A re-bind() to a different interface is not
		 * supported.
		 */
		if (jsk->ifindex != addr->can_ifindex) {
			ret = -EINVAL;
			goto out_release_sock;
		}

		/* drop old references */
		j1939_jsk_del(priv, jsk);
		j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
	} else {
		struct can_ml_priv *can_ml;
		struct net_device *ndev;

		ndev = dev_get_by_index(net, addr->can_ifindex);
		if (!ndev) {
			ret = -ENODEV;
			goto out_release_sock;
		}

		can_ml = can_get_ml_priv(ndev);
		if (!can_ml) {
			dev_put(ndev);
			ret = -ENODEV;
			goto out_release_sock;
		}

		if (!(ndev->flags & IFF_UP)) {
			dev_put(ndev);
			ret = -ENETDOWN;
			goto out_release_sock;
		}

		priv = j1939_netdev_start(ndev);
		dev_put(ndev);
		if (IS_ERR(priv)) {
			ret = PTR_ERR(priv);
			goto out_release_sock;
		}

		jsk->ifindex = addr->can_ifindex;

		/* the corresponding j1939_priv_put() is called via
		 * sk->sk_destruct, which points to j1939_sk_sock_destruct()
		 */
		j1939_priv_get(priv);
		jsk->priv = priv;
	}

	/* set default receive pgn filter */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
	jsk->addr.src_name = addr->can_addr.j1939.name;
	jsk->addr.sa = addr->can_addr.j1939.addr;

	/* get new references */
	ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
	if (ret) {
		j1939_netdev_stop(priv);
		goto out_release_sock;
	}

	j1939_jsk_add(priv, jsk);

out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}

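/* Userspace sketch of the connect() semantics handled below. This is an
 * illustration only; the interface index and the peer address/PGN values
 * are arbitrary examples:
 *
 *	struct sockaddr_can peer = {
 *		.can_family = AF_CAN,
 *		.can_ifindex = ifindex_of_bound_device,
 *		.can_addr.j1939 = {
 *			.name = J1939_NO_NAME,
 *			.addr = 0x30,
 *			.pgn = 0x12300,
 *		},
 *	};
 *
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 */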
static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
			    int len, int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	/* bind() before connect() is mandatory */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	/* A connect() to a different interface is not supported. */
	if (jsk->ifindex != addr->can_ifindex) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	if (!addr->can_addr.j1939.name &&
	    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
	    !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
		/* broadcast, but SO_BROADCAST not set */
		ret = -EACCES;
		goto out_release_sock;
	}

	jsk->addr.dst_name = addr->can_addr.j1939.name;
	jsk->addr.da = addr->can_addr.j1939.addr;

	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->addr.pgn = addr->can_addr.j1939.pgn;

	jsk->state |= J1939_SOCK_CONNECTED;

out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}

static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
				       const struct j1939_sock *jsk, int peer)
{
	/* There are two holes (2 bytes and 3 bytes) to clear to avoid
	 * leaking kernel information to user space.
	 */
	memset(addr, 0, J1939_MIN_NAMELEN);

	addr->can_family = AF_CAN;
	addr->can_ifindex = jsk->ifindex;
	addr->can_addr.j1939.pgn = jsk->addr.pgn;
	if (peer) {
		addr->can_addr.j1939.name = jsk->addr.dst_name;
		addr->can_addr.j1939.addr = jsk->addr.da;
	} else {
		addr->can_addr.j1939.name = jsk->addr.src_name;
		addr->can_addr.j1939.addr = jsk->addr.sa;
	}
}

static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret = 0;

	lock_sock(sk);

	if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
		ret = -EADDRNOTAVAIL;
		goto failure;
	}

	j1939_sk_sock2sockaddr_can(addr, jsk, peer);
	ret = J1939_MIN_NAMELEN;

failure:
	release_sock(sk);

	return ret;
}

static int j1939_sk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk;

	if (!sk)
		return 0;

	lock_sock(sk);
	jsk = j1939_sk(sk);

	if (jsk->state & J1939_SOCK_BOUND) {
		struct j1939_priv *priv = jsk->priv;

		if (wait_event_interruptible(jsk->waitq,
					     !j1939_sock_pending_get(&jsk->sk))) {
			j1939_cancel_active_session(priv, sk);
			j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
		}

		j1939_jsk_del(priv, jsk);

		j1939_local_ecu_put(priv, jsk->addr.src_name,
				    jsk->addr.sa);

		j1939_netdev_stop(priv);
	}

	kfree(jsk->filters);
	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
				    unsigned int optlen, int flag)
{
	int tmp;

	if (optlen != sizeof(tmp))
		return -EINVAL;
	if (copy_from_sockptr(&tmp, optval, optlen))
		return -EFAULT;
	lock_sock(&jsk->sk);
	if (tmp)
		jsk->state |= flag;
	else
		jsk->state &= ~flag;
	release_sock(&jsk->sk);
	return tmp;
}

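/* Userspace sketch for the J1939 specific socket options handled below.
 * The PGN and priority values are arbitrary examples:
 *
 *	struct j1939_filter filt = {
 *		.pgn = 0x12300,
 *		.pgn_mask = J1939_PGN_MAX,
 *	};
 *	setsockopt(fd, SOL_CAN_J1939, SO_J1939_FILTER, &filt, sizeof(filt));
 *
 *	int prio = 3;
 *	setsockopt(fd, SOL_CAN_J1939, SO_J1939_SEND_PRIO, &prio, sizeof(prio));
 */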
static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int tmp, count = 0, ret = 0;
	struct j1939_filter *filters = NULL, *ofilters;

	if (level != SOL_CAN_J1939)
		return -EINVAL;

	switch (optname) {
	case SO_J1939_FILTER:
		if (!sockptr_is_null(optval)) {
			struct j1939_filter *f;
			int c;

			if (optlen % sizeof(*filters) != 0)
				return -EINVAL;

			if (optlen > J1939_FILTER_MAX *
			    sizeof(struct j1939_filter))
				return -EINVAL;

			count = optlen / sizeof(*filters);
			filters = memdup_sockptr(optval, optlen);
			if (IS_ERR(filters))
				return PTR_ERR(filters);

			for (f = filters, c = count; c; f++, c--) {
				f->name &= f->name_mask;
				f->pgn &= f->pgn_mask;
				f->addr &= f->addr_mask;
			}
		}

		lock_sock(&jsk->sk);
		ofilters = jsk->filters;
		jsk->filters = filters;
		jsk->nfilters = count;
		release_sock(&jsk->sk);
		kfree(ofilters);
		return 0;
	case SO_J1939_PROMISC:
		return j1939_sk_setsockopt_flag(jsk, optval, optlen,
						J1939_SOCK_PROMISC);
	case SO_J1939_ERRQUEUE:
		ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
					       J1939_SOCK_ERRQUEUE);
		if (ret < 0)
			return ret;

		if (!(jsk->state & J1939_SOCK_ERRQUEUE))
			skb_queue_purge(&sk->sk_error_queue);
		return ret;
	case SO_J1939_SEND_PRIO:
		if (optlen != sizeof(tmp))
			return -EINVAL;
		if (copy_from_sockptr(&tmp, optval, optlen))
			return -EFAULT;
		if (tmp < 0 || tmp > 7)
			return -EDOM;
		if (tmp < 2 && !capable(CAP_NET_ADMIN))
			return -EPERM;
		lock_sock(&jsk->sk);
		jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
		release_sock(&jsk->sk);
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}

static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret, ulen;
	/* set defaults for using 'int' properties */
	int tmp = 0;
	int len = sizeof(tmp);
	void *val = &tmp;

	if (level != SOL_CAN_J1939)
		return -EINVAL;
	if (get_user(ulen, optlen))
		return -EFAULT;
	if (ulen < 0)
		return -EINVAL;

	lock_sock(&jsk->sk);
	switch (optname) {
	case SO_J1939_PROMISC:
		tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
		break;
	case SO_J1939_ERRQUEUE:
		tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
		break;
	case SO_J1939_SEND_PRIO:
		tmp = j1939_prio(jsk->sk.sk_priority);
		break;
	default:
		ret = -ENOPROTOOPT;
		goto no_copy;
	}

	/* copy to user, based on 'len' & 'val'; most sockopts are 'int'
	 * properties and leave 'len' & 'val' unchanged, only modifying
	 * 'tmp'
	 */
	if (len > ulen)
		ret = -EFAULT;
	else if (put_user(len, optlen))
		ret = -EFAULT;
	else if (copy_to_user(optval, val, len))
		ret = -EFAULT;
	else
		ret = 0;
no_copy:
	release_sock(&jsk->sk);
	return ret;
}

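/* Userspace sketch for retrieving the ancillary data queued below; the
 * buffer sizing and variable names are illustrative only:
 *
 *	__u8 prio;
 *	char ctrl[CMSG_SPACE(sizeof(__u64)) + 2 * CMSG_SPACE(sizeof(__u8))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *	};
 *	struct cmsghdr *cm;
 *
 *	recvmsg(fd, &mh, 0);
 *	for (cm = CMSG_FIRSTHDR(&mh); cm; cm = CMSG_NXTHDR(&mh, cm))
 *		if (cm->cmsg_level == SOL_CAN_J1939 &&
 *		    cm->cmsg_type == SCM_J1939_PRIO)
 *			prio = *CMSG_DATA(cm);
 */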
static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct j1939_sk_buff_cb *skcb;
	int ret = 0;

	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
		return -EINVAL;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
					  SCM_J1939_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, 0, &ret);
	if (!skb)
		return ret;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	ret = memcpy_to_msg(msg, skb->data, size);
	if (ret < 0) {
		skb_free_datagram(sk, skb);
		return ret;
	}

	skcb = j1939_skb_to_cb(skb);
	if (j1939_address_is_valid(skcb->addr.da))
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
			 sizeof(skcb->addr.da), &skcb->addr.da);

	if (skcb->addr.dst_name)
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
			 sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);

	put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
		 sizeof(skcb->priority), &skcb->priority);

	if (msg->msg_name) {
		struct sockaddr_can *paddr = msg->msg_name;

		msg->msg_namelen = J1939_MIN_NAMELEN;
		memset(msg->msg_name, 0, msg->msg_namelen);
		paddr->can_family = AF_CAN;
		paddr->can_ifindex = skb->skb_iif;
		paddr->can_addr.j1939.name = skcb->addr.src_name;
		paddr->can_addr.j1939.addr = skcb->addr.sa;
		paddr->can_addr.j1939.pgn = skcb->addr.pgn;
	}

	sock_recv_ts_and_drops(msg, sk, skb);
	msg->msg_flags |= skcb->msg_flags;
	skb_free_datagram(sk, skb);

	return size;
}

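/* Allocate a linear skb with CAN headroom for one (segment of a)
 * message, copy the payload from user space and prefill the control
 * buffer from the socket address, optionally overridden by msg_name.
 */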
static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
					  struct sock *sk,
					  struct msghdr *msg, size_t size,
					  int *errcode)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;
	int ret;

	skb = sock_alloc_send_skb(sk,
				  size +
				  sizeof(struct can_frame) -
				  sizeof(((struct can_frame *)NULL)->data) +
				  sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		goto failure;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = ndev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb_reserve(skb, offsetof(struct can_frame, data));

	ret = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (ret < 0)
		goto free_skb;

	skb->dev = ndev;

	skcb = j1939_skb_to_cb(skb);
	memset(skcb, 0, sizeof(*skcb));
	skcb->addr = jsk->addr;
	skcb->priority = j1939_prio(sk->sk_priority);

	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		if (addr->can_addr.j1939.name ||
		    addr->can_addr.j1939.addr != J1939_NO_ADDR) {
			skcb->addr.dst_name = addr->can_addr.j1939.name;
			skcb->addr.da = addr->can_addr.j1939.addr;
		}
		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
			skcb->addr.pgn = addr->can_addr.j1939.pgn;
	}

	*errcode = ret;
	return skb;

free_skb:
	kfree_skb(skb);
failure:
	*errcode = ret;
	return NULL;
}

static size_t j1939_sk_opt_stats_get_size(void)
{
	return
		nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
		0;
}

static struct sk_buff *
j1939_sk_get_timestamping_opt_stats(struct j1939_session *session)
{
	struct sk_buff *stats;
	u32 size;

	stats = alloc_skb(j1939_sk_opt_stats_get_size(), GFP_ATOMIC);
	if (!stats)
		return NULL;

	if (session->skcb.addr.type == J1939_SIMPLE)
		size = session->total_message_size;
	else
		size = min(session->pkt.tx_acked * 7,
			   session->total_message_size);

	nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);

	return stats;
}

void j1939_sk_errqueue(struct j1939_session *session,
		       enum j1939_sk_errqueue_type type)
{
	struct j1939_priv *priv = session->priv;
	struct sock *sk = session->sk;
	struct j1939_sock *jsk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	char *state = "UNK";
	int err;

	/* currently we have no sk for the RX session */
	if (!sk)
		return;

	jsk = j1939_sk(sk);

	if (!(jsk->state & J1939_SOCK_ERRQUEUE))
		return;

	skb = j1939_sk_get_timestamping_opt_stats(session);
	if (!skb)
		return;

	skb->tstamp = ktime_get_real();

	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	switch (type) {
	case J1939_ERRQUEUE_ACK:
		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) {
			kfree_skb(skb);
			return;
		}

		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
		serr->ee.ee_info = SCM_TSTAMP_ACK;
		state = "ACK";
		break;
	case J1939_ERRQUEUE_SCHED:
		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) {
			kfree_skb(skb);
			return;
		}

		serr->ee.ee_errno = ENOMSG;
		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
		serr->ee.ee_info = SCM_TSTAMP_SCHED;
		state = "SCH";
		break;
	case J1939_ERRQUEUE_ABORT:
		serr->ee.ee_errno = session->err;
		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
		serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
		state = "ABT";
		break;
	default:
		netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
	}

	serr->opt_stats = true;
	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		serr->ee.ee_data = session->tskey;

	netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
		   __func__, session, session->tskey, state);
	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}

void j1939_sk_send_loop_abort(struct sock *sk, int err)
{
	sk->sk_err = err;

	sk->sk_error_report(sk);
}

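/* Split the user buffer into segments of at most J1939_MAX_TP_PACKET_SIZE
 * bytes. The first segment creates a session sized for the complete
 * message and queues it on the socket; the remaining segments (including
 * those of a later sendmsg() call that resumes an interrupted one) are
 * appended until total_message_size is queued.
 */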
static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
			      struct msghdr *msg, size_t size)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
	struct sk_buff *skb;
	size_t segment_size, todo_size;
	int ret = 0;

	if (session &&
	    session->total_message_size != session->total_queued_size + size) {
		j1939_session_put(session);
		return -EIO;
	}

	todo_size = size;

	while (todo_size) {
		struct j1939_sk_buff_cb *skcb;

		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
				     todo_size);

		/* Allocate skb for one segment */
		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
					 &ret);
		if (ret)
			break;

		skcb = j1939_skb_to_cb(skb);

		if (!session) {
			/* at this point the size should be full size
			 * of the session
			 */
			skcb->offset = 0;
			session = j1939_tp_send(priv, skb, size);
			if (IS_ERR(session)) {
				ret = PTR_ERR(session);
				goto kfree_skb;
			}
			if (j1939_sk_queue_session(session)) {
				/* try to activate the session if we are
				 * first in the queue
				 */
				if (!j1939_session_activate(session)) {
					j1939_tp_schedule_txtimer(session, 0);
				} else {
					ret = -EBUSY;
					session->err = ret;
					j1939_sk_queue_drop_all(priv, jsk,
								EBUSY);
					break;
				}
			}
		} else {
			skcb->offset = session->total_queued_size;
			j1939_session_skb_queue(session, skb);
		}

		todo_size -= segment_size;
		session->total_queued_size += segment_size;
	}

	switch (ret) {
	case 0: /* OK */
		if (todo_size)
			netdev_warn(priv->ndev,
				    "no error found and not completely queued?! %zu\n",
				    todo_size);
		ret = size;
		break;
	case -ERESTARTSYS:
		ret = -EINTR;
		fallthrough;
	case -EAGAIN: /* OK */
		if (todo_size != size)
			ret = size - todo_size;
		break;
	default: /* ERROR */
		break;
	}

	if (session)
		j1939_session_put(session);

	return ret;

kfree_skb:
	kfree_skb(skb);
	return ret;
}

static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_priv *priv;
	int ifindex;
	int ret;

	lock_sock(sock->sk);
	/* various socket state tests */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EBADFD;
		goto sendmsg_done;
	}

	priv = jsk->priv;
	ifindex = jsk->ifindex;

	if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
		/* no source address assigned yet */
		ret = -EBADFD;
		goto sendmsg_done;
	}

	/* deal with provided destination address info */
	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		if (msg->msg_namelen < J1939_MIN_NAMELEN) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (addr->can_family != AF_CAN) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (addr->can_ifindex && addr->can_ifindex != ifindex) {
			ret = -EBADFD;
			goto sendmsg_done;
		}

		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
		    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (!addr->can_addr.j1939.name &&
		    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			/* broadcast, but SO_BROADCAST not set */
			ret = -EACCES;
			goto sendmsg_done;
		}
	} else {
		if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			/* broadcast, but SO_BROADCAST not set */
			ret = -EACCES;
			goto sendmsg_done;
		}
	}

	ret = j1939_sk_send_loop(priv, sk, msg, size);

sendmsg_done:
	release_sock(sock->sk);

	return ret;
}

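/* Called when the bound netdevice goes down: report ENETDOWN to every
 * bound socket and drop all of its queued sessions.
 */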
void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
{
	struct j1939_sock *jsk;
	int error_code = ENETDOWN;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		jsk->sk.sk_err = error_code;
		if (!sock_flag(&jsk->sk, SOCK_DEAD))
			jsk->sk.sk_error_report(&jsk->sk);

		j1939_sk_queue_drop_all(priv, jsk, error_code);
	}
	spin_unlock_bh(&priv->j1939_socks_lock);
}

static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops j1939_ops = {
	.family = PF_CAN,
	.release = j1939_sk_release,
	.bind = j1939_sk_bind,
	.connect = j1939_sk_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = j1939_sk_getname,
	.poll = datagram_poll,
	.ioctl = j1939_sk_no_ioctlcmd,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = j1939_sk_setsockopt,
	.getsockopt = j1939_sk_getsockopt,
	.sendmsg = j1939_sk_sendmsg,
	.recvmsg = j1939_sk_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct proto j1939_proto __read_mostly = {
	.name = "CAN_J1939",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct j1939_sock),
	.init = j1939_sk_init,
};

const struct can_proto j1939_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_J1939,
	.ops = &j1939_ops,
	.prot = &j1939_proto,
};