1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* AF_RXRPC sendmsg() implementation. 3 * 4 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved. 5 * Written by David Howells (dhowells@redhat.com) 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/net.h> 11 #include <linux/gfp.h> 12 #include <linux/skbuff.h> 13 #include <linux/export.h> 14 #include <linux/sched/signal.h> 15 16 #include <net/sock.h> 17 #include <net/af_rxrpc.h> 18 #include "ar-internal.h" 19 20 /* 21 * Propose an abort to be made in the I/O thread. 22 */ 23 bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error, 24 enum rxrpc_abort_reason why) 25 { 26 _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why); 27 28 if (!call->send_abort && !rxrpc_call_is_complete(call)) { 29 call->send_abort_why = why; 30 call->send_abort_err = error; 31 call->send_abort_seq = 0; 32 /* Request abort locklessly vs rxrpc_input_call_event(). */ 33 smp_store_release(&call->send_abort, abort_code); 34 rxrpc_poke_call(call, rxrpc_call_poke_abort); 35 return true; 36 } 37 38 return false; 39 } 40 41 /* 42 * Wait for a call to become connected. Interruption here doesn't cause the 43 * call to be aborted. 
 */
static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	/* Only a client call still awaiting connection needs to sleep here. */
	if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
		goto no_wait;

	add_wait_queue_exclusive(&call->waitq, &myself);

	for (;;) {
		/* Pick the sleep mode from the call's interruptibility
		 * setting; unknown values are treated as uninterruptible.
		 */
		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}

		/* Stop waiting as soon as the call has left the
		 * awaiting-connection state.
		 */
		if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		/* Signals only terminate the wait in the (pre)interruptible
		 * modes.
		 */
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}
		*timeo = schedule_timeout(*timeo);
	}

	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

no_wait:
	/* If the call failed whilst we were waiting, hand back its error. */
	if (ret == 0 && rxrpc_call_is_complete(call))
		ret = call->error;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Return true if there's sufficient Tx queue space.  Optionally reports the
 * current base of the Tx window through *_tx_win.
 */
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
	if (_tx_win)
		*_tx_win = call->tx_bottom;
	/* Allow at most 256 packets to be awaiting transmission/ACK. */
	return call->tx_prepared - call->tx_bottom < 256;
}

/*
 * Wait for space to appear in the Tx queue or a signal to occur.
 */
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
					 struct rxrpc_call *call,
					 long *timeo)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		/* The call completing ends the wait with its error code. */
		if (rxrpc_call_is_complete(call))
			return call->error;

		if (signal_pending(current))
			return sock_intr_errno(*timeo);

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly, but with
 * a timeout of 2*RTT if no progress was made and a signal occurred.
 */
static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
					    struct rxrpc_call *call)
{
	rxrpc_seq_t tx_start, tx_win;
	signed long rtt, timeout;

	/* srtt_us is stored <<3 (SRTT in usec scaled by 8); allow two RTTs,
	 * with a floor of 2 jiffies.
	 */
	rtt = READ_ONCE(call->peer->srtt_us) >> 3;
	rtt = usecs_to_jiffies(rtt) * 2;
	if (rtt < 2)
		rtt = 2;

	timeout = rtt;
	/* NOTE(review): tx_start is seeded from acks_hard_ack, but
	 * rxrpc_check_tx_space() reports tx_bottom into tx_win; these are
	 * presumably advanced in step — verify against the I/O thread.
	 */
	tx_start = smp_load_acquire(&call->acks_hard_ack);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (rxrpc_check_tx_space(call, &tx_win))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		/* Only give up if the window made no progress for a full
		 * timeout period and a signal is pending.
		 */
		if (timeout == 0 &&
		    tx_win == tx_start && signal_pending(current))
			return -EINTR;

		/* Progress was made: restart the 2*RTT timeout. */
		if (tx_win != tx_start) {
			timeout = rtt;
			tx_start = tx_win;
		}

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		timeout = schedule_timeout(timeout);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly.
 */
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    long *timeo)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		/* The call completing ends the wait with its error code. */
		if (rxrpc_call_is_complete(call))
			return call->error;

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo,
				    bool waitall)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u,%u}",
	       call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);

	/* Queue ourselves before dispatching so wakeups aren't missed. */
	add_wait_queue(&call->waitq, &myself);

	/* Dispatch on the call's interruptibility mode; MSG_WAITALL selects
	 * the progress-tolerant variant in interruptible mode.
	 */
	switch (call->interruptibility) {
	case RXRPC_INTERRUPTIBLE:
		if (waitall)
			ret = rxrpc_wait_for_tx_window_waitall(rx, call);
		else
			ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
		break;
	case RXRPC_PREINTERRUPTIBLE:
	case RXRPC_UNINTERRUPTIBLE:
	default:
		ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
		break;
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	/* The callback is optional. */
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}

/*
 * Queue a DATA packet for transmission, set the resend timeout and send
 * the packet immediately.  Returns the error from rxrpc_send_data_packet()
 * in case the caller wants to do something with it.
 */
static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			       struct rxrpc_txbuf *txb,
			       rxrpc_notify_end_tx_t notify_end_tx)
{
	rxrpc_seq_t seq = txb->seq;
	bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;

	rxrpc_inc_stat(call->rxnet, stat_tx_data);

	/* Buffers must be queued strictly in sequence order. */
	ASSERTCMP(txb->seq, ==, call->tx_prepared + 1);

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	txb->last_sent = ktime_get_real();

	if (last)
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
	else
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);

	/* Add the packet to the call's output buffer */
	spin_lock(&call->tx_lock);
	/* Only poke the I/O thread if the queue was empty — otherwise it has
	 * already been poked and is still working through the list.
	 */
	poke = list_empty(&call->tx_sendmsg);
	list_add_tail(&txb->call_link, &call->tx_sendmsg);
	call->tx_prepared = seq;
	if (last)
		rxrpc_notify_end_tx(rx, call, notify_end_tx);
	spin_unlock(&call->tx_lock);

	if (poke)
		rxrpc_poke_call(call, rxrpc_call_poke_start);
}

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx,
			   bool *_dropped_lock)
{
	struct rxrpc_txbuf *txb;
	struct sock *sk = &rx->sk;
	enum rxrpc_call_state state;
	long timeo;
	bool more = msg->msg_flags & MSG_MORE;
	int ret, copied = 0;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* A client call must have left the awaiting-connection state before
	 * we can send on it.
	 */
	ret = rxrpc_wait_to_be_connected(call, &timeo);
	if (ret < 0)
		return ret;

	/* Perform deferred security initialisation on first send. */
	if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
		ret = rxrpc_init_client_conn_security(call->conn);
		if (ret < 0)
			return ret;
	}

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

reload:
	/* Revalidate the socket and call state — we come back here after
	 * dropping and retaking the user mutex in wait_for_space.
	 */
	ret = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto maybe_error;
	state = rxrpc_call_state(call);
	ret = -ESHUTDOWN;
	if (state >= RXRPC_CALL_COMPLETE)
		goto maybe_error;
	ret = -EPROTO;
	if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
	    state != RXRPC_CALL_SERVER_ACK_REQUEST &&
	    state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Request phase complete for this client call */
		trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
				  call->cid, call->call_id, call->rx_consumed,
				  0, -EPROTO);
		goto maybe_error;
	}

	/* If a total Tx length was preset, this sendmsg must neither exceed
	 * it nor, if this is the final chunk (!MSG_MORE), fall short of it.
	 */
	ret = -EMSGSIZE;
	if (call->tx_total_len != -1) {
		if (len - copied > call->tx_total_len)
			goto maybe_error;
		if (!more && len - copied != call->tx_total_len)
			goto maybe_error;
	}

	/* Resume filling a buffer left over from a previous sendmsg. */
	txb = call->tx_pending;
	call->tx_pending = NULL;
	if (txb)
		rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);

	do {
		if (!txb) {
			size_t remain, bufsize, chunk, offset;

			_debug("alloc");

			if (!rxrpc_check_tx_space(call, NULL))
				goto wait_for_space;

			/* Work out the maximum size of a packet.  Assume that
			 * the security header is going to be in the padded
			 * region (enc blocksize), but the trailer is not.
			 */
			remain = more ? INT_MAX : msg_data_left(msg);
			ret = call->conn->security->how_much_data(call, remain,
								  &bufsize, &chunk, &offset);
			if (ret < 0)
				goto maybe_error;

			_debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);

			/* create a buffer that we can retain until it's ACK'd */
			ret = -ENOMEM;
			txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_DATA,
						GFP_KERNEL);
			if (!txb)
				goto maybe_error;

			/* Reserve room for the security header. */
			txb->offset = offset;
			txb->space -= offset;
			txb->space = min_t(size_t, chunk, txb->space);
		}

		_debug("append");

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			size_t copy = min_t(size_t, txb->space, msg_data_left(msg));

			_debug("add %zu", copy);
			if (!copy_from_iter_full(txb->data + txb->offset, copy,
						 &msg->msg_iter))
				goto efault;
			_debug("added");
			txb->space -= copy;
			txb->len += copy;
			txb->offset += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (rxrpc_call_is_complete(call))
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (!txb->space ||
		    (msg_data_left(msg) == 0 && !more)) {
			if (msg_data_left(msg) == 0 && !more) {
				txb->wire.flags |= RXRPC_LAST_PACKET;
				__set_bit(RXRPC_TXBUF_LAST, &txb->flags);
			}
			/* Only flag more-to-come whilst inside the window. */
			else if (call->tx_top - call->acks_hard_ack <
				 call->tx_winsize)
				txb->wire.flags |= RXRPC_MORE_PACKETS;

			/* Apply the security wrapping before queueing. */
			ret = call->security->secure_packet(call, txb);
			if (ret < 0)
				goto out;

			rxrpc_queue_packet(rx, call, txb, notify_end_tx);
			txb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
	if (rxrpc_call_is_complete(call) &&
	    call->error < 0)
		ret = call->error;
out:
	/* Stash any partially-filled buffer for the next sendmsg. */
	call->tx_pending = txb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_put_txbuf(txb, rxrpc_txbuf_put_send_aborted);
	_leave(" = %d", call->error);
	return call->error;

maybe_error:
	/* Report partial progress in preference to an error. */
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;

wait_for_space:
	ret = -EAGAIN;
	if (msg->msg_flags & MSG_DONTWAIT)
		goto maybe_error;
	/* The user mutex must be dropped whilst sleeping; the caller is told
	 * via *_dropped_lock in case reacquisition below fails.
	 */
	mutex_unlock(&call->user_mutex);
	*_dropped_lock = true;
	ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
				       msg->msg_flags & MSG_WAITALL);
	if (ret < 0)
		goto maybe_error;
	if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(timeo);
			goto maybe_error;
		}
	} else {
		mutex_lock(&call->user_mutex);
	}
	*_dropped_lock = false;
	goto reload;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	/* A user call ID is mandatory, so an empty control buffer is an
	 * error.
	 */
	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			/* Compat userspace passes a 32-bit ID. */
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->call.user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			/* Abort and charge-accept are mutually exclusive
			 * commands.
			 */
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_CHARGE_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_CHARGE_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			/* May only be set once and must be non-negative. */
			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->call.tx_total_len < 0)
				return -EINVAL;
			break;

		case RXRPC_SET_CALL_TIMEOUT:
			/* One to three 32-bit timeout values (hard, idle,
			 * normal), in milliseconds.
			 */
			if (len & 3 || len < 4 || len > 12)
				return -EINVAL;
			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
			p->call.nr_timeouts = len / 4;
			if (p->call.timeouts.hard > INT_MAX / HZ)
				return -ERANGE;
			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
				return -ERANGE;
			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
				return -ERANGE;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	/* A preset Tx length only makes sense when sending data. */
	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
567 */ 568 static struct rxrpc_call * 569 rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, 570 struct rxrpc_send_params *p) 571 __releases(&rx->sk.sk_lock.slock) 572 __acquires(&call->user_mutex) 573 { 574 struct rxrpc_conn_parameters cp; 575 struct rxrpc_peer *peer; 576 struct rxrpc_call *call; 577 struct key *key; 578 579 DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name); 580 581 _enter(""); 582 583 if (!msg->msg_name) { 584 release_sock(&rx->sk); 585 return ERR_PTR(-EDESTADDRREQ); 586 } 587 588 peer = rxrpc_lookup_peer(rx->local, srx, GFP_KERNEL); 589 if (!peer) { 590 release_sock(&rx->sk); 591 return ERR_PTR(-ENOMEM); 592 } 593 594 key = rx->key; 595 if (key && !rx->key->payload.data[0]) 596 key = NULL; 597 598 memset(&cp, 0, sizeof(cp)); 599 cp.local = rx->local; 600 cp.peer = peer; 601 cp.key = rx->key; 602 cp.security_level = rx->min_sec_level; 603 cp.exclusive = rx->exclusive | p->exclusive; 604 cp.upgrade = p->upgrade; 605 cp.service_id = srx->srx_service; 606 call = rxrpc_new_client_call(rx, &cp, &p->call, GFP_KERNEL, 607 atomic_inc_return(&rxrpc_debug_id)); 608 /* The socket is now unlocked */ 609 610 rxrpc_put_peer(peer, rxrpc_peer_put_application); 611 _leave(" = %p\n", call); 612 return call; 613 } 614 615 /* 616 * send a message forming part of a client call through an RxRPC socket 617 * - caller holds the socket locked 618 * - the socket may be either a client socket or a server socket 619 */ 620 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) 621 __releases(&rx->sk.sk_lock.slock) 622 { 623 struct rxrpc_call *call; 624 unsigned long now, j; 625 bool dropped_lock = false; 626 int ret; 627 628 struct rxrpc_send_params p = { 629 .call.tx_total_len = -1, 630 .call.user_call_ID = 0, 631 .call.nr_timeouts = 0, 632 .call.interruptibility = RXRPC_INTERRUPTIBLE, 633 .abort_code = 0, 634 .command = RXRPC_CMD_SEND_DATA, 635 .exclusive = false, 636 .upgrade = false, 637 }; 638 639 _enter(""); 
640 641 ret = rxrpc_sendmsg_cmsg(msg, &p); 642 if (ret < 0) 643 goto error_release_sock; 644 645 if (p.command == RXRPC_CMD_CHARGE_ACCEPT) { 646 ret = -EINVAL; 647 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) 648 goto error_release_sock; 649 ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID); 650 goto error_release_sock; 651 } 652 653 call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID); 654 if (!call) { 655 ret = -EBADSLT; 656 if (p.command != RXRPC_CMD_SEND_DATA) 657 goto error_release_sock; 658 call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p); 659 /* The socket is now unlocked... */ 660 if (IS_ERR(call)) 661 return PTR_ERR(call); 662 /* ... and we have the call lock. */ 663 p.call.nr_timeouts = 0; 664 ret = 0; 665 if (rxrpc_call_is_complete(call)) 666 goto out_put_unlock; 667 } else { 668 switch (rxrpc_call_state(call)) { 669 case RXRPC_CALL_CLIENT_AWAIT_CONN: 670 case RXRPC_CALL_SERVER_SECURING: 671 if (p.command == RXRPC_CMD_SEND_ABORT) 672 break; 673 fallthrough; 674 case RXRPC_CALL_UNINITIALISED: 675 case RXRPC_CALL_SERVER_PREALLOC: 676 rxrpc_put_call(call, rxrpc_call_put_sendmsg); 677 ret = -EBUSY; 678 goto error_release_sock; 679 default: 680 break; 681 } 682 683 ret = mutex_lock_interruptible(&call->user_mutex); 684 release_sock(&rx->sk); 685 if (ret < 0) { 686 ret = -ERESTARTSYS; 687 goto error_put; 688 } 689 690 if (p.call.tx_total_len != -1) { 691 ret = -EINVAL; 692 if (call->tx_total_len != -1 || 693 call->tx_pending || 694 call->tx_top != 0) 695 goto out_put_unlock; 696 call->tx_total_len = p.call.tx_total_len; 697 } 698 } 699 700 switch (p.call.nr_timeouts) { 701 case 3: 702 j = msecs_to_jiffies(p.call.timeouts.normal); 703 if (p.call.timeouts.normal > 0 && j == 0) 704 j = 1; 705 WRITE_ONCE(call->next_rx_timo, j); 706 fallthrough; 707 case 2: 708 j = msecs_to_jiffies(p.call.timeouts.idle); 709 if (p.call.timeouts.idle > 0 && j == 0) 710 j = 1; 711 WRITE_ONCE(call->next_req_timo, j); 712 fallthrough; 713 case 1: 714 if 
(p.call.timeouts.hard > 0) { 715 j = p.call.timeouts.hard * HZ; 716 now = jiffies; 717 j += now; 718 WRITE_ONCE(call->expect_term_by, j); 719 rxrpc_reduce_call_timer(call, j, now, 720 rxrpc_timer_set_for_hard); 721 } 722 break; 723 } 724 725 if (rxrpc_call_is_complete(call)) { 726 /* it's too late for this call */ 727 ret = -ESHUTDOWN; 728 } else if (p.command == RXRPC_CMD_SEND_ABORT) { 729 rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED, 730 rxrpc_abort_call_sendmsg); 731 ret = 0; 732 } else if (p.command != RXRPC_CMD_SEND_DATA) { 733 ret = -EINVAL; 734 } else { 735 ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock); 736 } 737 738 out_put_unlock: 739 if (!dropped_lock) 740 mutex_unlock(&call->user_mutex); 741 error_put: 742 rxrpc_put_call(call, rxrpc_call_put_sendmsg); 743 _leave(" = %d", ret); 744 return ret; 745 746 error_release_sock: 747 release_sock(&rx->sk); 748 return ret; 749 } 750 751 /** 752 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call 753 * @sock: The socket the call is on 754 * @call: The call to send data through 755 * @msg: The data to send 756 * @len: The amount of data to send 757 * @notify_end_tx: Notification that the last packet is queued. 758 * 759 * Allow a kernel service to send data on a call. The call must be in an state 760 * appropriate to sending data. No control data should be supplied in @msg, 761 * nor should an address be supplied. MSG_MORE should be flagged if there's 762 * more data to come, otherwise this data will end the transmission phase. 
763 */ 764 int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, 765 struct msghdr *msg, size_t len, 766 rxrpc_notify_end_tx_t notify_end_tx) 767 { 768 bool dropped_lock = false; 769 int ret; 770 771 _enter("{%d},", call->debug_id); 772 773 ASSERTCMP(msg->msg_name, ==, NULL); 774 ASSERTCMP(msg->msg_control, ==, NULL); 775 776 mutex_lock(&call->user_mutex); 777 778 ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len, 779 notify_end_tx, &dropped_lock); 780 if (ret == -ESHUTDOWN) 781 ret = call->error; 782 783 if (!dropped_lock) 784 mutex_unlock(&call->user_mutex); 785 _leave(" = %d", ret); 786 return ret; 787 } 788 EXPORT_SYMBOL(rxrpc_kernel_send_data); 789 790 /** 791 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call 792 * @sock: The socket the call is on 793 * @call: The call to be aborted 794 * @abort_code: The abort code to stick into the ABORT packet 795 * @error: Local error value 796 * @why: Indication as to why. 797 * 798 * Allow a kernel service to abort a call, if it's still in an abortable state 799 * and return true if the call was aborted, false if it was already complete. 800 */ 801 bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call, 802 u32 abort_code, int error, enum rxrpc_abort_reason why) 803 { 804 bool aborted; 805 806 _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why); 807 808 mutex_lock(&call->user_mutex); 809 aborted = rxrpc_propose_abort(call, abort_code, error, why); 810 mutex_unlock(&call->user_mutex); 811 return aborted; 812 } 813 EXPORT_SYMBOL(rxrpc_kernel_abort_call); 814 815 /** 816 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call 817 * @sock: The socket the call is on 818 * @call: The call to be informed 819 * @tx_total_len: The amount of data to be transmitted for this call 820 * 821 * Allow a kernel service to set the total transmit length on a call. This 822 * allows buffer-to-packet encrypt-and-copy to be performed. 
 *
 * This function is primarily for use for setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	/* The total length may only be set once per call (-1 means unset). */
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);