// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Propose an abort to be made in the I/O thread.
 */
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
			 enum rxrpc_abort_reason why)
{
	_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);

	if (!call->send_abort && !rxrpc_call_is_complete(call)) {
		call->send_abort_why = why;
		call->send_abort_err = error;
		call->send_abort_seq = 0;
		trace_rxrpc_abort_call(call, abort_code);
		/* Request abort locklessly vs rxrpc_input_call_event(). */
		smp_store_release(&call->send_abort, abort_code);
		rxrpc_poke_call(call, rxrpc_call_poke_abort);
		return true;
	}

	return false;
}

/*
 * Wait for a call to become connected.  Interruption here doesn't cause the
 * call to be aborted.
 */
static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
		goto no_wait;

	add_wait_queue_exclusive(&call->waitq, &myself);

	for (;;) {
		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}

		if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}
		*timeo = schedule_timeout(*timeo);
	}

	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

no_wait:
	if (ret == 0 && rxrpc_call_is_complete(call))
		ret = call->error;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Return true if there's sufficient Tx queue space.
 */
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
	rxrpc_seq_t tx_bottom = READ_ONCE(call->tx_bottom);

	if (_tx_win)
		*_tx_win = tx_bottom;
	return call->send_top - tx_bottom < 256;
}
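
/*
 * Worked example (illustrative only; the values are invented): the check
 * above relies on unsigned 32-bit sequence arithmetic, so it stays correct
 * across sequence-number wrap.  With
 *
 *	tx_bottom = 0xfffffff0	(everything below this has been hard-ACKed)
 *	send_top  = 0x0000000f	(last sequence number queued by sendmsg)
 *
 * send_top - tx_bottom evaluates to 0x1f (31), and 31 < 256, so there is
 * still room to queue more packets - even though a plain comparison of the
 * raw values would suggest an empty or enormous window.
 */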

/*
 * Wait for space to appear in the Tx queue or a signal to occur.
 */
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
					 struct rxrpc_call *call,
					 long *timeo)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		if (signal_pending(current))
			return sock_intr_errno(*timeo);

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly, but with
 * a timeout of 2*RTT if no progress was made and a signal occurred.
 */
static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
					    struct rxrpc_call *call)
{
	rxrpc_seq_t tx_start, tx_win;
	signed long rtt, timeout;

	rtt = READ_ONCE(call->srtt_us) >> 3;
	rtt = usecs_to_jiffies(rtt) * 2;
	if (rtt < 2)
		rtt = 2;

	timeout = rtt;
	tx_start = READ_ONCE(call->tx_bottom);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (rxrpc_check_tx_space(call, &tx_win))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		if (timeout == 0 &&
		    tx_win == tx_start && signal_pending(current))
			return -EINTR;

		if (tx_win != tx_start) {
			timeout = rtt;
			tx_start = tx_win;
		}

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		timeout = schedule_timeout(timeout);
	}
}
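
/*
 * Worked example for the timeout above (illustrative only; it assumes
 * call->srtt_us holds the smoothed RTT in microseconds scaled by 8, as the
 * ">> 3" suggests, and HZ == 1000 for the sake of the arithmetic): with a
 * 5ms smoothed RTT, srtt_us is 40000, so rtt = usecs_to_jiffies(5000) * 2 =
 * 10 jiffies.  The wait then sleeps uninterruptibly, but if roughly 2*RTT
 * elapses with no advance of tx_bottom and a signal is pending, it gives up
 * with -EINTR rather than blocking indefinitely on a stalled call.
 */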

/*
 * Wait for space to appear in the Tx queue uninterruptibly.
 */
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    long *timeo)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (rxrpc_call_is_complete(call))
			return call->error;

		trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * Wait for space to appear in the transmit/ACK window.
 * - The caller has dropped the call's user mutex (see rxrpc_send_data()) and
 *   does not hold the socket lock.
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo,
				    bool waitall)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u}",
	       call->tx_bottom, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	switch (call->interruptibility) {
	case RXRPC_INTERRUPTIBLE:
		if (waitall)
			ret = rxrpc_wait_for_tx_window_waitall(rx, call);
		else
			ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
		break;
	case RXRPC_PREINTERRUPTIBLE:
	case RXRPC_UNINTERRUPTIBLE:
	default:
		ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
		break;
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}

/*
 * Queue a DATA packet for transmission at the end of the call's Tx queue.  If
 * this is the last packet, the end of the transmission phase is noted and the
 * call owner notified; if the queue was previously empty, the I/O thread is
 * poked to begin transmission.
 */
static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			       struct rxrpc_txbuf *txb,
			       rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_txqueue *sq = call->send_queue;
	rxrpc_seq_t seq = txb->seq;
	bool poke, last = txb->flags & RXRPC_LAST_PACKET;
	int ix = seq & RXRPC_TXQ_MASK;

	rxrpc_inc_stat(call->rxnet, stat_tx_data);

	ASSERTCMP(txb->seq, ==, call->send_top + 1);

	if (last)
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
	else
		trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);

	if (WARN_ON_ONCE(sq->bufs[ix]))
		trace_rxrpc_tq(call, sq, seq, rxrpc_tq_queue_dup);
	else
		trace_rxrpc_tq(call, sq, seq, rxrpc_tq_queue);

	/* Add the packet to the call's output buffer */
	poke = (READ_ONCE(call->tx_bottom) == call->send_top);
	sq->bufs[ix] = txb;
	/* Order send_top after the queue->next pointer and txb content. */
	smp_store_release(&call->send_top, seq);
	if (last) {
		set_bit(RXRPC_CALL_TX_NO_MORE, &call->flags);
		rxrpc_notify_end_tx(rx, call, notify_end_tx);
		call->send_queue = NULL;
	}

	if (poke)
		rxrpc_poke_call(call, rxrpc_call_poke_start);
}

/*
 * Allocate a new txqueue unit and add it to the transmission queue.
 */
static int rxrpc_alloc_txqueue(struct sock *sk, struct rxrpc_call *call)
{
	struct rxrpc_txqueue *tq;

	tq = kzalloc(sizeof(*tq), sk->sk_allocation);
	if (!tq)
		return -ENOMEM;

	tq->xmit_ts_base = KTIME_MIN;
	for (int i = 0; i < RXRPC_NR_TXQUEUE; i++)
		tq->segment_xmit_ts[i] = UINT_MAX;

	if (call->send_queue) {
		tq->qbase = call->send_top + 1;
		call->send_queue->next = tq;
		call->send_queue = tq;
	} else if (WARN_ON(call->tx_queue)) {
		kfree(tq);
		return -ENOMEM;
	} else {
		/* We start at seq 1, so pretend seq 0 is hard-acked. */
		tq->nr_reported_acks = 1;
		tq->segment_acked = 1UL;
		tq->qbase = 0;
		call->tx_qbase = 0;
		call->send_queue = tq;
		call->tx_qtail = tq;
		call->tx_queue = tq;
	}

	trace_rxrpc_tq(call, tq, call->send_top, rxrpc_tq_alloc);
	return 0;
}
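
/*
 * Illustrative note on the queue geometry (assuming RXRPC_TXQ_MASK ==
 * RXRPC_NR_TXQUEUE - 1, i.e. each rxrpc_txqueue unit covers RXRPC_NR_TXQUEUE
 * consecutive sequence numbers): rxrpc_queue_packet() places a buffer at slot
 * (seq & RXRPC_TXQ_MASK) within the current unit, and rxrpc_send_data()
 * allocates a fresh unit whenever ((send_top + 1) & RXRPC_TXQ_MASK) == 0,
 * i.e. when the next sequence number would be the first slot of a new unit.
 * For example, with 64 slots per unit, sequence numbers 1..63 fill out the
 * first unit (seq 0 is treated as already hard-ACKed above) and queuing seq
 * 64 triggers allocation of the second unit with qbase 64.
 */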

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx,
			   bool *_dropped_lock)
{
	struct rxrpc_txbuf *txb;
	struct sock *sk = &rx->sk;
	enum rxrpc_call_state state;
	long timeo;
	bool more = msg->msg_flags & MSG_MORE;
	int ret, copied = 0;

	if (test_bit(RXRPC_CALL_TX_NO_MORE, &call->flags)) {
		trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
				  call->cid, call->call_id, call->rx_consumed,
				  0, -EPROTO);
		return -EPROTO;
	}

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ret = rxrpc_wait_to_be_connected(call, &timeo);
	if (ret < 0)
		return ret;

	if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
		ret = rxrpc_init_client_conn_security(call->conn);
		if (ret < 0)
			return ret;
	}

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

reload:
	txb = call->tx_pending;
	call->tx_pending = NULL;
	if (txb)
		rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);

	ret = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto maybe_error;
	state = rxrpc_call_state(call);
	ret = -ESHUTDOWN;
	if (state >= RXRPC_CALL_COMPLETE)
		goto maybe_error;
	ret = -EPROTO;
	if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
	    state != RXRPC_CALL_SERVER_ACK_REQUEST &&
	    state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Request phase complete for this client call */
		trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
				  call->cid, call->call_id, call->rx_consumed,
				  0, -EPROTO);
		goto maybe_error;
	}

	ret = -EMSGSIZE;
	if (call->tx_total_len != -1) {
		if (len - copied > call->tx_total_len)
			goto maybe_error;
		if (!more && len - copied != call->tx_total_len)
			goto maybe_error;
	}

	do {
		if (!txb) {
			size_t remain;

			_debug("alloc");

			if (!rxrpc_check_tx_space(call, NULL))
				goto wait_for_space;

			/* See if we need to begin/extend the Tx queue. */
			if (!call->send_queue || !((call->send_top + 1) & RXRPC_TXQ_MASK)) {
				ret = rxrpc_alloc_txqueue(sk, call);
				if (ret < 0)
					goto maybe_error;
			}

			/* Work out the maximum size of a packet.  Assume that
			 * the security header is going to be in the padded
			 * region (enc blocksize), but the trailer is not.
			 */
			remain = more ? INT_MAX : msg_data_left(msg);
			txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
			if (!txb) {
				ret = -ENOMEM;
				goto maybe_error;
			}
		}

		_debug("append");

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			size_t copy = umin(txb->space, msg_data_left(msg));

			_debug("add %zu", copy);
			if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
						 copy, &msg->msg_iter))
				goto efault;
			_debug("added");
			txb->space -= copy;
			txb->len += copy;
			txb->offset += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (rxrpc_call_is_complete(call))
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (!txb->space ||
		    (msg_data_left(msg) == 0 && !more)) {
			if (msg_data_left(msg) == 0 && !more)
				txb->flags |= RXRPC_LAST_PACKET;

			ret = call->security->secure_packet(call, txb);
			if (ret < 0)
				goto out;

			txb->kvec[0].iov_len += txb->len;
			rxrpc_queue_packet(rx, call, txb, notify_end_tx);
			txb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
	if (rxrpc_call_is_complete(call) &&
	    call->error < 0)
		ret = call->error;
out:
	call->tx_pending = txb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_put_txbuf(txb, rxrpc_txbuf_put_send_aborted);
	_leave(" = %d", call->error);
	return call->error;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;

wait_for_space:
	ret = -EAGAIN;
	if (msg->msg_flags & MSG_DONTWAIT)
		goto maybe_error;
	mutex_unlock(&call->user_mutex);
	*_dropped_lock = true;
	ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
				       msg->msg_flags & MSG_WAITALL);
	if (ret < 0)
		goto maybe_error;
	if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(timeo);
			goto maybe_error;
		}
	} else {
		mutex_lock(&call->user_mutex);
	}
	*_dropped_lock = false;
	goto reload;
}
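
/*
 * Illustrative note on the length checks above (values invented): if
 * RXRPC_TX_LENGTH pre-declared a total of 100 bytes, a sendmsg() of 40 bytes
 * with MSG_MORE set is accepted (40 <= 100 and more data is promised), and a
 * further 60 bytes without MSG_MORE is accepted and ends the transmission
 * phase.  By contrast, 40 bytes without MSG_MORE would fail with -EMSGSIZE
 * because the declared total would not be met, and anything taking the
 * running total past 100 bytes fails with -EMSGSIZE as well.
 */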

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->call.user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_CHARGE_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_CHARGE_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->call.tx_total_len < 0)
				return -EINVAL;
			break;

		case RXRPC_SET_CALL_TIMEOUT:
			if (len & 3 || len < 4 || len > 12)
				return -EINVAL;
			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
			p->call.nr_timeouts = len / 4;
			if (p->call.timeouts.hard > INT_MAX / HZ)
				return -ERANGE;
			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
				return -ERANGE;
			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
				return -ERANGE;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
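
/*
 * Illustrative userspace sketch (guarded out of the build): one way a client
 * might fill in the control buffer that rxrpc_sendmsg_cmsg() parses above,
 * attaching the mandatory user call ID and an optional total Tx length.  The
 * socket setup and the sockaddr_rxrpc contents are omitted; the helper name
 * and the chosen call ID are hypothetical.
 */
#if 0
#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

static ssize_t send_request(int fd, struct sockaddr_rxrpc *srx,
			    const void *buf, size_t len, int64_t total_len)
{
	unsigned long user_call_id = 1;	/* hypothetical tag for this call */
	char control[CMSG_SPACE(sizeof(user_call_id)) +
		     CMSG_SPACE(sizeof(total_len))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= srx,
		.msg_namelen	= sizeof(*srx),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;

	memset(control, 0, sizeof(control));

	/* RXRPC_USER_CALL_ID is mandatory; it identifies (or begins) the call. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(user_call_id));
	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));

	/* RXRPC_TX_LENGTH optionally pre-declares the total request size. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type  = RXRPC_TX_LENGTH;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(total_len));
	memcpy(CMSG_DATA(cmsg), &total_len, sizeof(total_len));

	/* MSG_MORE would be passed here if further sendmsg() calls follow. */
	return sendmsg(fd, &msg, 0);
}
#endif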

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	peer = rxrpc_lookup_peer(rx->local, srx, GFP_KERNEL);
	if (!peer) {
		release_sock(&rx->sk);
		return ERR_PTR(-ENOMEM);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.peer			= peer;
	cp.key			= rx->key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | p->exclusive;
	cp.upgrade		= p->upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, &p->call, GFP_KERNEL,
				     atomic_inc_return(&rxrpc_debug_id));
	/* The socket is now unlocked */

	rxrpc_put_peer(peer, rxrpc_peer_put_application);
	_leave(" = %p\n", call);
	return call;
}

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call;
	bool dropped_lock = false;
	int ret;

	struct rxrpc_send_params p = {
		.call.tx_total_len	= -1,
		.call.user_call_ID	= 0,
		.call.nr_timeouts	= 0,
		.call.interruptibility	= RXRPC_INTERRUPTIBLE,
		.abort_code		= 0,
		.command		= RXRPC_CMD_SEND_DATA,
		.exclusive		= false,
		.upgrade		= false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
		goto error_release_sock;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
		p.call.nr_timeouts = 0;
		ret = 0;
		if (rxrpc_call_is_complete(call))
			goto out_put_unlock;
	} else {
		switch (rxrpc_call_state(call)) {
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_SECURING:
			if (p.command == RXRPC_CMD_SEND_ABORT)
				break;
			fallthrough;
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_SERVER_PREALLOC:
			rxrpc_put_call(call, rxrpc_call_put_sendmsg);
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.call.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto out_put_unlock;
			call->tx_total_len = p.call.tx_total_len;
		}
	}

	switch (p.call.nr_timeouts) {
	case 3:
		WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
		fallthrough;
	case 2:
		WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
		fallthrough;
	case 1:
		if (p.call.timeouts.hard > 0) {
			ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);

			WRITE_ONCE(call->expect_term_by,
				   ktime_add(delay, ktime_get_real()));
			trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
			rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
		}
		break;
	}

	if (rxrpc_call_is_complete(call)) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
				    rxrpc_abort_call_sendmsg);
		ret = 0;
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
	}

out_put_unlock:
	if (!dropped_lock)
		mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put_sendmsg);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 * @notify_end_tx: Notification that the last packet is queued.
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	bool dropped_lock = false;
	int ret;

	_enter("{%d},", call->debug_id);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
			      notify_end_tx, &dropped_lock);
	if (ret == -ESHUTDOWN)
		ret = call->error;

	if (!dropped_lock)
		mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: Indication as to why.
 *
 * Allow a kernel service to abort a call if it's still in an abortable state.
 * Returns true if the call was aborted, false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, enum rxrpc_abort_reason why)
{
	bool aborted;

	_enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);
	aborted = rxrpc_propose_abort(call, abort_code, error, why);
	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);

/**
 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
 * @sock: The socket the call is on
 * @call: The call to be informed
 * @tx_total_len: The amount of data to be transmitted for this call
 *
 * Allow a kernel service to set the total transmit length on a call.  This
 * allows buffer-to-packet encrypt-and-copy to be performed.
 *
 * This function is primarily intended for setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
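
/*
 * Illustrative kernel-service sketch (guarded out of the build): sending a
 * two-part request on an already-established call with the exported helpers
 * above.  The socket and call are assumed to have been obtained elsewhere
 * (e.g. via rxrpc_kernel_begin_call()); the helper name and buffers below are
 * hypothetical.
 */
#if 0
static int example_send_request(struct socket *sock, struct rxrpc_call *call,
				void *hdr, size_t hdr_len,
				void *body, size_t body_len)
{
	struct msghdr msg = { .msg_flags = MSG_MORE };
	struct kvec kv;
	int ret;

	/* Declare the total length up front so the security class can
	 * pre-size its packets.
	 */
	rxrpc_kernel_set_tx_length(sock, call, hdr_len + body_len);

	/* First chunk: more data follows, so MSG_MORE stays set. */
	kv.iov_base = hdr;
	kv.iov_len  = hdr_len;
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, hdr_len);
	ret = rxrpc_kernel_send_data(sock, call, &msg, hdr_len, NULL);
	if (ret < 0)
		return ret;

	/* Final chunk: clearing MSG_MORE ends the transmission phase. */
	msg.msg_flags = 0;
	kv.iov_base = body;
	kv.iov_len  = body_len;
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, body_len);
	return rxrpc_kernel_send_data(sock, call, &msg, body_len, NULL);
}
#endif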