/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till packet resend (in jiffies).
 */
unsigned int rxrpc_resend_timeout = 4 * HZ;

static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len);

/*
 * extract control messages from the sendmsg() control buffer
 * - fills in *user_call_ID, *command and *abort_code from the SOL_RXRPC
 *   control messages found
 * - *command defaults to RXRPC_CMD_SEND_DATA if no command cmsg is present
 * - returns 0 on success or -EINVAL if the control buffer is malformed,
 *   carries an unknown cmsg type or lacks a user call ID
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
			      unsigned long *user_call_ID,
			      enum rxrpc_command *command,
			      u32 *abort_code)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	*command = RXRPC_CMD_SEND_DATA;

	/* A user call ID is mandatory, so an empty control buffer can never
	 * be valid.
	 */
	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		/* Length of the cmsg payload, excluding the aligned header. */
		len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			/* A compat (32-bit) caller passes a u32 ID; a native
			 * caller passes an unsigned long.
			 */
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				*user_call_ID = *(u32 *) CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				*user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			_debug("User Call ID %lx", *user_call_ID);
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			/* Commands are mutually exclusive - reject a second
			 * command cmsg.
			 */
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(*abort_code))
				return -EINVAL;
			*abort_code = *(unsigned int *) CMSG_DATA(cmsg);
			_debug("Abort %x", *abort_code);
			/* An abort code of 0 is indistinguishable from "no
			 * abort", so it is not permitted.
			 */
			if (*abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_ACCEPT;
			/* ACCEPT carries no payload. */
			if (len != 0)
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}

/*
 * abort a call, sending an ABORT packet to the peer
 * - marks the call locally aborted and leaves actual transmission of the
 *   ABORT packet to the call processor (RXRPC_CALL_EV_ABORT)
 * - the resend and ACK timers are stopped and their pending events cleared
 *   since no further DATA/ACK activity is wanted for this call
 */
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);

	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = abort_code;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_EV_ACK, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		/* Get the call processor to transmit the ABORT. */
		rxrpc_queue_call(call);
	}

	write_unlock_bh(&call->state_lock);
}

/*
 * Create a new client call for sendmsg.
131 */ 132 static struct rxrpc_call * 133 rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, 134 unsigned long user_call_ID) 135 { 136 struct rxrpc_conn_bundle *bundle; 137 struct rxrpc_transport *trans; 138 struct rxrpc_call *call; 139 struct key *key; 140 long ret; 141 142 DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name); 143 144 _enter(""); 145 146 if (!msg->msg_name) 147 return ERR_PTR(-EDESTADDRREQ); 148 149 trans = rxrpc_name_to_transport(rx, msg->msg_name, msg->msg_namelen, 0, 150 GFP_KERNEL); 151 if (IS_ERR(trans)) { 152 ret = PTR_ERR(trans); 153 goto out; 154 } 155 156 key = rx->key; 157 if (key && !rx->key->payload.data[0]) 158 key = NULL; 159 bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, GFP_KERNEL); 160 if (IS_ERR(bundle)) { 161 ret = PTR_ERR(bundle); 162 goto out_trans; 163 } 164 165 call = rxrpc_new_client_call(rx, trans, bundle, user_call_ID, 166 GFP_KERNEL); 167 rxrpc_put_bundle(trans, bundle); 168 rxrpc_put_transport(trans); 169 if (IS_ERR(call)) { 170 ret = PTR_ERR(call); 171 goto out_trans; 172 } 173 174 _leave(" = %p\n", call); 175 return call; 176 177 out_trans: 178 rxrpc_put_transport(trans); 179 out: 180 _leave(" = %ld", ret); 181 return ERR_PTR(ret); 182 } 183 184 /* 185 * send a message forming part of a client call through an RxRPC socket 186 * - caller holds the socket locked 187 * - the socket may be either a client socket or a server socket 188 */ 189 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) 190 { 191 enum rxrpc_command cmd; 192 struct rxrpc_call *call; 193 unsigned long user_call_ID = 0; 194 u32 abort_code = 0; 195 int ret; 196 197 _enter(""); 198 199 ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code); 200 if (ret < 0) 201 return ret; 202 203 if (cmd == RXRPC_CMD_ACCEPT) { 204 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) 205 return -EINVAL; 206 call = rxrpc_accept_call(rx, user_call_ID); 207 if (IS_ERR(call)) 208 return PTR_ERR(call); 
209 rxrpc_put_call(call); 210 return 0; 211 } 212 213 call = rxrpc_find_call_by_user_ID(rx, user_call_ID); 214 if (!call) { 215 if (cmd != RXRPC_CMD_SEND_DATA) 216 return -EBADSLT; 217 call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID); 218 if (IS_ERR(call)) 219 return PTR_ERR(call); 220 } 221 222 _debug("CALL %d USR %lx ST %d on CONN %p", 223 call->debug_id, call->user_call_ID, call->state, call->conn); 224 225 if (call->state >= RXRPC_CALL_COMPLETE) { 226 /* it's too late for this call */ 227 ret = -ECONNRESET; 228 } else if (cmd == RXRPC_CMD_SEND_ABORT) { 229 rxrpc_send_abort(call, abort_code); 230 ret = 0; 231 } else if (cmd != RXRPC_CMD_SEND_DATA) { 232 ret = -EINVAL; 233 } else if (!call->in_clientflag && 234 call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { 235 /* request phase complete for this client call */ 236 ret = -EPROTO; 237 } else if (call->in_clientflag && 238 call->state != RXRPC_CALL_SERVER_ACK_REQUEST && 239 call->state != RXRPC_CALL_SERVER_SEND_REPLY) { 240 /* Reply phase not begun or not complete for service call. */ 241 ret = -EPROTO; 242 } else { 243 ret = rxrpc_send_data(rx, call, msg, len); 244 } 245 246 rxrpc_put_call(call); 247 _leave(" = %d", ret); 248 return ret; 249 } 250 251 /** 252 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call 253 * @call: The call to send data through 254 * @msg: The data to send 255 * @len: The amount of data to send 256 * 257 * Allow a kernel service to send data on a call. The call must be in an state 258 * appropriate to sending data. No control data should be supplied in @msg, 259 * nor should an address be supplied. MSG_MORE should be flagged if there's 260 * more data to come, otherwise this data will end the transmission phase. 
 */
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
			   size_t len)
{
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	/* Kernel callers must not supply an address or control messages. */
	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	lock_sock(&call->socket->sk);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	if (call->state >= RXRPC_CALL_COMPLETE) {
		ret = -ESHUTDOWN; /* it's too late for this call */
	} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
		ret = -EPROTO; /* request phase complete for this client call */
	} else {
		ret = rxrpc_send_data(call->socket, call, msg, len);
	}

	release_sock(&call->socket->sk);
	_leave(" = %d", ret);
	return ret;
}

EXPORT_SYMBOL(rxrpc_kernel_send_data);

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 *
 * Allow a kernel service to abort a call, if it's still in an abortable state.
 */
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
{
	_enter("{%d},%d", call->debug_id, abort_code);

	lock_sock(&call->socket->sk);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	/* Racy pre-check; rxrpc_send_abort() rechecks under state_lock. */
	if (call->state < RXRPC_CALL_COMPLETE)
		rxrpc_send_abort(call, abort_code);

	release_sock(&call->socket->sk);
	_leave("");
}

EXPORT_SYMBOL(rxrpc_kernel_abort_call);

/*
 * send a packet through the transport endpoint
 * - tries first with the don't-fragment bit set; on -EMSGSIZE the packet is
 *   retransmitted with PMTU discovery disabled so UDP may fragment it
 */
int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
{
	struct kvec iov[1];
	struct msghdr msg;
	int ret, opt;

	_enter(",{%d}", skb->len);

	/* The wire header was written into the headroom at skb->head, so the
	 * whole buffer from there is transmitted.
	 */
	iov[0].iov_base = skb->head;
	iov[0].iov_len = skb->len;

	/* NOTE(review): only the IPv4 transport address is used here -
	 * presumably IPv6 was not yet supported at this point.
	 */
	msg.msg_name = &trans->peer->srx.transport.sin;
	msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (skb->len - sizeof(struct rxrpc_wire_header) < trans->peer->maxdata) {
		/* Taken for reading so that concurrent senders don't race
		 * with the fragmentable path flipping IP_MTU_DISCOVER below.
		 */
		down_read(&trans->local->defrag_sem);
		/* send the packet by UDP
		 * - returns -EMSGSIZE if UDP would have to fragment the packet
		 *   to go out of the interface
		 *   - in which case, we'll have processed the ICMP error
		 *     message and update the peer record
		 */
		ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
				     iov[0].iov_len);

		up_read(&trans->local->defrag_sem);
		if (ret == -EMSGSIZE)
			goto send_fragmentable;

		_leave(" = %d [%u]", ret, trans->peer->maxdata);
		return ret;
	}

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	/* Exclude all non-fragmentable sends while PMTU discovery is off. */
	down_write(&trans->local->defrag_sem);
	opt = IP_PMTUDISC_DONT;
	ret = kernel_setsockopt(trans->local->socket, SOL_IP,
IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret == 0) {
		ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
				     iov[0].iov_len);

		/* Restore PMTU discovery for subsequent sends. */
		opt = IP_PMTUDISC_DO;
		kernel_setsockopt(trans->local->socket, SOL_IP,
				  IP_MTU_DISCOVER, (char *) &opt, sizeof(opt));
	}

	up_write(&trans->local->defrag_sem);
	_leave(" = %d [frag %u]", ret, trans->peer->maxdata);
	return ret;
}

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 * - returns 0 when space is available or a sock_intr_errno() error if
 *   interrupted by a signal
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%d},%ld",
	       CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
			  call->acks_winsz),
	       *timeo);

	add_wait_queue(&call->tx_waitq, &myself);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		/* Done as soon as the circular ACK window has space again. */
		if (CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
			       call->acks_winsz) > 0)
			break;
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		/* Drop the socket lock whilst asleep so the receive side can
		 * make progress and open the window.
		 */
		release_sock(&rx->sk);
		*timeo = schedule_timeout(*timeo);
		lock_sock(&rx->sk);
	}

	remove_wait_queue(&call->tx_waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * attempt to schedule an instant Tx resend
 * - only fires the resend event if the resend timer could be stopped (i.e.
 *   its handler isn't already running and about to do the same work)
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call)
{
	read_lock_bh(&call->state_lock);
	if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
			rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * queue a packet for transmission, set the resend timer and attempt
 * to send the packet immediately
 */
static void
rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
		   bool last)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	int ret;

	_net("queue skb %p [%d]", skb, call->acks_head);

	ASSERT(call->acks_window != NULL);
	call->acks_window[call->acks_head] = (unsigned long) skb;
	/* Make the skb visible in the window before advancing the head. */
	smp_wmb();
	call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			if (!last)
				break;
			/* Fall through - the last reply packet also ends the
			 * reply phase.
			 */
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	_proto("Tx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

	/* Arm the resend timer if it isn't already running. */
	sp->need_resend = false;
	sp->resend_at = jiffies + rxrpc_resend_timeout;
	if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
		_debug("run timer");
		call->resend_timer.expires = sp->resend_at;
		add_timer(&call->resend_timer);
	}

	/* attempt to cancel the rx-ACK timer, deferring reply transmission if
	 * we're ACK'ing the request phase of an incoming call */
	ret = -EAGAIN;
	if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
		/* the packet may be freed by rxrpc_process_call() before this
		 * returns */
		ret = rxrpc_send_packet(call->conn->trans, skb);
		_net("sent skb %p", skb);
	} else {
		_debug("failed to delete ACK timer");
	}

	/* On failure, mark the packet for resend and kick the resend event. */
	if (ret < 0) {
		_debug("need instant resend %d", ret);
		sp->need_resend = true;
		rxrpc_instant_resend(call);
	}

	_leave("");
}

/*
 * Convert a host-endian header into a network-endian header.
 */
static void rxrpc_insert_header(struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	/* Marshal the host-endian copy kept in sp->hdr into wire order. */
	whdr.epoch = htonl(sp->hdr.epoch);
	whdr.cid = htonl(sp->hdr.cid);
	whdr.callNumber = htonl(sp->hdr.callNumber);
	whdr.seq = htonl(sp->hdr.seq);
	whdr.serial = htonl(sp->hdr.serial);
	whdr.type = sp->hdr.type;
	whdr.flags = sp->hdr.flags;
	whdr.userStatus = sp->hdr.userStatus;
	whdr.securityIndex = sp->hdr.securityIndex;
	whdr._rsvd = htons(sp->hdr._rsvd);
	whdr.serviceId = htons(sp->hdr.serviceId);

	/* The header goes into the headroom reserved by rxrpc_send_data(). */
	memcpy(skb->head, &whdr, sizeof(whdr));
}

/*
 * send data through a socket
 * - must be called in process context
 * - caller holds the socket locked
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, copied;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;

	more = msg->msg_flags & MSG_MORE;

	/* Resume filling a partially-built packet left over from a previous
	 * sendmsg() call, if there is one.
	 */
	skb = call->tx_pending;
	call->tx_pending = NULL;

	copied = 0;
	do {
		if (!skb) {
			size_t size, chunk, max, space;

			_debug("alloc");

			/* Wait for the Tx window to open if it's full. */
			if (CIRC_SPACE(call->acks_head,
				       ACCESS_ONCE(call->acks_tail),
				       call->acks_winsz) <= 0) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo);
				if (ret < 0)
					goto maybe_error;
			}

			/* Work out the largest payload chunk, allowing for
			 * the security wrapper and alignment requirements.
			 */
			max = call->conn->trans->peer->maxdata;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > msg_data_left(msg) && !more)
				chunk = msg_data_left(msg);

			/* Round the data space up to the alignment size. */
			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->header_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			rxrpc_new_skb(skb);

			_debug("ALLOC SEND %p", skb);

			/* skb->mark is used to count the user data added. */
			ASSERTCMP(skb->mark, ==, 0);

			/* Reserve headroom for the wire header, which is
			 * written in later by rxrpc_insert_header().
			 */
			_debug("HS: %u", call->conn->header_size);
			skb_reserve(skb, call->conn->header_size);
			skb->len += call->conn->header_size;

			sp = rxrpc_skb(skb);
			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			     skb_headroom(skb),
			     skb_tailroom(skb),
			     skb_headlen(skb),
			     sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			_debug("add");
			ret = skb_add_data(skb, &msg->msg_iter, copy);
			_debug("added");
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (call->state > RXRPC_CALL_COMPLETE)
			goto call_aborted;

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			uint32_t seq;
			size_t pad;

			/* pad out if we're using security */
			if (conn->security_ix) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					memset(skb_put(skb, pad), 0, pad);
			}

			seq = atomic_inc_return(&call->sequence);

			/* Fill in the protocol header (host-endian; converted
			 * to wire order by rxrpc_insert_header() below).
			 */
			sp->hdr.epoch = conn->epoch;
			sp->hdr.cid = call->cid;
			sp->hdr.callNumber = call->call_id;
			sp->hdr.seq = seq;
			sp->hdr.serial = atomic_inc_return(&conn->serial);
			sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
			sp->hdr.userStatus = 0;
			sp->hdr.securityIndex = conn->security_ix;
			sp->hdr._rsvd = 0;
			sp->hdr.serviceId = call->service_id;

			sp->hdr.flags = conn->out_clientflag;
			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (CIRC_SPACE(call->acks_head,
					    ACCESS_ONCE(call->acks_tail),
					    call->acks_winsz) > 1)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;
			/* Solicit an ACK on every other packet whilst more
			 * data is outstanding.
			 */
			if (more && seq & 1)
				sp->hdr.flags |= RXRPC_REQUEST_ACK;

			ret = conn->security->secure_packet(
				call, skb, skb->mark,
				skb->head + sizeof(struct rxrpc_wire_header));
			if (ret < 0)
				goto out;

			rxrpc_insert_header(skb);
			rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
			skb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
out:
	/* Park any incomplete packet for the next sendmsg() call. */
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

call_aborted:
	rxrpc_free_skb(skb);
	/* Local error reports are biased by RXRPC_LOCAL_ERROR_OFFSET to
	 * distinguish them from remote abort codes; undo that here.
	 */
	if (call->state == RXRPC_CALL_NETWORK_ERROR)
		ret = call->error_report < RXRPC_LOCAL_ERROR_OFFSET ?
			call->error_report :
			call->error_report - RXRPC_LOCAL_ERROR_OFFSET;
	else
		ret = -ECONNABORTED;
	_leave(" = %d", ret);
	return ret;

maybe_error:
	/* Report partial progress in preference to an error. */
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}