// SPDX-License-Identifier: GPL-2.0
#include <net/tcp.h>
#include <net/strparser.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <net/espintcp.h>
#include <linux/skmsg.h>
#include <net/inet_common.h>
#include <trace/events/sock.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
#include <net/hotdata.h>

static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
			  struct sock *sk)
{
	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf ||
	    !sk_rmem_schedule(sk, skb, skb->truesize)) {
		XFRM_INC_STATS(sock_net(sk), LINUX_MIB_XFRMINERROR);
		kfree_skb(skb);
		return;
	}

	skb_set_owner_r(skb, sk);

	memset(skb->cb, 0, sizeof(skb->cb));
	skb_queue_tail(&ctx->ike_queue, skb);
	ctx->saved_data_ready(sk);
}

static void handle_esp(struct sk_buff *skb, struct sock *sk)
{
	struct tcp_skb_cb *tcp_cb = (struct tcp_skb_cb *)skb->cb;

	skb_reset_transport_header(skb);

	/* restore IP CB, we need at least IP6CB->nhoff */
	memmove(skb->cb, &tcp_cb->header, sizeof(tcp_cb->header));

	rcu_read_lock();
	skb->dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
	local_bh_disable();
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		ipv6_stub->xfrm6_rcv_encap(skb, IPPROTO_ESP, 0, TCP_ENCAP_ESPINTCP);
	else
#endif
		xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, TCP_ENCAP_ESPINTCP);
	local_bh_enable();
	rcu_read_unlock();
}

static void espintcp_rcv(struct strparser *strp, struct sk_buff *skb)
{
	struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx,
						strp);
	struct strp_msg *rxm = strp_msg(skb);
	int len = rxm->full_len - 2;
	u32 nonesp_marker;
	int err;

	/* keepalive packet? */
	if (unlikely(len == 1)) {
		u8 data;

		err = skb_copy_bits(skb, rxm->offset + 2, &data, 1);
		if (err < 0) {
			XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
			kfree_skb(skb);
			return;
		}

		if (data == 0xff) {
			kfree_skb(skb);
			return;
		}
	}

	/* drop other short messages */
	if (unlikely(len <= sizeof(nonesp_marker))) {
		XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
		kfree_skb(skb);
		return;
	}

	err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker,
			    sizeof(nonesp_marker));
	if (err < 0) {
		XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
		kfree_skb(skb);
		return;
	}

	/* remove header, leave non-ESP marker/SPI */
	if (!pskb_pull(skb, rxm->offset + 2)) {
		XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);
		kfree_skb(skb);
		return;
	}

	if (pskb_trim(skb, rxm->full_len - 2) != 0) {
		XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);
		kfree_skb(skb);
		return;
	}

	if (nonesp_marker == 0)
		handle_nonesp(ctx, skb, strp->sk);
	else
		handle_esp(skb, strp->sk);
}

static int espintcp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	__be16 blen;
	u16 len;
	int err;

	if (skb->len < rxm->offset + 2)
		return 0;

	err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen));
	if (err < 0)
		return err;

	len = be16_to_cpu(blen);
	if (len < 2)
		return -EINVAL;

	return len;
}
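
/*
 * Wire format recap (RFC 8229): every message on the stream starts with a
 * 2-byte big-endian length that includes the length field itself. The
 * payload is either an ESP packet (first 4 bytes are the non-zero SPI) or
 * a 4-byte zero "non-ESP marker" followed by an IKE message, which is what
 * espintcp_parse()/espintcp_rcv() above implement. As a rough sketch of
 * what a peer doing this framing in userspace (i.e. without this ULP,
 * which adds the length prefix itself) might do for an IKE message --
 * hypothetical helper, not part of this file:
 *
 *	static ssize_t ike_send_framed(int fd, const void *msg, uint16_t len)
 *	{
 *		uint8_t hdr[6] = { 0 };			// length + non-ESP marker
 *		uint16_t flen = htons(len + sizeof(hdr));
 *
 *		memcpy(hdr, &flen, sizeof(flen));	// bytes 2..5 stay zero
 *		if (send(fd, hdr, sizeof(hdr), MSG_MORE) != sizeof(hdr))
 *			return -1;
 *		return send(fd, msg, len, 0);
 *	}
 */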
static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			    int flags, int *addr_len)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);
	struct sk_buff *skb;
	int err = 0;
	int copied;
	int off = 0;

	skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
	if (!skb) {
		if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = len;
	if (copied > skb->len)
		copied = skb->len;
	else if (copied < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	if (flags & MSG_TRUNC)
		copied = skb->len;
	kfree_skb(skb);
	return copied;
}

int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);

	if (skb_queue_len(&ctx->out_queue) >=
	    READ_ONCE(net_hotdata.max_backlog)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	__skb_queue_tail(&ctx->out_queue, skb);

	return 0;
}
EXPORT_SYMBOL_GPL(espintcp_queue_out);

/* espintcp length field is 2B and length includes the length field's size */
#define MAX_ESPINTCP_MSG (((1 << 16) - 1) - 2)

static int espintcp_sendskb_locked(struct sock *sk, struct espintcp_msg *emsg,
				   int flags)
{
	do {
		int ret;

		ret = skb_send_sock_locked(sk, emsg->skb,
					   emsg->offset, emsg->len);
		if (ret < 0)
			return ret;

		emsg->len -= ret;
		emsg->offset += ret;
	} while (emsg->len > 0);

	kfree_skb(emsg->skb);
	memset(emsg, 0, sizeof(*emsg));

	return 0;
}

static int espintcp_sendskmsg_locked(struct sock *sk,
				     struct espintcp_msg *emsg, int flags)
{
	struct msghdr msghdr = {
		.msg_flags = flags | MSG_SPLICE_PAGES | MSG_MORE,
	};
	struct sk_msg *skmsg = &emsg->skmsg;
	bool more = flags & MSG_MORE;
	struct scatterlist *sg;
	int done = 0;
	int ret;

	sg = &skmsg->sg.data[skmsg->sg.start];
	do {
		struct bio_vec bvec;
		size_t size = sg->length - emsg->offset;
		int offset = sg->offset + emsg->offset;
		struct page *p;

		emsg->offset = 0;

		if (sg_is_last(sg) && !more)
			msghdr.msg_flags &= ~MSG_MORE;

		p = sg_page(sg);
retry:
		bvec_set_page(&bvec, p, size, offset);
		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
		ret = tcp_sendmsg_locked(sk, &msghdr, size);
		if (ret < 0) {
			emsg->offset = offset - sg->offset;
			skmsg->sg.start += done;
			return ret;
		}

		if (ret != size) {
			offset += ret;
			size -= ret;
			goto retry;
		}

		done++;
		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
	} while (sg);

	memset(emsg, 0, sizeof(*emsg));

	return 0;
}
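
/*
 * Receive API note: with this ULP installed, recvmsg() on the TCP socket
 * behaves datagram-style. Each call returns one complete non-ESP message
 * from ike_queue, with the 4-byte zero marker still in front (espintcp_rcv()
 * only strips the length field), and MSG_TRUNC is set in msg_flags when the
 * buffer was too small. A minimal userspace read-loop sketch, where
 * handle_ike() is a hypothetical consumer:
 *
 *	uint8_t buf[65536];
 *	ssize_t n;
 *
 *	while ((n = recv(fd, buf, sizeof(buf), 0)) > 4)
 *		handle_ike(buf + 4, n - 4);	// skip the non-ESP marker
 */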
static int espintcp_push_msgs(struct sock *sk, int flags)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);
	struct espintcp_msg *emsg = &ctx->partial;
	int err;

	if (!emsg->len)
		return 0;

	if (ctx->tx_running)
		return -EAGAIN;
	ctx->tx_running = 1;

	if (emsg->skb)
		err = espintcp_sendskb_locked(sk, emsg, flags);
	else
		err = espintcp_sendskmsg_locked(sk, emsg, flags);
	if (err == -EAGAIN) {
		ctx->tx_running = 0;
		return flags & MSG_DONTWAIT ? -EAGAIN : 0;
	}
	if (!err)
		memset(emsg, 0, sizeof(*emsg));

	ctx->tx_running = 0;

	return err;
}

int espintcp_push_skb(struct sock *sk, struct sk_buff *skb)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);
	struct espintcp_msg *emsg = &ctx->partial;
	unsigned int len;
	int offset;

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ECONNRESET;
	}

	offset = skb_transport_offset(skb);
	len = skb->len - offset;

	espintcp_push_msgs(sk, 0);

	if (emsg->len) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_set_owner_w(skb, sk);

	emsg->offset = offset;
	emsg->len = len;
	emsg->skb = skb;

	espintcp_push_msgs(sk, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(espintcp_push_skb);

static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct espintcp_ctx *ctx = espintcp_getctx(sk);
	struct espintcp_msg *emsg = &ctx->partial;
	struct iov_iter pfx_iter;
	struct kvec pfx_iov = {};
	size_t msglen = size + 2;
	char buf[2] = {0};
	int err, end;

	if (msg->msg_flags & ~MSG_DONTWAIT)
		return -EOPNOTSUPP;

	if (size > MAX_ESPINTCP_MSG)
		return -EMSGSIZE;

	if (msg->msg_controllen)
		return -EOPNOTSUPP;

	lock_sock(sk);

	err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT);
	if (err < 0) {
		if (err != -EAGAIN || !(msg->msg_flags & MSG_DONTWAIT))
			err = -ENOBUFS;
		goto unlock;
	}

	sk_msg_init(&emsg->skmsg);
	while (1) {
		/* only -ENOMEM is possible since we don't coalesce */
		err = sk_msg_alloc(sk, &emsg->skmsg, msglen, 0);
		if (!err)
			break;

		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto fail;
	}

	*((__be16 *)buf) = cpu_to_be16(msglen);
	pfx_iov.iov_base = buf;
	pfx_iov.iov_len = sizeof(buf);
	iov_iter_kvec(&pfx_iter, ITER_SOURCE, &pfx_iov, 1, pfx_iov.iov_len);

	err = sk_msg_memcopy_from_iter(sk, &pfx_iter, &emsg->skmsg,
				       pfx_iov.iov_len);
	if (err < 0)
		goto fail;

	err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, &emsg->skmsg, size);
	if (err < 0)
		goto fail;

	end = emsg->skmsg.sg.end;
	emsg->len = size;
	sk_msg_iter_var_prev(end);
	sg_mark_end(sk_msg_elem(&emsg->skmsg, end));

	tcp_rate_check_app_limited(sk);

	err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT);
	/* this message could be partially sent, keep it */

	release_sock(sk);

	return size;

fail:
	sk_msg_free(sk, &emsg->skmsg);
	memset(emsg, 0, sizeof(*emsg));
unlock:
	release_sock(sk);
	return err;
}

static struct proto espintcp_prot __ro_after_init;
static struct proto_ops espintcp_ops __ro_after_init;
static struct proto espintcp6_prot;
static struct proto_ops espintcp6_ops;
static DEFINE_MUTEX(tcpv6_prot_mutex);

static void espintcp_data_ready(struct sock *sk)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);

	trace_sk_data_ready(sk);

	strp_data_ready(&ctx->strp);
}

static void espintcp_tx_work(struct work_struct *work)
{
	struct espintcp_ctx *ctx = container_of(work,
						struct espintcp_ctx, work);
	struct sock *sk = ctx->strp.sk;

	lock_sock(sk);
	if (!ctx->tx_running)
		espintcp_push_msgs(sk, 0);
	release_sock(sk);
}
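
/*
 * TX concurrency: ctx->tx_running lets espintcp_push_msgs() bail out with
 * -EAGAIN instead of re-entering the send path (it can be called from
 * sendmsg, from the ESP output path via espintcp_push_skb(), and from the
 * work item above). When a partial message is left in ctx->partial,
 * espintcp_write_space() below schedules espintcp_tx_work() so the
 * remainder is flushed once the socket regains send buffer space.
 */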
static void espintcp_write_space(struct sock *sk)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);

	schedule_work(&ctx->work);
	ctx->saved_write_space(sk);
}

static void espintcp_destruct(struct sock *sk)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);

	ctx->saved_destruct(sk);
	kfree(ctx);
}

bool tcp_is_ulp_esp(struct sock *sk)
{
	return sk->sk_prot == &espintcp_prot || sk->sk_prot == &espintcp6_prot;
}
EXPORT_SYMBOL_GPL(tcp_is_ulp_esp);

static void build_protos(struct proto *espintcp_prot,
			 struct proto_ops *espintcp_ops,
			 const struct proto *orig_prot,
			 const struct proto_ops *orig_ops);

static int espintcp_init_sk(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct strp_callbacks cb = {
		.rcv_msg = espintcp_rcv,
		.parse_msg = espintcp_parse,
	};
	struct espintcp_ctx *ctx;
	int err;

	/* sockmap is not compatible with espintcp */
	if (sk->sk_user_data)
		return -EBUSY;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	err = strp_init(&ctx->strp, sk, &cb);
	if (err)
		goto free;

	__sk_dst_reset(sk);

	strp_check_rcv(&ctx->strp);
	skb_queue_head_init(&ctx->ike_queue);
	skb_queue_head_init(&ctx->out_queue);

	if (sk->sk_family == AF_INET) {
		sk->sk_prot = &espintcp_prot;
		sk->sk_socket->ops = &espintcp_ops;
	} else {
		mutex_lock(&tcpv6_prot_mutex);
		if (!espintcp6_prot.recvmsg)
			build_protos(&espintcp6_prot, &espintcp6_ops, sk->sk_prot, sk->sk_socket->ops);
		mutex_unlock(&tcpv6_prot_mutex);

		sk->sk_prot = &espintcp6_prot;
		sk->sk_socket->ops = &espintcp6_ops;
	}
	ctx->saved_data_ready = sk->sk_data_ready;
	ctx->saved_write_space = sk->sk_write_space;
	ctx->saved_destruct = sk->sk_destruct;
	sk->sk_data_ready = espintcp_data_ready;
	sk->sk_write_space = espintcp_write_space;
	sk->sk_destruct = espintcp_destruct;
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_WORK(&ctx->work, espintcp_tx_work);

	/* avoid using task_frag */
	sk->sk_allocation = GFP_ATOMIC;
	sk->sk_use_task_frag = false;

	return 0;

free:
	kfree(ctx);
	return err;
}

static void espintcp_release(struct sock *sk)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&ctx->out_queue, &queue);

	while ((skb = __skb_dequeue(&queue)))
		espintcp_push_skb(sk, skb);

	tcp_release_cb(sk);
}

static void espintcp_close(struct sock *sk, long timeout)
{
	struct espintcp_ctx *ctx = espintcp_getctx(sk);
	struct espintcp_msg *emsg = &ctx->partial;

	strp_stop(&ctx->strp);

	sk->sk_prot = &tcp_prot;
	barrier();

	cancel_work_sync(&ctx->work);
	strp_done(&ctx->strp);

	skb_queue_purge(&ctx->out_queue);
	skb_queue_purge(&ctx->ike_queue);

	if (emsg->len) {
		if (emsg->skb)
			kfree_skb(emsg->skb);
		else
			sk_msg_free(sk, &emsg->skmsg);
	}

	tcp_close(sk, timeout);
}

static __poll_t espintcp_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct espintcp_ctx *ctx = espintcp_getctx(sk);

	if (!skb_queue_empty(&ctx->ike_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
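
/*
 * Poll note: datagram_poll() only looks at sk_receive_queue, which the
 * strparser drains, so espintcp_poll() additionally reports EPOLLIN
 * whenever ike_queue holds a decapsulated IKE message. Userspace can
 * therefore use plain poll()/epoll_wait() on the socket to wait for IKE
 * traffic.
 */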
static void build_protos(struct proto *espintcp_prot,
			 struct proto_ops *espintcp_ops,
			 const struct proto *orig_prot,
			 const struct proto_ops *orig_ops)
{
	memcpy(espintcp_prot, orig_prot, sizeof(struct proto));
	memcpy(espintcp_ops, orig_ops, sizeof(struct proto_ops));
	espintcp_prot->sendmsg = espintcp_sendmsg;
	espintcp_prot->recvmsg = espintcp_recvmsg;
	espintcp_prot->close = espintcp_close;
	espintcp_prot->release_cb = espintcp_release;
	espintcp_ops->poll = espintcp_poll;
}

static struct tcp_ulp_ops espintcp_ulp __read_mostly = {
	.name = "espintcp",
	.owner = THIS_MODULE,
	.init = espintcp_init_sk,
};

void __init espintcp_init(void)
{
	build_protos(&espintcp_prot, &espintcp_ops, &tcp_prot, &inet_stream_ops);

	tcp_register_ulp(&espintcp_ulp);
}
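
/*
 * Usage sketch: the ULP registers under the name "espintcp", so an IKE
 * daemon would typically attach it to its established TCP connection
 * (conventionally to port 4500 per RFC 8229) before installing xfrm states
 * with encap_type TCP_ENCAP_ESPINTCP -- assuming the standard TCP_ULP
 * setsockopt convention:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "espintcp", sizeof("espintcp"));
 */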