// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/util_macros.h>

#include <net/inet_common.h>
#include <net/tls.h>

static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes) {
				if (sge->length)
					sk_msg_iter_var_prev(i);
				break;
			}
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}
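/* Push the data described by @msg out through the regular TCP transmit
 * path via tcp_sendmsg_locked(), limited to @apply_bytes when non-zero.
 * Pages are handed over by reference (MSG_SPLICE_PAGES) rather than
 * copied, and fully consumed entries are released with put_page().
 * Callers must hold the socket lock.
 */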
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, };
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		struct bio_vec bvec;
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp)
			msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;

		if (flags & MSG_SENDPAGE_NOTLAST)
			msghdr.msg_flags |= MSG_MORE;

		bvec_set_page(&bvec, page, size, off);
		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
		ret = tcp_sendmsg_locked(sk, &msghdr, size);
		if (ret <= 0)
			return ret;

		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}
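/* Transfer @msg to the socket selected by an earlier BPF verdict:
 * queue it on @sk's psock ingress list when @ingress is set, otherwise
 * push it out through @sk's transmit path. Fails with -EPIPE if the
 * target socket no longer has a psock attached.
 */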
int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags)
{
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock))
		return -EPIPE;

	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
			     long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}
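/* recvmsg handler installed while a parser/verdict program is attached.
 * Data is consumed from the psock ingress queue only, so once a psock
 * exists this mirrors tcp_recvmsg()'s handling of socket errors,
 * shutdown, timeouts and pending signals itself instead of falling
 * back to it.
 */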
static int tcp_bpf_recvmsg_parser(struct sock *sk,
				  struct msghdr *msg,
				  size_t len,
				  int flags,
				  int *addr_len)
{
	struct sk_psock *psock;
	int copied;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (!len)
		return 0;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, flags, addr_len);

	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		long timeo;
		int data;

		if (sock_flag(sk, SOCK_DONE))
			goto out;

		if (sk->sk_err) {
			copied = sock_error(sk);
			goto out;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;

		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			goto out;
		}

		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
		if (!timeo) {
			copied = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			goto out;
		}

		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data && !sk_psock_queue_empty(psock))
			goto msg_bytes_ready;
		copied = -EAGAIN;
	}
out:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied;
}

static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (!len)
		return 0;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		long timeo;
		int data;

		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, flags, addr_len);
		}
		copied = -EAGAIN;
	}
	ret = copied;
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}
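/* Run the attached msg verdict program on @msg and act on the result:
 * __SK_PASS pushes to this socket's transmit path, __SK_REDIRECT hands
 * the data to another socket, __SK_DROP frees it and returns -EACCES.
 * Called with the socket lock held; the lock is dropped around the
 * redirect and re-taken afterwards.
 */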
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
	struct sock *sk_redir;
	u32 tosend, origsize, sent, delta = 0;
	u32 eval;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track the delta in msg size so it can be subtracted from
		 * the copied size returned to user space on SK_DROP. This
		 * ensures the user doesn't get a positive return code when
		 * msg_cut_data shrank the msg and the verdict was SK_DROP.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;
	eval = __SK_NONE;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (!psock->apply_bytes) {
			/* Clean up before releasing the sock lock. */
			eval = psock->eval;
			psock->eval = __SK_NONE;
			psock->sk_redir = NULL;
		}
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, tosend);
		release_sock(sk);

		origsize = msg->sg.size;
		ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    msg, tosend, flags);
		sent = origsize - msg->sg.size;

		if (eval == __SK_REDIRECT)
			sock_put(sk_redir);

		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length) {
			if (eval == __SK_REDIRECT)
				sk_mem_charge(sk, tosend - sent);
			goto more_data;
		}
	}
	return ret;
}
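/* sendmsg handler installed while a msg verdict program is attached:
 * user data is staged into an sk_msg scatterlist (honouring any
 * cork_bytes set by the program) and then run through
 * tcp_bpf_send_verdict().
 */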
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal sendpage flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
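	/* Stage user data into msg_tx (or the pending cork buffer) and
	 * let the verdict program decide where it goes. -ENOSPC means
	 * the scatterlist is full, so whatever fits is pushed out first.
	 */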
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}
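/* sendpage handler installed while a msg verdict program is attached:
 * the page is appended to the (possibly corked) sk_msg by reference
 * and the verdict program runs once any cork_bytes threshold is met.
 */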
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
			    size_t size, int flags)
{
	struct sk_msg tmp, *msg = NULL;
	int err = 0, copied = 0;
	struct sk_psock *psock;
	bool enospc = false;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendpage(sk, page, offset, size, flags);

	lock_sock(sk);
	if (psock->cork) {
		msg = psock->cork;
	} else {
		msg = &tmp;
		sk_msg_init(msg);
	}

	/* Catch case where ring is full and sendpage is stalled. */
	if (unlikely(sk_msg_full(msg)))
		goto out_err;

	sk_msg_page_add(msg, page, size, offset);
	sk_mem_charge(sk, size);
	copied = size;
	if (sk_msg_full(msg))
		enospc = true;
	if (psock->cork_bytes) {
		if (size > psock->cork_bytes)
			psock->cork_bytes = 0;
		else
			psock->cork_bytes -= size;
		if (psock->cork_bytes && !enospc)
			goto out_err;
		/* All cork bytes are accounted, rerun the prog. */
		psock->eval = __SK_NONE;
		psock->cork_bytes = 0;
	}

	err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}
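/* tcp_bpf_prots[] holds per-address-family copies of the TCP proto ops
 * with the handlers above patched in: TCP_BPF_BASE overrides close,
 * destroy and recvmsg; TCP_BPF_TX additionally overrides sendmsg and
 * sendpage; the RX/TXRX variants use the parser-aware recvmsg. The
 * IPv6 entries are rebuilt lazily from the socket's own proto, with
 * the base proto cached in tcpv6_prot_saved.
 */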
enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_RX,
	TCP_BPF_TXRX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE] = *base;
	prot[TCP_BPF_BASE].destroy = sock_map_destroy;
	prot[TCP_BPF_BASE].close = sock_map_close;
	prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;

	prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
	prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;

	prot[TCP_BPF_RX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser;

	prot[TCP_BPF_TXRX] = prot[TCP_BPF_TX];
	prot[TCP_BPF_TXRX].recvmsg = tcp_bpf_recvmsg_parser;
}

static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
late_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg == tcp_recvmsg &&
	       ops->sendmsg == tcp_sendmsg &&
	       ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}
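/* Switch @sk over to the tcp_bpf proto variant matching the programs
 * attached to @psock, or restore the original proto when @restore is
 * set. The restore path must go through tcp_update_ulp() when a ULP
 * such as TLS sits on top of the socket.
 */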
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

	if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
		config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX;
	}

	if (restore) {
		if (inet_csk_has_ulp(sk)) {
			/* TLS does not have an unhash proto in SW cases,
			 * but we need to ensure we stop using the sock_map
			 * unhash routine because the associated psock is being
			 * removed. So use the original unhash handler.
			 */
			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
		} else {
			sk->sk_write_space = psock->saved_write_space;
			/* Pairs with lockless read in sk_clone_lock() */
			sock_replace_proto(sk, psock->sk_proto);
		}
		return 0;
	}

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return -EINVAL;

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	/* Pairs with lockless read in sk_clone_lock() */
	sock_replace_proto(sk, &tcp_bpf_prots[family][config]);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	struct proto *prot = newsk->sk_prot;

	if (is_insidevar(prot, tcp_bpf_prots))
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */