Lines matching +full:mux in the Linux Kernel Connection Multiplexor (KCM) socket implementation, net/kcm/kcmsock.c. Only the lines that matched are shown below, grouped by the function they belong to; surrounding code is omitted.
// SPDX-License-Identifier: GPL-2.0-only

/* kcm_tx_msg() */
    return (struct kcm_tx_msg *)skb->cb;

/* report_csk_error() */
    csk->sk_err = EPIPE;

/* kcm_abort_tx_psock() */
    struct sock *csk = psock->sk;
    struct kcm_mux *mux = psock->mux;
    spin_lock_bh(&mux->lock);
    if (psock->tx_stopped) {
    spin_unlock_bh(&mux->lock);
    psock->tx_stopped = 1;
    KCM_STATS_INCR(psock->stats.tx_aborts);
    if (!psock->tx_kcm) {
    list_del(&psock->psock_avail_list);
    queue_work(kcm_wq, &psock->tx_kcm->tx_work);
    spin_unlock_bh(&mux->lock);

/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
    STRP_STATS_ADD(mux->stats.rx_bytes,
                   psock->strp.stats.bytes -
                   psock->saved_rx_bytes);
    mux->stats.rx_msgs +=
        psock->strp.stats.msgs - psock->saved_rx_msgs;
    psock->saved_rx_msgs = psock->strp.stats.msgs;
    psock->saved_rx_bytes = psock->strp.stats.bytes;

static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
    KCM_STATS_ADD(mux->stats.tx_bytes,
                  psock->stats.tx_bytes - psock->saved_tx_bytes);
    mux->stats.tx_msgs +=
        psock->stats.tx_msgs - psock->saved_tx_msgs;
    psock->saved_tx_msgs = psock->stats.tx_msgs;
    psock->saved_tx_bytes = psock->stats.tx_bytes;

/* kcm_rcv_ready() */
/* KCM is ready to receive messages on its queue-- either the KCM is new or
 * pending ready messages on a psock. RX mux lock held. */
    struct kcm_mux *mux = kcm->mux;
    if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
    while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
    if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
    skb_queue_head(&mux->rx_hold_queue, skb);
    WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
    while (!list_empty(&mux->psocks_ready)) {
    psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
    if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
    WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
    list_del(&psock->psock_ready_list);
    psock->ready_rx_msg = NULL;
    strp_unpause(&psock->strp);
    strp_check_rcv(&psock->strp);
    list_add_tail(&kcm->wait_rx_list,
                  &kcm->mux->kcm_rx_waiters);
    WRITE_ONCE(kcm->rx_wait, true);
/* kcm_rfree() */
    struct sock *sk = skb->sk;
    struct kcm_mux *mux = kcm->mux;
    unsigned int len = skb->truesize;
    atomic_sub(len, &sk->sk_rmem_alloc);
    if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
        sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
    spin_lock_bh(&mux->rx_lock);
    spin_unlock_bh(&mux->rx_lock);

/* kcm_queue_rcv_skb() */
    struct sk_buff_head *list = &sk->sk_receive_queue;
    if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
    return -ENOMEM;
    if (!sk_rmem_schedule(sk, skb, skb->truesize))
    return -ENOBUFS;
    skb->dev = NULL;
    skb->sk = sk;
    skb->destructor = kcm_rfree;
    atomic_add(skb->truesize, &sk->sk_rmem_alloc);
    sk_mem_charge(sk, skb->truesize);
    sk->sk_data_ready(sk);

/* RX mux lock held. */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
    skb->destructor = sock_rfree;
    if (list_empty(&mux->kcm_rx_waiters)) {
    skb_queue_tail(&mux->rx_hold_queue, skb);
    kcm = list_first_entry(&mux->kcm_rx_waiters,
    if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
    list_del(&kcm->wait_rx_list);
    WRITE_ONCE(kcm->rx_wait, false);

/* reserve_rx_kcm() */
    struct kcm_mux *mux = psock->mux;
    WARN_ON(psock->ready_rx_msg);
    if (psock->rx_kcm)
    return psock->rx_kcm;
    spin_lock_bh(&mux->rx_lock);
    if (psock->rx_kcm) {
    spin_unlock_bh(&mux->rx_lock);
    return psock->rx_kcm;
    kcm_update_rx_mux_stats(mux, psock);
    if (list_empty(&mux->kcm_rx_waiters)) {
    psock->ready_rx_msg = head;
    strp_pause(&psock->strp);
    list_add_tail(&psock->psock_ready_list,
                  &mux->psocks_ready);
    spin_unlock_bh(&mux->rx_lock);
    kcm = list_first_entry(&mux->kcm_rx_waiters,
    list_del(&kcm->wait_rx_list);
    WRITE_ONCE(kcm->rx_wait, false);
    psock->rx_kcm = kcm;
    WRITE_ONCE(kcm->rx_psock, psock);
    spin_unlock_bh(&mux->rx_lock);
/* unreserve_rx_kcm() */
    struct kcm_sock *kcm = psock->rx_kcm;
    struct kcm_mux *mux = psock->mux;
    spin_lock_bh(&mux->rx_lock);
    psock->rx_kcm = NULL;
    WRITE_ONCE(kcm->rx_psock, NULL);
    /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with ... */
    if (unlikely(kcm->done)) {
    spin_unlock_bh(&mux->rx_lock);
    INIT_WORK(&kcm->done_work, kcm_done_work);
    schedule_work(&kcm->done_work);
    if (unlikely(kcm->rx_disabled)) {
    requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
    } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
    spin_unlock_bh(&mux->rx_lock);

/* psock_data_ready() */
    read_lock_bh(&sk->sk_callback_lock);
    psock = (struct kcm_psock *)sk->sk_user_data;
    strp_data_ready(&psock->strp);
    read_unlock_bh(&sk->sk_callback_lock);

/* kcm_rcv_strparser() */
    if (kcm_queue_rcv_skb(&kcm->sk, skb)) {

/* kcm_parse_func_strparser() */
    struct bpf_prog *prog = psock->bpf_prog;
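kcm_parse_func_strparser() above runs the BPF program attached at SIOCKCMATTACH time over incoming data to find message boundaries. For illustration, a minimal sketch of such a socket-filter program follows, assuming a hypothetical framing in which every message begins with a 4-byte big-endian payload length; the name kcm_parse and the header layout are inventions for this example. A positive return value tells the strparser the total message length; 0 asks it to wait for more data.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("socket")
int kcm_parse(struct __sk_buff *skb)
{
    __u32 len;

    /* Read the hypothetical 4-byte length header. */
    if (bpf_skb_load_bytes(skb, 0, &len, sizeof(len)) < 0)
        return 0;                   /* header not complete yet */

    return bpf_ntohl(len) + 4;      /* total = header + payload */
}

char _license[] SEC("license") = "GPL";

The program is loaded as BPF_PROG_TYPE_SOCKET_FILTER and its fd is handed to the mux via the attach ioctl shown further down.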
/* psock_write_space() */
    struct kcm_mux *mux;
    read_lock_bh(&sk->sk_callback_lock);
    psock = (struct kcm_psock *)sk->sk_user_data;
    mux = psock->mux;
    spin_lock_bh(&mux->lock);
    kcm = psock->tx_kcm;
    if (kcm && !unlikely(kcm->tx_stopped))
    queue_work(kcm_wq, &kcm->tx_work);
    spin_unlock_bh(&mux->lock);
    read_unlock_bh(&sk->sk_callback_lock);

/* reserve_psock() */
    struct kcm_mux *mux = kcm->mux;
    psock = kcm->tx_psock;
    WARN_ON(kcm->tx_wait);
    if (unlikely(psock->tx_stopped))
    return kcm->tx_psock;
    spin_lock_bh(&mux->lock);
    psock = kcm->tx_psock;
    WARN_ON(kcm->tx_wait);
    spin_unlock_bh(&mux->lock);
    return kcm->tx_psock;
    if (!list_empty(&mux->psocks_avail)) {
    psock = list_first_entry(&mux->psocks_avail,
    list_del(&psock->psock_avail_list);
    if (kcm->tx_wait) {
    list_del(&kcm->wait_psock_list);
    kcm->tx_wait = false;
    kcm->tx_psock = psock;
    psock->tx_kcm = kcm;
    KCM_STATS_INCR(psock->stats.reserved);
    } else if (!kcm->tx_wait) {
    list_add_tail(&kcm->wait_psock_list,
                  &mux->kcm_tx_waiters);
    kcm->tx_wait = true;
    spin_unlock_bh(&mux->lock);

/* psock_now_avail(), mux lock held */
    struct kcm_mux *mux = psock->mux;
    if (list_empty(&mux->kcm_tx_waiters)) {
    list_add_tail(&psock->psock_avail_list,
                  &mux->psocks_avail);
    kcm = list_first_entry(&mux->kcm_tx_waiters,
    list_del(&kcm->wait_psock_list);
    kcm->tx_wait = false;
    psock->tx_kcm = kcm;
    kcm->tx_psock = psock;
    KCM_STATS_INCR(psock->stats.reserved);
    queue_work(kcm_wq, &kcm->tx_work);

/* unreserve_psock() */
    struct kcm_mux *mux = kcm->mux;
    spin_lock_bh(&mux->lock);
    psock = kcm->tx_psock;
    spin_unlock_bh(&mux->lock);
    kcm_update_tx_mux_stats(mux, psock);
    WARN_ON(kcm->tx_wait);
    kcm->tx_psock = NULL;
    psock->tx_kcm = NULL;
    KCM_STATS_INCR(psock->stats.unreserved);
    if (unlikely(psock->tx_stopped)) {
    if (psock->done) {
    list_del(&psock->psock_list);
    mux->psocks_cnt--;
    sock_put(psock->sk);
    fput(psock->sk->sk_socket->file);
    spin_unlock_bh(&mux->lock);
    spin_unlock_bh(&mux->lock);

/* kcm_report_tx_retry() */
    struct kcm_mux *mux = kcm->mux;
    spin_lock_bh(&mux->lock);
    KCM_STATS_INCR(mux->stats.tx_retries);
    spin_unlock_bh(&mux->lock);
/* kcm_write_msgs() */
    struct sock *sk = &kcm->sk;
    kcm->tx_wait_more = false;
    psock = kcm->tx_psock;
    if (unlikely(psock && psock->tx_stopped)) {
    if (skb_queue_empty(&sk->sk_write_queue))
    kcm_tx_msg(skb_peek(&sk->sk_write_queue))->started_tx = false;
    while ((head = skb_peek(&sk->sk_write_queue))) {
    if (!txm->started_tx) {
    txm->frag_offset = 0;
    txm->sent = 0;
    txm->started_tx = true;
    ret = -EINVAL;
    skb = txm->frag_skb;
    if (WARN_ON(!skb_shinfo(skb)->nr_frags) ||
        WARN_ON_ONCE(!skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
    ret = -EINVAL;
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
        msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);
    (const struct bio_vec *)skb_shinfo(skb)->frags,
    skb_shinfo(skb)->nr_frags, msize);
    iov_iter_advance(&msg.msg_iter, txm->frag_offset);
    ret = sock_sendmsg(psock->sk->sk_socket, &msg);
    if (ret == -EAGAIN) {
    txm->frag_skb = skb;
    kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
    txm->started_tx = false;
    txm->sent += ret;
    txm->frag_offset += ret;
    KCM_STATS_ADD(psock->stats.tx_bytes, ret);
    txm->frag_skb = skb_shinfo(skb)->frag_list;
    txm->frag_offset = 0;
    } else if (skb->next) {
    txm->frag_skb = skb->next;
    txm->frag_offset = 0;
    sk->sk_wmem_queued -= txm->sent;
    total_sent += txm->sent;
    skb_dequeue(&sk->sk_write_queue);
    KCM_STATS_INCR(psock->stats.tx_msgs);
    WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
    sk->sk_write_space(sk);

/* kcm_tx_work() */
    struct sock *sk = &kcm->sk;
    report_csk_error(&kcm->sk, -err);
    if (likely(sk->sk_socket) &&
        test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
    clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
    sk->sk_write_space(sk);

/* kcm_push() */
    if (kcm->tx_wait_more)
/* kcm_sendmsg() */
    struct sock *sk = sock->sk;
    long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
    int eor = (sock->type == SOCK_DGRAM) ?
              !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
    int err = -EPIPE;
    mutex_lock(&kcm->tx_mutex);
    if (sk->sk_err)
    if (kcm->seq_skb) {
    head = kcm->seq_skb;
    skb = kcm_tx_msg(head)->last_skb;
    set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
    head = alloc_skb(0, sk->sk_allocation);
    head = alloc_skb(0, sk->sk_allocation);
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    int i = skb_shinfo(skb)->nr_frags;
    if (!skb_can_coalesce(skb, i, pfrag->page,
                          pfrag->offset)) {
    tskb = alloc_skb(0, sk->sk_allocation);
    skb_shinfo(head)->frag_list = tskb;
    skb->next = tskb;
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    if (msg->msg_flags & MSG_SPLICE_PAGES) {
    err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
                               sk->sk_allocation);
    if (err == -EMSGSIZE)
    skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
    head->truesize += copy;
    pfrag->size - pfrag->offset);
    err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
                                   pfrag->page,
                                   pfrag->offset,
    &skb_shinfo(skb)->frags[i - 1], copy);
    skb_fill_page_desc(skb, i, pfrag->page,
                       pfrag->offset, copy);
    get_page(pfrag->page);
    pfrag->offset += copy;
    head->len += copy;
    head->data_len += copy;
    bool not_busy = skb_queue_empty(&sk->sk_write_queue);
    __skb_queue_tail(&sk->sk_write_queue, head);
    kcm->seq_skb = NULL;
    KCM_STATS_INCR(kcm->stats.tx_msgs);
    if (msg->msg_flags & MSG_BATCH) {
    kcm->tx_wait_more = true;
    } else if (kcm->tx_wait_more || not_busy) {
    report_csk_error(&kcm->sk, -err);
    kcm->seq_skb = head;
    kcm_tx_msg(head)->last_skb = skb;
    KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
    mutex_unlock(&kcm->tx_mutex);
    if (sock->type == SOCK_SEQPACKET) {
    if (head != kcm->seq_skb)
    kcm->seq_skb = NULL;
    err = sk_stream_error(sk, msg->msg_flags, err);
    if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
        sk->sk_write_space(sk);
    mutex_unlock(&kcm->tx_mutex);
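On the userspace side of kcm_sendmsg(), message boundaries follow the socket type, as the eor computation above shows: SOCK_SEQPACKET ends a message with MSG_EOR, SOCK_DGRAM ends one by omitting MSG_MORE, and MSG_BATCH marks that more sends follow so transmission may be deferred. A minimal sketch, assuming kcm_fd is a SOCK_SEQPACKET KCM socket:

#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t kcm_send_one(int kcm_fd, const void *buf, size_t len)
{
    struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
    struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

    /* MSG_EOR terminates the message on a SOCK_SEQPACKET KCM socket;
     * adding MSG_BATCH would hint that further sends follow at once. */
    return sendmsg(kcm_fd, &msg, MSG_EOR);
}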
/* kcm_splice_eof() */
    struct sock *sk = sock->sk;
    if (skb_queue_empty_lockless(&sk->sk_write_queue))

/* kcm_recvmsg() */
    struct sock *sk = sock->sk;
    if (len > stm->full_len)
        len = stm->full_len;
    err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
    KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
    if (copied < stm->full_len) {
    if (sock->type == SOCK_DGRAM) {
    msg->msg_flags |= MSG_TRUNC;
    stm->offset += copied;
    stm->full_len -= copied;
    msg->msg_flags |= MSG_EOR;
    KCM_STATS_INCR(kcm->stats.rx_msgs);
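The receive side is symmetric: recvmsg() on a KCM socket returns at most one parsed message, MSG_EOR in msg_flags marks a complete message, and, per the SOCK_DGRAM branch above, a too-small buffer truncates the message and sets MSG_TRUNC. A small sketch of the caller's side:

#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t kcm_recv_one(int kcm_fd, void *buf, size_t len, int *truncated)
{
    struct iovec iov = { .iov_base = buf, .iov_len = len };
    struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
    ssize_t n = recvmsg(kcm_fd, &msg, 0);

    /* On SOCK_DGRAM, a message larger than buf is cut short and the
     * remainder discarded; MSG_TRUNC reports that this happened. */
    if (n >= 0 && truncated)
        *truncated = !!(msg.msg_flags & MSG_TRUNC);
    return n;
}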
/* kcm_splice_read() */
    struct sock *sk = sock->sk;
    if (len > stm->full_len)
        len = stm->full_len;
    copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
    KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
    stm->offset += copied;
    stm->full_len -= copied;

/* kcm_recv_disable() */
    struct kcm_mux *mux = kcm->mux;
    if (kcm->rx_disabled)
    spin_lock_bh(&mux->rx_lock);
    kcm->rx_disabled = 1;
    if (!kcm->rx_psock) {
    if (kcm->rx_wait) {
    list_del(&kcm->wait_rx_list);
    WRITE_ONCE(kcm->rx_wait, false);
    requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
    spin_unlock_bh(&mux->rx_lock);

/* kcm_recv_enable() */
    struct kcm_mux *mux = kcm->mux;
    if (!kcm->rx_disabled)
    spin_lock_bh(&mux->rx_lock);
    kcm->rx_disabled = 0;
    spin_unlock_bh(&mux->rx_lock);

/* kcm_setsockopt() */
    struct kcm_sock *kcm = kcm_sk(sock->sk);
    return -ENOPROTOOPT;
    return -EINVAL;
    return -EFAULT;
    lock_sock(&kcm->sk);
    release_sock(&kcm->sk);
    err = -ENOPROTOOPT;

/* kcm_getsockopt() */
    struct kcm_sock *kcm = kcm_sk(sock->sk);
    return -ENOPROTOOPT;
    return -EFAULT;
    return -EINVAL;
    val = kcm->rx_disabled;
    return -ENOPROTOOPT;
    return -EFAULT;
    return -EFAULT;
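The only KCM-specific socket option handled above is KCM_RECV_DISABLE (level SOL_KCM, from <linux/kcm.h>); disabling reception makes the mux requeue this socket's pending messages to the other KCM sockets, as kcm_recv_disable() shows. A small sketch, with a fallback define in case libc headers lack the constant:

#include <sys/socket.h>
#include <linux/kcm.h>

#ifndef SOL_KCM
#define SOL_KCM 281     /* from <linux/socket.h> */
#endif

static int kcm_set_recv_disable(int kcm_fd, int disable)
{
    /* disable != 0 parks this socket; 0 re-enables delivery */
    return setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE,
                      &disable, sizeof(disable));
}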
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
    kcm->sk.sk_state = TCP_ESTABLISHED;
    /* Add to mux's kcm sockets list */
    kcm->mux = mux;
    spin_lock_bh(&mux->lock);
    head = &mux->kcm_socks;
    list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
    if (tkcm->index != index)
    head = &tkcm->kcm_sock_list;
    list_add(&kcm->kcm_sock_list, head);
    kcm->index = index;
    mux->kcm_socks_cnt++;
    spin_unlock_bh(&mux->lock);
    INIT_WORK(&kcm->tx_work, kcm_tx_work);
    mutex_init(&kcm->tx_mutex);
    spin_lock_bh(&mux->rx_lock);
    spin_unlock_bh(&mux->rx_lock);

/* kcm_attach() */
    struct kcm_sock *kcm = kcm_sk(sock->sk);
    struct kcm_mux *mux = kcm->mux;
    csk = csock->sk;
    return -EINVAL;
    if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
        csk->sk_protocol != IPPROTO_TCP) {
    err = -EOPNOTSUPP;
    if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
    err = -EOPNOTSUPP;
    err = -ENOMEM;
    psock->mux = mux;
    psock->sk = csk;
    psock->bpf_prog = prog;
    write_lock_bh(&csk->sk_callback_lock);
    if (csk->sk_user_data) {
    write_unlock_bh(&csk->sk_callback_lock);
    err = -EALREADY;
    err = strp_init(&psock->strp, csk, &cb);
    write_unlock_bh(&csk->sk_callback_lock);
    psock->save_data_ready = csk->sk_data_ready;
    psock->save_write_space = csk->sk_write_space;
    psock->save_state_change = csk->sk_state_change;
    csk->sk_user_data = psock;
    csk->sk_data_ready = psock_data_ready;
    csk->sk_write_space = psock_write_space;
    csk->sk_state_change = psock_state_change;
    write_unlock_bh(&csk->sk_callback_lock);
    /* Finished initialization, now add the psock to the MUX. */
    spin_lock_bh(&mux->lock);
    head = &mux->psocks;
    list_for_each_entry(tpsock, &mux->psocks, psock_list) {
    if (tpsock->index != index)
    head = &tpsock->psock_list;
    list_add(&psock->psock_list, head);
    psock->index = index;
    KCM_STATS_INCR(mux->stats.psock_attach);
    mux->psocks_cnt++;
    spin_unlock_bh(&mux->lock);
    strp_check_rcv(&psock->strp);

/* kcm_attach_ioctl() */
    csock = sockfd_lookup(info->fd, &err);
    return -ENOENT;
    prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
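Seen from userspace, kcm_attach_ioctl() is reached through SIOCKCMATTACH: the caller passes the fd of an established TCP socket together with the fd of the verifier-loaded SOCKET_FILTER program. A minimal sketch, assuming both descriptors already exist:

#include <sys/ioctl.h>
#include <linux/kcm.h>

static int kcm_attach_tcp(int kcm_fd, int tcp_fd, int bpf_prog_fd)
{
    struct kcm_attach attach = {
        .fd = tcp_fd,          /* established TCP transport */
        .bpf_fd = bpf_prog_fd, /* message-delineation program */
    };

    return ioctl(kcm_fd, SIOCKCMATTACH, &attach);
}

Per the checks in kcm_attach() above, the transport must be an AF_INET or AF_INET6 TCP socket that is neither listening nor closed, or the ioctl fails with EOPNOTSUPP.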
/* kcm_unattach() */
    struct sock *csk = psock->sk;
    struct kcm_mux *mux = psock->mux;
    write_lock_bh(&csk->sk_callback_lock);
    csk->sk_user_data = NULL;
    csk->sk_data_ready = psock->save_data_ready;
    csk->sk_write_space = psock->save_write_space;
    csk->sk_state_change = psock->save_state_change;
    strp_stop(&psock->strp);
    if (WARN_ON(psock->rx_kcm)) {
    write_unlock_bh(&csk->sk_callback_lock);
    spin_lock_bh(&mux->rx_lock);
    if (psock->ready_rx_msg) {
    list_del(&psock->psock_ready_list);
    kfree_skb(psock->ready_rx_msg);
    psock->ready_rx_msg = NULL;
    KCM_STATS_INCR(mux->stats.rx_ready_drops);
    spin_unlock_bh(&mux->rx_lock);
    write_unlock_bh(&csk->sk_callback_lock);
    strp_done(&psock->strp);
    bpf_prog_put(psock->bpf_prog);
    spin_lock_bh(&mux->lock);
    aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
    save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
    KCM_STATS_INCR(mux->stats.psock_unattach);
    if (psock->tx_kcm) {
    KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
    spin_unlock_bh(&mux->lock);
    /* ... to do this without the mux lock. */
    spin_lock_bh(&mux->lock);
    if (!psock->tx_kcm) {
    /* psock now unreserved in window mux was unlocked */
    psock->done = 1;
    /* Queue tx work to make sure psock->done is handled */
    queue_work(kcm_wq, &psock->tx_kcm->tx_work);
    spin_unlock_bh(&mux->lock);
    if (!psock->tx_stopped)
    list_del(&psock->psock_avail_list);
    list_del(&psock->psock_list);
    mux->psocks_cnt--;
    spin_unlock_bh(&mux->lock);
    fput(csk->sk_socket->file);

/* kcm_unattach_ioctl() */
    struct kcm_sock *kcm = kcm_sk(sock->sk);
    struct kcm_mux *mux = kcm->mux;
    csock = sockfd_lookup(info->fd, &err);
    return -ENOENT;
    csk = csock->sk;
    err = -EINVAL;
    err = -ENOENT;
    spin_lock_bh(&mux->lock);
    list_for_each_entry(psock, &mux->psocks, psock_list) {
    if (psock->sk != csk)
    if (psock->unattaching || WARN_ON(psock->done)) {
    err = -EALREADY;
    psock->unattaching = 1;
    spin_unlock_bh(&mux->lock);
    spin_unlock_bh(&mux->lock);
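Its counterpart, SIOCKCMUNATTACH, takes just the transport fd; kcm_unattach_ioctl() above then walks the mux's psock list for the matching socket. A matching sketch:

#include <sys/ioctl.h>
#include <linux/kcm.h>

static int kcm_unattach_tcp(int kcm_fd, int tcp_fd)
{
    struct kcm_unattach unattach = { .fd = tcp_fd };

    return ioctl(kcm_fd, SIOCKCMUNATTACH, &unattach);
}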
/* kcm_clone() */
    return ERR_PTR(-ENFILE);
    newsock->type = osock->type;
    newsock->ops = osock->ops;
    __module_get(newsock->ops->owner);
    newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
    return ERR_PTR(-ENOMEM);
    init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
    return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);

/* kcm_ioctl() */
    return -EFAULT;
    return -EFAULT;
    return -EFAULT;
    err = -ENOIOCTLCMD;
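kcm_clone() is driven by the SIOCKCMCLONE ioctl, which hands back a second KCM socket on the same mux (note the init_kcm_sock() call above reusing osock's mux), so several threads can share one set of attached transports. Sketch:

#include <sys/ioctl.h>
#include <linux/kcm.h>

static int kcm_clone_fd(int kcm_fd)
{
    struct kcm_clone clone = { 0 };

    if (ioctl(kcm_fd, SIOCKCMCLONE, &clone) < 0)
        return -1;

    return clone.fd;    /* new KCM socket, same mux */
}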
static void release_mux(struct kcm_mux *mux)
    struct kcm_net *knet = mux->knet;
    &mux->psocks, psock_list) {
    if (!WARN_ON(psock->unattaching))
    if (WARN_ON(mux->psocks_cnt))
    __skb_queue_purge(&mux->rx_hold_queue);
    mutex_lock(&knet->mutex);
    aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
    aggregate_psock_stats(&mux->aggregate_psock_stats,
                          &knet->aggregate_psock_stats);
    aggregate_strp_stats(&mux->aggregate_strp_stats,
                         &knet->aggregate_strp_stats);
    list_del_rcu(&mux->kcm_mux_list);
    knet->count--;
    mutex_unlock(&knet->mutex);
    kfree_rcu(mux, rcu);

/* kcm_done() */
    struct kcm_mux *mux = kcm->mux;
    struct sock *sk = &kcm->sk;
    spin_lock_bh(&mux->rx_lock);
    if (kcm->rx_psock) {
    WARN_ON(kcm->done);
    kcm->rx_disabled = 1;
    kcm->done = 1;
    spin_unlock_bh(&mux->rx_lock);
    if (kcm->rx_wait) {
    list_del(&kcm->wait_rx_list);
    WRITE_ONCE(kcm->rx_wait, false);
    requeue_rx_msgs(mux, &sk->sk_receive_queue);
    spin_unlock_bh(&mux->rx_lock);
    /* Detach from MUX */
    spin_lock_bh(&mux->lock);
    list_del(&kcm->kcm_sock_list);
    mux->kcm_socks_cnt--;
    socks_cnt = mux->kcm_socks_cnt;
    spin_unlock_bh(&mux->lock);
    /* We are done with the mux now. */
    release_mux(mux);
    WARN_ON(kcm->rx_wait);
    sock_put(&kcm->sk);
/* If this is the last KCM socket on the MUX, destroy the MUX. */
/* kcm_release() */
    struct sock *sk = sock->sk;
    struct kcm_mux *mux;
    mux = kcm->mux;
    kfree_skb(kcm->seq_skb);
    __skb_queue_purge(&sk->sk_write_queue);
    kcm->tx_stopped = 1;
    spin_lock_bh(&mux->lock);
    if (kcm->tx_wait) {
    list_del(&kcm->wait_psock_list);
    kcm->tx_wait = false;
    spin_unlock_bh(&mux->lock);
    cancel_work_sync(&kcm->tx_work);
    psock = kcm->tx_psock;
    WARN_ON(kcm->tx_wait);
    WARN_ON(kcm->tx_psock);
    sock->sk = NULL;
/* kcm_create() */
    struct kcm_mux *mux;
    switch (sock->type) {
    sock->ops = &kcm_dgram_ops;
    sock->ops = &kcm_seqpacket_ops;
    return -ESOCKTNOSUPPORT;
    return -EPROTONOSUPPORT;
    return -ENOMEM;
    /* Allocate a kcm mux, shared between KCM sockets */
    mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
    if (!mux) {
    return -ENOMEM;
    spin_lock_init(&mux->lock);
    spin_lock_init(&mux->rx_lock);
    INIT_LIST_HEAD(&mux->kcm_socks);
    INIT_LIST_HEAD(&mux->kcm_rx_waiters);
    INIT_LIST_HEAD(&mux->kcm_tx_waiters);
    INIT_LIST_HEAD(&mux->psocks);
    INIT_LIST_HEAD(&mux->psocks_ready);
    INIT_LIST_HEAD(&mux->psocks_avail);
    mux->knet = knet;
    /* Add new MUX to list */
    mutex_lock(&knet->mutex);
    list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
    knet->count++;
    mutex_unlock(&knet->mutex);
    skb_queue_head_init(&mux->rx_hold_queue);
    init_kcm_sock(kcm_sk(sk), mux);
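From userspace, each socket(AF_KCM, ...) call lands in kcm_create() and allocates a fresh mux; additional sockets on an existing mux come from SIOCKCMCLONE rather than socket(). A minimal sketch, with a fallback define in case libc headers lack AF_KCM:

#include <sys/socket.h>
#include <linux/kcm.h>

#ifndef AF_KCM
#define AF_KCM 41   /* from <linux/socket.h> */
#endif

static int kcm_socket(void)
{
    /* SOCK_SEQPACKET gives MSG_EOR framing; SOCK_DGRAM is the other
     * type kcm_create() accepts. */
    return socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
}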
/* kcm_init_net() */
    INIT_LIST_HEAD_RCU(&knet->mux_list);
    mutex_init(&knet->mutex);

/* kcm_exit_net() */
    WARN_ON(!list_empty(&knet->mux_list));
    mutex_destroy(&knet->mutex);

/* kcm_init() */
    int err = -ENOMEM;