/linux/net/mptcp/

fastopen.c
      28  skb = skb_peek(&ssk->sk_receive_queue);  in mptcp_fastopen_subflow_synack_set_params()
      33  __skb_unlink(skb, &ssk->sk_receive_queue);  in mptcp_fastopen_subflow_synack_set_params()
      54  __skb_queue_tail(&sk->sk_receive_queue, skb);  in mptcp_fastopen_subflow_synack_set_params()
      68  skb = skb_peek_tail(&sk->sk_receive_queue);  in __mptcp_fastopen_gen_msk_ackseq()

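The fastopen.c hits show a TFO data skb being detached from the subflow socket's receive queue and re-queued on the parent MPTCP socket. A minimal sketch of that move, assuming the caller already holds the locks protecting both queues (move_head_skb() is a hypothetical helper, not kernel code):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    /* Sketch: migrate the head skb from a subflow socket (ssk) to its
     * parent socket (sk). The __-prefixed helpers assume the queue
     * locks are already held by the caller. */
    static void move_head_skb(struct sock *ssk, struct sock *sk)
    {
            struct sk_buff *skb;

            skb = skb_peek(&ssk->sk_receive_queue);  /* look, don't dequeue */
            if (!skb)
                    return;

            __skb_unlink(skb, &ssk->sk_receive_queue);
            __skb_queue_tail(&sk->sk_receive_queue, skb);
    }
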
subflow.c
    1062  if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {  in validate_data_csum()
    1068  while ((skb = skb_peek(&ssk->sk_receive_queue)))  in validate_data_csum()
    1109  skb = skb_peek(&ssk->sk_receive_queue);  in get_mapping_status()
    1284  if (skb_queue_empty(&ssk->sk_receive_queue) &&  in subflow_sched_work_if_closed()
    1335  if (!skb_peek(&ssk->sk_receive_queue))  in subflow_check_data_avail()
    1346  trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));  in subflow_check_data_avail()
    1354  skb = skb_peek(&ssk->sk_receive_queue);  in subflow_check_data_avail()
    1408  while ((skb = skb_peek(&ssk->sk_receive_queue)))  in subflow_check_data_avail()
    1418  skb = skb_peek(&ssk->sk_receive_queue);  in subflow_check_data_avail()

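subflow.c leans almost entirely on skb_peek(): the mapping validators inspect the head skb without consuming it, and error paths drain the queue with a peek-and-eat loop. A sketch of that drain shape, assuming the socket lock is held; sk_eat_skb() unlinks an skb from sk_receive_queue and frees it (drain_subflow_rx() is a hypothetical name):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void drain_subflow_rx(struct sock *ssk)
    {
            struct sk_buff *skb;

            /* Peek, then let sk_eat_skb() unlink and free, until empty. */
            while ((skb = skb_peek(&ssk->sk_receive_queue)))
                    sk_eat_skb(ssk, skb);
    }
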
/linux/net/unix/

af_unix.c
     190  a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);  in unix_recvq_lock_cmp_fn()
     191  b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);  in unix_recvq_lock_cmp_fn()
     301  return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;  in unix_recvq_full_lockless()
     629  if (!skb_queue_empty(&sk->sk_receive_queue)) {  in unix_dgram_disconnected()
     630  skb_queue_purge(&sk->sk_receive_queue);  in unix_dgram_disconnected()
     648  skb_queue_purge(&sk->sk_receive_queue);  in unix_sock_destructor()
     706  if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)  in unix_release_sock()
     719  while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {  in unix_release_sock()
     847  spin_lock(&sk->sk_receive_queue.lock);  in unix_count_nr_fds()
     848  skb = skb_peek(&sk->sk_receive_queue);  in unix_count_nr_fds()
    [all …]

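The line 301 hit is a complete pattern in itself: read the queue length without taking the lock and treat the answer as advisory. Sketch (recvq_full_lockless() is a hypothetical name):

    #include <net/sock.h>

    static bool recvq_full_lockless(const struct sock *sk)
    {
            /* skb_queue_len_lockless() is a READ_ONCE() of qlen; the
             * result may be stale, which is fine for a heuristic
             * backpressure check. */
            return skb_queue_len_lockless(&sk->sk_receive_queue) >
                   sk->sk_max_ack_backlog;
    }
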
diag.c
      67  spin_lock(&sk->sk_receive_queue.lock);  in sk_diag_dump_icons()
      70  sk->sk_receive_queue.qlen * sizeof(u32));  in sk_diag_dump_icons()
      76  skb_queue_walk(&sk->sk_receive_queue, skb)  in sk_diag_dump_icons()
      79  spin_unlock(&sk->sk_receive_queue.lock);  in sk_diag_dump_icons()
      85  spin_unlock(&sk->sk_receive_queue.lock);  in sk_diag_dump_icons()
      94  rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);  in sk_diag_show_rqlen()

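sk_diag_dump_icons() holds sk_receive_queue.lock for the whole skb_queue_walk() so the list cannot change mid-iteration. The same shape reduced to a counter (count_queued() is hypothetical):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static unsigned int count_queued(struct sock *sk)
    {
            struct sk_buff *skb;
            unsigned int n = 0;

            spin_lock(&sk->sk_receive_queue.lock);
            skb_queue_walk(&sk->sk_receive_queue, skb)
                    n++;
            spin_unlock(&sk->sk_receive_queue.lock);
            return n;
    }
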
unix_bpf.c
      10  ({ !skb_queue_empty(&__sk->sk_receive_queue) || \
      68  if (!skb_queue_empty(&sk->sk_receive_queue) &&  in unix_bpf_recvmsg()

/linux/net/phonet/

datagram.c
      37  spin_lock_bh(&sk->sk_receive_queue.lock);  in pn_ioctl()
      38  skb = skb_peek(&sk->sk_receive_queue);  in pn_ioctl()
      40  spin_unlock_bh(&sk->sk_receive_queue.lock);  in pn_ioctl()
      61  skb_queue_purge(&sk->sk_receive_queue);  in pn_destruct()

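The pn_ioctl() hits are the stock SIOCINQ idiom, which the bluetooth, atm, l2tp, and rose hits below repeat almost verbatim: lock the queue, peek the head skb, report its length, unlock. Sketch (queued_bytes() is hypothetical):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static int queued_bytes(struct sock *sk)
    {
            struct sk_buff *skb;
            int amount = 0;

            spin_lock_bh(&sk->sk_receive_queue.lock);
            skb = skb_peek(&sk->sk_receive_queue);  /* head stays queued */
            if (skb)
                    amount = skb->len;
            spin_unlock_bh(&sk->sk_receive_queue.lock);
            return amount;
    }
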
pep.c
     405  queue = &sk->sk_receive_queue;  in pipe_do_rcv()
     464  skb_queue_purge(&sk->sk_receive_queue);  in pipe_destruct()
     577  skb_queue_tail(&sk->sk_receive_queue, skb);  in pipe_handler_do_rcv()
     684  skb_queue_head(&sk->sk_receive_queue, skb);  in pep_do_rcv()
     940  q = &sk->sk_receive_queue;  in pep_first_packet_length()
    1256  struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);  in pep_read()

/linux/net/bluetooth/

af_bluetooth.c
     383  if (!skb_queue_empty(&sk->sk_receive_queue))  in bt_sock_data_wait()
     426  skb = skb_dequeue(&sk->sk_receive_queue);  in bt_sock_stream_recvmsg()
     452  skb_queue_head(&sk->sk_receive_queue, skb);  in bt_sock_stream_recvmsg()
     491  skb_queue_head(&sk->sk_receive_queue, skb);  in bt_sock_stream_recvmsg()
     498  skb_queue_head(&sk->sk_receive_queue, skb);  in bt_sock_stream_recvmsg()
     546  if (!skb_queue_empty_lockless(&sk->sk_receive_queue))  in bt_sock_poll()
     590  spin_lock(&sk->sk_receive_queue.lock);  in bt_sock_ioctl()
     591  skb = skb_peek(&sk->sk_receive_queue);  in bt_sock_ioctl()
     593  spin_unlock(&sk->sk_receive_queue.lock);  in bt_sock_ioctl()

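bt_sock_stream_recvmsg() dequeues an skb and, whenever it cannot finish with it, pushes it back with skb_queue_head() so the remaining bytes are seen first on the next read. A simplified sketch of that dequeue/requeue shape (consume_or_requeue() is hypothetical; real code also tracks the partial offset):

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    static int consume_or_requeue(struct sock *sk, unsigned int want)
    {
            struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
            unsigned int len;

            if (!skb)
                    return -EAGAIN;

            if (skb->len > want) {
                    /* Only partially consumed: requeue at the head. */
                    skb_queue_head(&sk->sk_receive_queue, skb);
                    return want;
            }

            len = skb->len;
            kfree_skb(skb);
            return len;
    }
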
/linux/net/atm/

signaling.c
      33  skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);  in sigd_put_skb()
     113  skb_queue_tail(&sk->sk_receive_queue, skb);  in sigd_send()
     204  if (skb_peek(&sk_atm(vcc)->sk_receive_queue))  in sigd_close()
     206  skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);  in sigd_close()

ioctl.c
      82  spin_lock_irq(&sk->sk_receive_queue.lock);  in do_vcc_ioctl()
      83  skb = skb_peek(&sk->sk_receive_queue);  in do_vcc_ioctl()
      85  spin_unlock_irq(&sk->sk_receive_queue.lock);  in do_vcc_ioctl()

raw.c
      28  skb_queue_tail(&sk->sk_receive_queue, skb);  in atm_push_raw()

/linux/net/sctp/

ulpqueue.c
     135  &sk->sk_receive_queue);  in sctp_clear_pd()
     152  __skb_queue_tail(&sk->sk_receive_queue,  in sctp_clear_pd()
     213  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
     227  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
     236  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
     251  if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {  in sctp_ulpq_tail_event()
    1079  if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {  in sctp_ulpq_renege()
    1119  __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));  in sctp_ulpq_abort_pd()

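sctp_ulpq_tail_event() appends event skbs with the unlocked __skb_queue_tail() (the socket lock is held) and signals the reader via sk_data_ready only when it actually queued to sk_receive_queue. The enqueue-then-notify shape (queue_and_notify() is hypothetical):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void queue_and_notify(struct sock *sk, struct sk_buff *skb)
    {
            /* Caller owns the socket lock, so the unlocked variant is safe. */
            __skb_queue_tail(&sk->sk_receive_queue, skb);
            sk->sk_data_ready(sk);
    }
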
/linux/net/caif/

caif_socket.c
     124  struct sk_buff_head *list = &sk->sk_receive_queue;  in caif_queue_rcv_skb()
     313  if (!skb_queue_empty(&sk->sk_receive_queue) ||  in caif_stream_data_wait()
     377  skb = skb_dequeue(&sk->sk_receive_queue);  in caif_stream_recvmsg()
     422  skb_queue_head(&sk->sk_receive_queue, skb);  in caif_stream_recvmsg()
     436  skb_queue_head(&sk->sk_receive_queue, skb);  in caif_stream_recvmsg()
     446  skb_queue_head(&sk->sk_receive_queue, skb);  in caif_stream_recvmsg()
     906  spin_lock_bh(&sk->sk_receive_queue.lock);  in caif_release()
     908  spin_unlock_bh(&sk->sk_receive_queue.lock);  in caif_release()
     949  if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||  in caif_poll()

/linux/net/kcm/

kcmproc.c
     119  kcm->sk.sk_receive_queue.qlen,  in kcm_format_sock()
     149  psock->sk->sk_receive_queue.qlen,  in kcm_format_psock()
     167  if (psock->sk->sk_receive_queue.qlen) {  in kcm_format_psock()

/linux/net/tipc/

socket.c
     279  kfree_skb(__skb_dequeue(&sk->sk_receive_queue));  in tsk_advance_rx_queue()
     310  while ((skb = __skb_dequeue(&sk->sk_receive_queue)))  in tsk_rej_rx_queue()
     567  skb = skb_peek(&sk->sk_receive_queue);  in __tipc_shutdown()
     569  __skb_unlink(skb, &sk->sk_receive_queue);  in __tipc_shutdown()
     585  skb = __skb_dequeue(&sk->sk_receive_queue);  in __tipc_shutdown()
     587  __skb_queue_purge(&sk->sk_receive_queue);  in __tipc_shutdown()
     603  __skb_queue_purge(&sk->sk_receive_queue);  in __tipc_shutdown()
     811  if (!skb_queue_empty_lockless(&sk->sk_receive_queue))  in tipc_poll()
     819  if (skb_queue_empty_lockless(&sk->sk_receive_queue))  in tipc_poll()
    1839  if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {  in tipc_wait_for_rcvmsg()
    [all …]

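tsk_advance_rx_queue() drops the head skb in a single expression, and the shutdown paths drain everything with __skb_queue_purge(). Both in sketch form, assuming the socket lock is held for the unlocked helpers (the function names are hypothetical):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void advance_rx(struct sock *sk)
    {
            /* kfree_skb(NULL) is a no-op, so no empty check is needed. */
            kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
    }

    static void drain_rx(struct sock *sk)
    {
            __skb_queue_purge(&sk->sk_receive_queue);
    }
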
/linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/

chtls_cm.h
     175  __skb_unlink(skb, &sk->sk_receive_queue);  in chtls_free_skb()
     182  __skb_unlink(skb, &sk->sk_receive_queue);  in chtls_kfree_skb()

chtls_io.c
    1378  skb = skb_peek(&sk->sk_receive_queue);  in chtls_pt_recvmsg()
    1439  __skb_unlink(skb, &sk->sk_receive_queue);  in chtls_pt_recvmsg()
    1519  next_skb = skb_peek(&sk->sk_receive_queue);  in chtls_pt_recvmsg()
    1564  skb_queue_walk(&sk->sk_receive_queue, skb) {  in peekmsg()
    1686  skb_queue_empty_lockless(&sk->sk_receive_queue) &&  in chtls_recvmsg()
    1716  skb = skb_peek(&sk->sk_receive_queue);  in chtls_recvmsg()
    1832  !skb_peek(&sk->sk_receive_queue))  in chtls_recvmsg()

/linux/net/ipv4/

tcp_bpf.c
     196  !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);  in tcp_msg_wait_data()
     252  if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {  in tcp_bpf_recvmsg_parser()
     258  if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {  in tcp_bpf_recvmsg_parser()
     349  if (!skb_queue_empty(&sk->sk_receive_queue) &&  in tcp_bpf_recvmsg()

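tcp_msg_wait_data() (line 196) puts skb_queue_empty_lockless() inside the sk_wait_event() condition so the sleeper wakes as soon as data lands. Simplified sketch with the psock ingress check omitted; the caller must hold the socket lock, which sk_wait_event() releases and retakes around the sleep (wait_for_rcv_data() is hypothetical):

    #include <linux/wait.h>
    #include <net/sock.h>

    static int wait_for_rcv_data(struct sock *sk, long timeo)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            int ret;

            add_wait_queue(sk_sleep(sk), &wait);
            ret = sk_wait_event(sk, &timeo,
                                !skb_queue_empty_lockless(&sk->sk_receive_queue),
                                &wait);
            remove_wait_queue(sk_sleep(sk), &wait);
            return ret;
    }
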
/linux/drivers/xen/

pvcalls-back.c
     117  spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);  in pvcalls_conn_back_read()
     118  if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {  in pvcalls_conn_back_read()
     120  spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,  in pvcalls_conn_back_read()
     124  spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);  in pvcalls_conn_back_read()
     149  spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);  in pvcalls_conn_back_read()
     150  if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))  in pvcalls_conn_back_read()
     152  spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);  in pvcalls_conn_back_read()

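pvcalls_conn_back_read() uses the _irqsave lock variants because it cannot assume the interrupt state of its caller. The locked emptiness test in isolation (rxq_empty_irqsafe() is hypothetical):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static bool rxq_empty_irqsafe(struct sock *sk)
    {
            unsigned long flags;
            bool empty;

            spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
            empty = skb_queue_empty(&sk->sk_receive_queue);
            spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
            return empty;
    }
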
/linux/net/nfc/

llcp_sock.c
     572  if (!skb_queue_empty_lockless(&sk->sk_receive_queue))  in llcp_sock_poll()
     841  skb_queue_empty(&sk->sk_receive_queue)) {  in llcp_sock_recvmsg()
     868  skb_queue_head(&sk->sk_receive_queue, skb);  in llcp_sock_recvmsg()
     899  skb_queue_head(&sk->sk_receive_queue, skb);  in llcp_sock_recvmsg()
     964  skb_queue_purge(&sk->sk_receive_queue);  in llcp_sock_destruct()

/linux/net/packet/

af_packet.c
     744  spin_lock(&po->sk.sk_receive_queue.lock);  in prb_retire_rx_blk_timer_expired()
     806  spin_unlock(&po->sk.sk_receive_queue.lock);  in prb_retire_rx_blk_timer_expired()
    2271  spin_lock(&sk->sk_receive_queue.lock);  in packet_rcv()
    2275  __skb_queue_tail(&sk->sk_receive_queue, skb);  in packet_rcv()
    2276  spin_unlock(&sk->sk_receive_queue.lock);  in packet_rcv()
    2416  spin_lock(&sk->sk_receive_queue.lock);  in tpacket_rcv()
    2454  __skb_queue_tail(&sk->sk_receive_queue, copy_skb);  in tpacket_rcv()
    2456  spin_unlock(&sk->sk_receive_queue.lock);  in tpacket_rcv()
    2549  spin_lock(&sk->sk_receive_queue.lock);  in tpacket_rcv()
    2552  spin_unlock(&sk->sk_receive_queue.lock);  in tpacket_rcv()
    [all …]

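packet_rcv() and tpacket_rcv() run in softirq context without the socket lock, so the enqueue takes the queue's own spinlock and only then wakes the reader. A sketch of the producer side (rcv_enqueue() is hypothetical):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void rcv_enqueue(struct sock *sk, struct sk_buff *skb)
    {
            spin_lock(&sk->sk_receive_queue.lock);
            __skb_queue_tail(&sk->sk_receive_queue, skb);
            spin_unlock(&sk->sk_receive_queue.lock);

            sk->sk_data_ready(sk);  /* wake readers after unlocking */
    }
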
/linux/net/l2tp/

l2tp_ip.c
     599  spin_lock_bh(&sk->sk_receive_queue.lock);  in l2tp_ioctl()
     600  skb = skb_peek(&sk->sk_receive_queue);  in l2tp_ioctl()
     602  spin_unlock_bh(&sk->sk_receive_queue.lock);  in l2tp_ioctl()

/linux/net/llc/

af_llc.c
     714  if (skb_queue_empty(&sk->sk_receive_queue)) {  in llc_ui_accept()
     721  skb = skb_dequeue(&sk->sk_receive_queue);  in llc_ui_accept()
     807  skb = skb_peek(&sk->sk_receive_queue);  in llc_ui_recvmsg()
     891  skb_unlink(skb, &sk->sk_receive_queue);  in llc_ui_recvmsg()
     913  skb_unlink(skb, &sk->sk_receive_queue);  in llc_ui_recvmsg()

/linux/net/rose/

af_rose.c
     370  while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {  in rose_destroy_socket()
     952  skb = skb_dequeue(&sk->sk_receive_queue);  in rose_accept()
    1091  skb_queue_head(&sk->sk_receive_queue, skb);  in rose_rx_call_request()
    1348  spin_lock_irq(&sk->sk_receive_queue.lock);  in rose_ioctl()
    1349  if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)  in rose_ioctl()
    1351  spin_unlock_irq(&sk->sk_receive_queue.lock);  in rose_ioctl()

/linux/net/core/

datagram.c
     316  return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,  in skb_recv_datagram()
     375  int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,  in skb_kill_datagram()
     905  if (!skb_queue_empty_lockless(&sk->sk_receive_queue))  in datagram_poll()

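datagram_poll() answers "readable?" with the lockless emptiness test, since a poll result is a racy snapshot by definition. Reduced to the receive side (rx_poll_mask() is hypothetical):

    #include <linux/poll.h>
    #include <net/sock.h>

    static __poll_t rx_poll_mask(struct sock *sk)
    {
            __poll_t mask = 0;

            if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                    mask |= EPOLLIN | EPOLLRDNORM;
            return mask;
    }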