Searched refs:sk_backlog (Results 1 – 8 of 8) sorted by relevance
127 #define sk_rmem_alloc sk_backlog.rmem_alloc
419 } sk_backlog; member
420 #define sk_rmem_alloc sk_backlog.rmem_alloc
1126 if (!sk->sk_backlog.tail) in __sk_add_backlog()
1127 WRITE_ONCE(sk->sk_backlog.head, skb); in __sk_add_backlog()
1129 sk->sk_backlog.tail->next = skb; in __sk_add_backlog()
1131 WRITE_ONCE(sk->sk_backlog.tail, skb); in __sk_add_backlog()
1142 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); in sk_rcvqueues_full()
1163 sk->sk_backlog.len += skb->truesize; in sk_add_backlog()
1239 if (unlikely(READ_ONCE(sk->sk_backlog.tail))) { in sk_flush_backlog()
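
The hits above come from the backlog helpers: lines 1126–1131 append an skb to a simple head/tail singly linked list, line 1142 measures the combined backlog and receive-queue memory against a limit, and line 1163 charges the packet's truesize to sk_backlog.len. Below is a minimal user-space sketch of that enqueue and fullness check; the struct, field, and function names are stand-ins for struct sock / struct sk_buff, not the kernel's API.

/* Minimal user-space model of the head/tail backlog list shown above.
 * All names here are hypothetical stand-ins; the real code operates on
 * struct sock / struct sk_buff inside the kernel. */
#include <stdbool.h>
#include <stdio.h>

struct pkt {                 /* stand-in for struct sk_buff */
	struct pkt *next;
	unsigned int truesize;
};

struct backlog {             /* stand-in for sk->sk_backlog */
	struct pkt *head;
	struct pkt *tail;
	unsigned int len;    /* bytes currently queued */
};

/* Mirrors the fullness test at line 1142: queued backlog bytes plus
 * receive-queue bytes measured against a single limit. */
static bool rcvqueues_full(const struct backlog *bl, unsigned int rmem_alloc,
			   unsigned int limit)
{
	return bl->len + rmem_alloc > limit;
}

/* Mirrors the append at lines 1126-1131 plus the accounting at 1163. */
static void add_backlog(struct backlog *bl, struct pkt *p)
{
	p->next = NULL;
	if (!bl->tail)
		bl->head = p;        /* empty list: new packet becomes head */
	else
		bl->tail->next = p;  /* otherwise link after the current tail */
	bl->tail = p;
	bl->len += p->truesize;
}

int main(void)
{
	struct backlog bl = { 0 };
	struct pkt a = { .truesize = 1500 }, b = { .truesize = 700 };

	add_backlog(&bl, &a);
	add_backlog(&bl, &b);
	printf("queued %u bytes, full: %d\n", bl.len,
	       rcvqueues_full(&bl, 0, 2048));
	return 0;
}
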
1658 READ_ONCE(sk->sk_backlog.len) - in tcp_space()
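The tcp_space() hit shows the pending backlog bytes being subtracted when the remaining receive space is computed. The sketch below models only that subtraction, ignoring the kernel's window scaling; the function and parameter names are illustrative, not the kernel's.

#include <stdio.h>

/* Hedged sketch of the arithmetic implied by the tcp_space() hit: room for
 * new data is the receive buffer minus bytes already held in the backlog
 * and the receive queue. */
static int rcv_space_left(int rcvbuf, int backlog_len, int rmem_alloc)
{
	int space = rcvbuf - backlog_len - rmem_alloc;

	return space > 0 ? space : 0;
}

int main(void)
{
	/* e.g. 64 KiB buffer with 8 KiB backlogged and 16 KiB already queued */
	printf("space left: %d bytes\n",
	       rcv_space_left(64 * 1024, 8 * 1024, 16 * 1024));
	return 0;
}
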
198 !!sk->sk_backlog.tail, sock_owned_by_user_nocheck(sk)); in llc_seq_core_show()
814 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) in llc_ui_recvmsg()
2510 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; in sk_clone()
2511 newsk->sk_backlog.len = 0; in sk_clone()
3203 while ((skb = sk->sk_backlog.head) != NULL) { in __release_sock()
3204 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; in __release_sock()
3230 sk->sk_backlog.len = 0; in __release_sock()
3794 if (sk->sk_backlog.tail) in release_sock()
4023 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); in sk_get_meminfo()
4491 CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog); in sock_struct_check()
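
These hits cover the backlog's lifecycle: the clone at 2510–2511 starts the new socket with an empty list, 3203–3230 drain it when the lock owner releases the socket, 3794 re-checks the tail in release_sock(), and 4023/4491 are accounting and structure-layout checks. The user-space model below captures only the shape of the drain loop; the types and names are stand-ins, not the kernel's own.

#include <stdio.h>

struct pkt {                 /* stand-in for struct sk_buff */
	struct pkt *next;
	unsigned int truesize;
};

struct backlog {             /* stand-in for sk->sk_backlog */
	struct pkt *head;
	struct pkt *tail;
	unsigned int len;
};

static void process_pkt(const struct pkt *p)
{
	printf("delivering %u bytes\n", p->truesize);
}

/* Mirrors the loop visible at lines 3203-3230: detach the current list,
 * clear head/tail so new arrivals start a fresh list, deliver each packet,
 * then zero the accounted length. */
static void release_backlog(struct backlog *bl)
{
	struct pkt *p;

	while ((p = bl->head) != NULL) {
		bl->head = bl->tail = NULL;

		do {
			struct pkt *next = p->next;

			p->next = NULL;
			process_pkt(p);
			p = next;
		} while (p);
	}

	bl->len = 0;
}

int main(void)
{
	struct pkt b = { .next = NULL, .truesize = 700 };
	struct pkt a = { .next = &b, .truesize = 1500 };
	struct backlog bl = { .head = &a, .tail = &b, .len = 2200 };

	release_backlog(&bl);
	printf("remaining: %u bytes\n", bl.len);
	return 0;
}
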
182 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); in inet_sctp_diag_fill()
2730 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) in tcp_recvmsg_locked()
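Together with the llc_ui_recvmsg() hit at line 814, this shows the same recvmsg stopping rule in two protocols: return once at least the target number of bytes has been copied and nothing is still parked on the backlog. A small illustrative predicate (hypothetical names) follows.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt {                 /* stand-in for struct sk_buff */
	struct pkt *next;
};

/* Stopping rule seen at lines 814 and 2730: stop blocking once at least
 * `target` bytes were copied and the backlog tail is NULL. */
static bool recv_done(size_t copied, size_t target, const struct pkt *backlog_tail)
{
	return copied >= target && backlog_tail == NULL;
}

int main(void)
{
	struct pkt pending = { .next = NULL };

	printf("enough data, backlog empty: %d\n", recv_done(2048, 1024, NULL));
	printf("enough data, backlog busy:  %d\n", recv_done(2048, 1024, &pending));
	return 0;
}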