| /linux/include/net/ |
| hotdata.h |
    16  struct net_hotdata {
    53  #define inet_ehash_secret     net_hotdata.tcp_protocol.secret
    54  #define udp_ehash_secret      net_hotdata.udp_protocol.secret
    55  #define inet6_ehash_secret    net_hotdata.tcpv6_protocol.secret
    56  #define tcp_ipv6_hash_secret  net_hotdata.tcpv6_offload.secret
    57  #define udp6_ehash_secret     net_hotdata.udpv6_protocol.secret
    58  #define udp_ipv6_hash_secret  net_hotdata.udpv6_offload.secret
    60  extern struct net_hotdata net_hotdata;
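The matches above outline the shape of the structure: one global, cache-line-aligned aggregate for fields the data path touches on nearly every packet. Below is a condensed reconstruction from the fields that appear in this listing; it is abridged, not the full kernel definition, which has more fields and config guards (e.g. CONFIG_RPS around the RPS members). All sketches in this listing assume kernel context (<net/hotdata.h> and related headers).

    /* Abridged sketch of struct net_hotdata, reconstructed from the
     * matches in this listing; not the complete kernel definition. */
    struct net_hotdata {
    	struct net_offload	tcpv4_offload;
    	struct net_offload	udpv4_offload;
    	struct packet_offload	ipv6_packet_offload;
    	struct net_offload	tcpv6_offload;
    	struct inet6_protocol	tcpv6_protocol;
    	struct inet6_protocol	udpv6_protocol;
    	struct list_head	offload_base;	/* GRO/GSO handler list */
    	struct kmem_cache	*skbuff_cache;
    	struct kmem_cache	*skbuff_fclone_cache;
    	struct kmem_cache	*skb_small_head_cache;
    	struct rps_sock_flow_table __rcu *rps_sock_flow_table;
    	u32			rps_cpu_mask;
    	int			netdev_budget;
    	int			tstamp_prequeue;
    	int			max_backlog;
    	int			qdisc_max_burst;
    	int			dev_tx_weight;
    	int			dev_rx_weight;
    	int			sysctl_max_skb_frags;
    	int			sysctl_mem_pcpu_rsv;
    };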
|
| /linux/net/core/ |
| hotdata.c |
    10  struct net_hotdata net_hotdata __cacheline_aligned = {
    11      .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
    27  EXPORT_SYMBOL(net_hotdata);
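Note the self-referential initializer: LIST_HEAD_INIT() must name the variable being defined so the embedded list head starts out pointing at itself. A minimal sketch of the pattern with illustrative defaults (the real initializer in hotdata.c sets more fields):

    /* Sketch of the hotdata.c definition: __cacheline_aligned packs the
     * hot fields together, and the embedded list head is initialized by
     * naming the very variable being defined. */
    struct net_hotdata net_hotdata __cacheline_aligned = {
    	.offload_base	= LIST_HEAD_INIT(net_hotdata.offload_base),
    	.max_backlog	= 1000,	/* illustrative; the historical
    				 * netdev_max_backlog default */
    	.dev_rx_weight	= 64,	/* illustrative dev_weight default */
    	.dev_tx_weight	= 64,
    };
    EXPORT_SYMBOL(net_hotdata);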
|
| sysctl_net_core.c |
   153  net_hotdata.rps_sock_flow_table,  in rps_sock_flow_sysctl()
   174  net_hotdata.rps_cpu_mask =  in rps_sock_flow_sysctl()
   186  rcu_assign_pointer(net_hotdata.rps_sock_flow_table,  in rps_sock_flow_sysctl()
   317  WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);  in proc_do_dev_weight()
   318  WRITE_ONCE(net_hotdata.dev_tx_weight, weight * dev_weight_tx_bias);  in proc_do_dev_weight()
   395  .data = &net_hotdata.sysctl_mem_pcpu_rsv,
   427  .data = &net_hotdata.max_backlog,
   434  .data = &net_hotdata.qdisc_max_burst,
   493  .data = &net_hotdata.tstamp_prequeue,
   562  .data = &net_hotdata.netdev_budget,
   [all …]
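Two concurrency idioms recur in these handlers: plain integer fields are published with WRITE_ONCE() (paired with the READ_ONCE() readers seen throughout the data-path matches below), and the RPS flow table pointer goes through rcu_assign_pointer(). A simplified sketch of both, assuming the caller has already parsed and validated the new values; the sketch_* names are hypothetical:

    /* Sketch of the proc_do_dev_weight() idiom: derive both weights from
     * one sysctl and publish them tear-free for lockless readers. */
    static void sketch_set_dev_weight(int weight, int rx_bias, int tx_bias)
    {
    	WRITE_ONCE(net_hotdata.dev_rx_weight, weight * rx_bias);
    	WRITE_ONCE(net_hotdata.dev_tx_weight, weight * tx_bias);
    }

    /* Sketch of the rps_sock_flow_sysctl() idiom: rcu_assign_pointer()
     * orders the table's initialization before publication, so readers
     * using rcu_dereference() never observe a half-built table. */
    static void sketch_install_flow_table(struct rps_sock_flow_table *table)
    {
    	rcu_assign_pointer(net_hotdata.rps_sock_flow_table, table);
    }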
|
| gso.c |
    20  list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {  in skb_eth_gso_segment()
    51  list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {  in skb_mac_gso_segment()
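Both GSO entry points resolve a handler the same way: walk net_hotdata.offload_base under RCU until a registered protocol type matches. A simplified sketch of skb_mac_gso_segment()'s lookup, with error handling and header bookkeeping omitted:

    /* Sketch: dispatch GSO segmentation to whichever packet_offload on
     * the shared list registered the skb's L3 protocol type. */
    static struct sk_buff *sketch_gso_segment(struct sk_buff *skb,
    					  netdev_features_t features,
    					  __be16 type)
    {
    	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
    	struct packet_offload *ptype;

    	rcu_read_lock();
    	list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
    		if (ptype->type == type && ptype->callbacks.gso_segment) {
    			segs = ptype->callbacks.gso_segment(skb, features);
    			break;
    		}
    	}
    	rcu_read_unlock();
    	return segs;
    }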
|
| gro.c |
    30  list_for_each_entry(elem, &net_hotdata.offload_base, list) {  in dev_add_offload()
    54  struct list_head *head = &net_hotdata.offload_base;  in __dev_remove_offload()
   256  struct list_head *head = &net_hotdata.offload_base;  in gro_complete()
   466  struct list_head *head = &net_hotdata.offload_base;  in dev_gro_receive()
   572  struct list_head *offload_head = &net_hotdata.offload_base;  in gro_find_receive_by_type()
   586  struct list_head *offload_head = &net_hotdata.offload_base;  in gro_find_complete_by_type()
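dev_add_offload() keeps the shared list sorted so higher-priority handlers match first, inserting under a writer-side lock while the GRO/GSO readers above traverse locklessly with RCU. A hedged sketch of the insertion, assuming a writer-side spinlock like gro.c's offload_lock:

    static DEFINE_SPINLOCK(sketch_offload_lock);

    /* Sketch of dev_add_offload(): find the first element whose priority
     * value exceeds ours and insert before it. If the loop finishes
     * without breaking, elem->list aliases the list head, so the entry
     * lands at the tail. list_add_rcu() keeps concurrent readers safe. */
    static void sketch_add_offload(struct packet_offload *po)
    {
    	struct packet_offload *elem;

    	spin_lock(&sketch_offload_lock);
    	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
    		if (po->priority < elem->priority)
    			break;
    	}
    	list_add_rcu(&po->list, elem->list.prev);
    	spin_unlock(&sketch_offload_lock);
    }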
|
| gro_cells.c |
    34  if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {  in gro_cells_receive()
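gro_cells bounds its per-cell queue with the same global backlog limit the CPU backlog uses; the READ_ONCE() pairs with the sysctl writer's WRITE_ONCE(). A simplified sketch (the real gro_cells_receive() also checks device state and schedules NAPI):

    /* Sketch: accept the skb only while the per-cell queue is below the
     * shared netdev_max_backlog limit, read locklessly. */
    static int sketch_cell_receive(struct gro_cell *cell, struct sk_buff *skb)
    {
    	if (skb_queue_len(&cell->napi_skbs) >
    	    READ_ONCE(net_hotdata.max_backlog)) {
    		kfree_skb(skb);
    		return NET_RX_DROP;
    	}
    	__skb_queue_tail(&cell->napi_skbs, skb);
    	return NET_RX_SUCCESS;
    }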
|
| skbuff.c |
   291  nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,  in napi_skb_cache_get()
   338  nc->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,  in napi_skb_cache_get_bulk()
   345  n -= kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,  in napi_skb_cache_get_bulk()
   422  skb = kmem_cache_alloc(net_hotdata.skbuff_cache,  in slab_build_skb()
   474  skb = kmem_cache_alloc(net_hotdata.skbuff_cache,  in __build_skb()
   586  obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,  in kmalloc_reserve()
   594  obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);  in kmalloc_reserve()
   660  cache = net_hotdata.skbuff_fclone_cache;  in __alloc_skb()
   663  cache = net_hotdata.skbuff_cache;  in __alloc_skb()
  1066  kmem_cache_free(net_hotdata.skb_small_head_cache, head);  in skb_kfree_head()
   [all …]
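The NAPI skb cache refills in bulk from net_hotdata.skbuff_cache so one slab call amortizes across many packets. A simplified sketch of the napi_skb_cache_get() refill path, using an abridged hypothetical stand-in for the kernel's per-CPU napi_alloc_cache:

    /* Abridged stand-in for the kernel's per-CPU napi_alloc_cache. */
    struct sketch_skb_cache {
    	unsigned int	skb_count;
    	void		*skb_cache[64];		/* NAPI_SKB_CACHE_SIZE */
    };

    /* Sketch: when the cache runs dry, pull a whole batch of skbs from
     * the shared slab in one kmem_cache_alloc_bulk() call. */
    static struct sk_buff *sketch_skb_cache_get(struct sketch_skb_cache *nc)
    {
    	if (unlikely(!nc->skb_count)) {
    		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
    						      GFP_ATOMIC,
    						      16, /* NAPI_SKB_CACHE_BULK */
    						      nc->skb_cache);
    		if (unlikely(!nc->skb_count))
    			return NULL;
    	}
    	return nc->skb_cache[--nc->skb_count];
    }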
|
| dev.c |
  4206  if (unlikely(defer_count > READ_ONCE(net_hotdata.qdisc_max_burst))) {  in __dev_xmit_skb()
  5094  sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);  in get_rps_cpu()
  5104  if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)  in get_rps_cpu()
  5107  next_cpu = ident & net_hotdata.rps_cpu_mask;  in get_rps_cpu()
  5333  max_backlog = READ_ONCE(net_hotdata.max_backlog);  in enqueue_to_backlog()
  5639  net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);  in netif_rx_internal()
  5934  net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);  in __netif_receive_skb_core()
  6333  net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);  in netif_receive_skb_internal()
  6362  net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),  in netif_receive_skb_list_internal()
  6609  napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);  in process_backlog()
   [all …]
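The get_rps_cpu() matches show the flow-table trick: each entry packs the target CPU into the low rps_cpu_mask bits and the flow hash's upper bits above them, so a single XOR-and-mask both checks that the entry still belongs to this flow and extracts the CPU. A simplified sketch:

    /* Sketch of the get_rps_cpu() flow-table lookup: returns the CPU
     * recorded for this flow hash, or -1 if the entry is unset/stale. */
    static int sketch_rps_lookup(u32 hash)
    {
    	struct rps_sock_flow_table *table;
    	int cpu = -1;
    	u32 ident;

    	rcu_read_lock();
    	table = rcu_dereference(net_hotdata.rps_sock_flow_table);
    	if (table && hash) {
    		ident = READ_ONCE(table->ents[hash & table->mask]);
    		/* The upper bits must equal the hash's upper bits; otherwise
    		 * the slot belongs to (or was overwritten by) another flow. */
    		if (!((ident ^ hash) & ~net_hotdata.rps_cpu_mask))
    			cpu = ident & net_hotdata.rps_cpu_mask;
    	}
    	rcu_read_unlock();
    	return cpu;
    }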
|
| /linux/net/ipv6/ |
| tcpv6_offload.c |
   198  net_hotdata.tcpv6_offload = (struct net_offload) {  in tcpv6_offload_init()
   205  return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);  in tcpv6_offload_init()
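Because the net_offload now lives inside net_hotdata rather than in a static of its own, the init function fills the slot with a compound literal and then registers a pointer into the global. A sketch reconstructed from the two matches above (callback names are the real tcpv6_offload.c handlers):

    /* Sketch of tcpv6_offload_init(): populate the hotdata slot, then
     * hand the IPv6 offload table a pointer into net_hotdata. */
    static int __init sketch_tcpv6_offload_init(void)
    {
    	net_hotdata.tcpv6_offload = (struct net_offload) {
    		.callbacks = {
    			.gso_segment	= tcp6_gso_segment,
    			.gro_receive	= tcp6_gro_receive,
    			.gro_complete	= tcp6_gro_complete,
    		},
    	};
    	return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
    }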
|
| ip6_offload.c |
   472  net_hotdata.ipv6_packet_offload = (struct packet_offload) {  in ipv6_offload_init()
   480  dev_add_offload(&net_hotdata.ipv6_packet_offload);  in ipv6_offload_init()
|
| tcp_ipv6.c |
  2380  net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
  2385  ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
  2410  inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
  2418  inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
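The protocol side follows the same fill-then-register shape, with the wrinkle that every later error path must unregister the same hotdata slot, which is why inet6_del_protocol() appears twice above. A sketch with the unwinding abridged; later_init_steps() is a hypothetical stand-in for the remaining init work:

    /* Sketch of the tcp_ipv6.c registration pattern. */
    static int __init sketch_tcpv6_init(void)
    {
    	int ret;

    	net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
    		.handler	= tcp_v6_rcv,
    		.err_handler	= tcp_v6_err,
    		.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
    	};
    	ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
    	if (ret)
    		return ret;

    	ret = later_init_steps();	/* hypothetical stand-in */
    	if (ret)	/* unwind: drop the handler registered above */
    		inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
    	return ret;
    }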
|
| /linux/net/ipv4/ |
| tcp_offload.c |
   471  net_hotdata.tcpv4_offload = (struct net_offload) {  in tcpv4_offload_init()
   478  return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);  in tcpv4_offload_init()
|
| udp_offload.c |
   988  net_hotdata.udpv4_offload = (struct net_offload) {  in udpv4_offload_init()
   996  return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);  in udpv4_offload_init()
|
| tcp.c |
  1252  if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {  in tcp_sendmsg_locked()
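tcp_sendmsg_locked() reads the frag limit from hotdata on every page it tries to append: once an skb already carries sysctl_max_skb_frags page fragments, a fresh segment must be started. A minimal sketch of that test (the MPTCP match at the end of this listing applies the same check):

    /* Sketch: 'i' is the number of page frags already attached to the
     * skb under construction; returns true when a new skb is needed. */
    static bool sketch_need_new_segment(int i, bool can_coalesce)
    {
    	return !can_coalesce &&
    	       i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags);
    }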
|
| /linux/net/xfrm/ |
| espintcp.c |
   174  READ_ONCE(net_hotdata.max_backlog)) {  in espintcp_queue_out()
|
| xfrm_input.c |
   801  if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))  in xfrm_trans_queue_net()
|
| /linux/drivers/net/ovpn/ |
| tcp.c |
   324  READ_ONCE(net_hotdata.max_backlog)) {  in ovpn_tcp_send_skb()
|
| /linux/net/sched/ |
| sch_generic.c |
   417  int quota = READ_ONCE(net_hotdata.dev_tx_weight);  in __qdisc_run()
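__qdisc_run() uses dev_tx_weight as a quota on how much one invocation may transmit before yielding; when the quota runs out the qdisc is rescheduled rather than hogging the CPU. A simplified sketch, assuming qdisc_restart() behaves like the static sch_generic.c helper that dequeues and transmits a batch and reports the packet count:

    /* Sketch of __qdisc_run(): bound the work done per invocation by
     * the dev_tx_weight quota, deferring the rest to a later softirq. */
    static void sketch_qdisc_run(struct Qdisc *q)
    {
    	int quota = READ_ONCE(net_hotdata.dev_tx_weight);
    	int packets;

    	while (qdisc_restart(q, &packets)) {
    		quota -= packets;
    		if (quota <= 0) {
    			__netif_schedule(q);	/* resume later */
    			break;
    		}
    	}
    }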
|
| /linux/net/bpf/ |
| test_run.c |
   249  n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,  in xdp_recv_frames()
|
| /linux/net/mptcp/ |
| protocol.c |
  1321  if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {  in mptcp_sendmsg_frag()
|