/* Excerpted lines from the Linux kernel's UDP(-Lite) core header */
/* SPDX-License-Identifier: GPL-2.0-or-later */
 * Alan Cox : Turned on udp checksums. I don't want to
 *            chase 'memory corruption' bugs that aren't!
 * struct udp_skb_cb  -  UDP(-Lite) private variables
 * @cscov: checksum coverage length (UDP-Lite only)

#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
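/*
 * Illustrative sketch (not kernel code; all names below are hypothetical):
 * UDP_SKB_CB() follows the usual sk_buff control-block idiom, overlaying a
 * protocol-private struct on the 48-byte skb->cb[] scratch area. A minimal
 * standalone analogue of the pattern:
 */
#include <assert.h>
#include <string.h>

struct demo_skb { char cb[48]; };		/* stand-in for sk_buff */
struct demo_udp_cb { unsigned int cscov; };	/* stand-in for udp_skb_cb */
#define DEMO_UDP_CB(skb) ((struct demo_udp_cb *)((skb)->cb))

static void demo_cb_pattern(void)
{
	struct demo_skb skb;

	memset(&skb, 0, sizeof(skb));
	DEMO_UDP_CB(&skb)->cscov = 20;	/* private state lives inside cb[] */
	/* the overlay must never outgrow the scratch area */
	assert(sizeof(struct demo_udp_cb) <= sizeof(skb.cb));
}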
 * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4

 * struct udp_hslot_main - UDP hash slot used by udp_table.hash2
 * struct udp_table - UDP table
 * @hash:  hash table, sockets are hashed on (local port)
 * @hash2: hash table, sockets are hashed on (local port, local address)
 * @hash4: hash table, connected sockets are hashed on
 *         (local port, local address, remote port, remote address)
	return &table->hash[udp_hashfn(net, num, table->mask)];

	return &table->hash2[hash & table->mask].hslot;
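/*
 * Sketch of the indexing idiom above (standalone, not kernel code;
 * assumption: the table size is a power of two with mask == size - 1).
 * "hash & mask" then equals "hash % size" but avoids a division, which
 * is why udp_table stores a mask rather than a slot count.
 */
#include <assert.h>

#define DEMO_SLOTS 256u			/* must be a power of two */

static void demo_mask_index(void)
{
	unsigned int mask = DEMO_SLOTS - 1;
	unsigned int hash = 0xdeadbeefu;

	assert((hash & mask) == (hash % DEMO_SLOTS));	/* both 239 */
}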
/* Must be called with table->hash2 initialized */
	table->hash4 = (void *)(table->hash2 + (table->mask + 1));
	for (int i = 0; i <= table->mask; i++) {
		table->hash2[i].hash4_cnt = 0;

		INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i);
		table->hash4[i].count = 0;
		spin_lock_init(&table->hash4[i].lock);
	}

	return &table->hash4[hash & table->mask];
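/*
 * Sketch of the hlist_nulls idea behind INIT_HLIST_NULLS_HEAD() above
 * (simplified standalone model, not the kernel implementation): the end
 * of each chain is an odd "pointer" value encoding the slot index, so an
 * RCU reader racing with a rehash can notice it finished on the wrong
 * chain and restart the lookup.
 */
#include <assert.h>

#define DEMO_NULLS_MARKER(v)	(1UL | (((unsigned long)(v)) << 1))
#define DEMO_IS_NULLS(p)	((unsigned long)(p) & 1UL)
#define DEMO_NULLS_VALUE(p)	((unsigned long)(p) >> 1)

static void demo_nulls(void)
{
	void *end = (void *)DEMO_NULLS_MARKER(42);	/* terminator of slot 42 */

	assert(DEMO_IS_NULLS(end));
	assert(DEMO_NULLS_VALUE(end) == 42);	/* reader checks this matches
						 * the slot it started from */
}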
	return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node);

	return UDP_HSLOT_MAIN(hslot2)->hash4_cnt;

	UDP_HSLOT_MAIN(hslot2)->hash4_cnt++;

	UDP_HSLOT_MAIN(hslot2)->hash4_cnt--;
 * Generic checksumming routines for UDP(-Lite) v4 and v6

	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
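/*
 * Standalone sketch of the coverage decision above (demo_inet_sum() is a
 * hypothetical stand-in for the kernel's csum helpers): with full coverage
 * (cscov == len) the whole datagram is verified, otherwise UDP-Lite
 * verifies only the first cscov bytes.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t demo_inet_sum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;	/* pad odd trailing byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return (uint16_t)~sum;
}

/* a received checksum is good when the one's-complement sum comes out zero */
static int demo_csum_ok(const uint8_t *pkt, size_t len, size_t cscov)
{
	return demo_inet_sum(pkt, cscov == len ? len : cscov) == 0;
}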
 * udp_csum_outgoing - compute UDPv4/v6 checksum over fragments
 * @skb: sk_buff containing the filled-in UDP header

	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}

	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
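/*
 * Sketch of what csum_add() does in both loops above (simplified portable
 * version; the kernel uses arch-specific helpers): one's-complement
 * addition with an end-around carry, which is what makes the checksums of
 * individual fragments combinable into one datagram checksum.
 */
#include <stdint.h>

static uint32_t demo_csum_add(uint32_t a, uint32_t b)
{
	uint32_t res = a + b;

	return res + (res < a);		/* feed the overflow carry back in */
}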
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);

	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
	sk->sk_drop_counters = &up->drop_counters;
	skb_queue_head_init(&up->reader_queue);
	INIT_HLIST_NODE(&up->tunnel_list);
	up->forward_threshold = sk->sk_rcvbuf >> 2;
	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);

	up->udp_prod_queue = kcalloc(nr_node_ids, sizeof(*up->udp_prod_queue),
				     GFP_KERNEL);
	if (!up->udp_prod_queue)
		return -ENOMEM;

	for (i = 0; i < nr_node_ids; i++)
		init_llist_head(&up->udp_prod_queue[i].ll_root);
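/*
 * Standalone sketch of the per-NUMA-node producer queue setup above (all
 * names hypothetical): one queue head per node, allocated as a single
 * zeroed array indexed by node id, with allocation failure mapped to
 * -ENOMEM by the caller.
 */
#include <stdlib.h>

struct demo_prod_queue {
	void *ll_root;			/* stand-in for a lock-free llist head */
};

static struct demo_prod_queue *demo_alloc_prod_queues(unsigned int nr_nodes)
{
	struct demo_prod_queue *q = calloc(nr_nodes, sizeof(*q));
	unsigned int i;

	if (!q)
		return NULL;
	for (i = 0; i < nr_nodes; i++)
		q[i].ll_root = NULL;	/* every per-node list starts empty */
	return q;
}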
	numa_drop_add(&udp_sk(sk)->drop_counters, 1);

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
	hash = jhash(skb->data, 2 * ETH_ALEN,
		     (__force u32) skb->protocol);

	/* Since this is being sent on the wire obfuscate hash a bit
	 * to minimize possible future collisions.
	 */

	return htons((((u64) hash * (max - min)) >> 32) + min);
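/*
 * Worked sketch of the multiply-shift range reduction above (standalone,
 * host byte order): ((u64)hash * span) >> 32 scales a 32-bit hash by
 * span / 2^32, mapping it near-uniformly into [min, min + span) with no
 * division.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t demo_hash_to_port(uint32_t hash, uint16_t min, uint16_t max)
{
	return (uint16_t)((((uint64_t)hash * (max - min)) >> 32) + min);
}

static void demo_port_mapping(void)
{
	/* halfway through the hash space lands halfway through the range */
	assert(demo_hash_to_port(0x80000000u, 32768, 61000) == 46884);
	/* the extremes stay inside [min, max) */
	assert(demo_hash_to_port(0, 32768, 61000) == 32768);
	assert(demo_hash_to_port(0xffffffffu, 32768, 61000) < 61000);
}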
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);

	return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
				 bound_dev_if, dif, sdif);
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
 * possibly multiple cache misses on dequeue()
 */

	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */

	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored in 16 bits since the udp header has been
	 * already validated and pulled.
	 */

	return (struct udp_dev_scratch *)&skb->dev_scratch;

	return udp_skb_scratch(skb)->len;

	return udp_skb_scratch(skb)->csum_unnecessary;

	return udp_skb_scratch(skb)->is_linear;

	return skb->len;
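/*
 * Illustrative sketch of the dev_scratch caching idea (layout and names
 * are hypothetical, not the kernel's struct udp_dev_scratch): the fields
 * needed at recvmsg time are packed into one word captured at enqueue
 * time, so dequeue never touches the cold sk_buff cache lines.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

struct demo_scratch {
	uint16_t len;			/* UDP length fits in 16 bits */
	uint8_t  csum_unnecessary:1;
	uint8_t  is_linear:1;
};

static unsigned long demo_pack_scratch(uint16_t len, int csum_ok, int linear)
{
	struct demo_scratch s = {
		.len = len,
		.csum_unnecessary = !!csum_ok,
		.is_linear = !!linear,
	};
	unsigned long word = 0;

	assert(sizeof(s) <= sizeof(word));	/* must fit the scratch word */
	memcpy(&word, &s, sizeof(s));
	return word;
}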
	return copy_to_iter_full(skb->data + off, len, to) ? 0 : -EFAULT;
 * SNMP statistics for UDP and UDP-Lite

	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field);  } while(0)

	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field);  } while(0)

	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field);	\

	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);	\
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field);		\

	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :		\
	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :		\
				 sock_net(sk)->mib.udp_stats_in6);		\

	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;			\

	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
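/*
 * Sketch of the selection pattern the macros above compile down to
 * (hypothetical standalone analogue): resolve the right counter table
 * once from the (family, udplite) pair, then bump a field in it. The
 * kernel versions differ mainly in using per-cpu SNMP counters.
 */
#include <stdint.h>

enum { DEMO_MIB_INDATAGRAMS, DEMO_MIB_INERRORS, DEMO_MIB_MAX };

static uint64_t demo_udp_mib[DEMO_MIB_MAX];
static uint64_t demo_udplite_mib[DEMO_MIB_MAX];

static void demo_inc_stats(int is_udplite, int field)
{
	uint64_t *mib = is_udplite ? demo_udplite_mib : demo_udp_mib;

	mib[field]++;
}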
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {

	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	drop_count = skb_shinfo(skb)->gso_segs;
	/* UDP-lite can't land here - no GRO */
	WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);

 * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)

	UDP_SKB_CB(skb)->cscov = skb->len;
	if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
		skb->csum_valid = 1;
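/*
 * Post-GRO fixup, sketched: GRO already verified the checksum while
 * aggregating, so after re-segmentation the coverage is reset to the full
 * segment length and CHECKSUM_NONE segments are marked csum_valid rather
 * than being verified a second time.
 */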