Lines Matching +full:rcv +full:- +full:sel

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
34 #define NEXTHDR_HOP 0 /* Hop-by-hop option header. */
55 /* Limits on Hop-by-Hop and Destination options.
58 * Hop-by-Hop or Destination options other than the packet must fit in an MTU.
63 * - Limit the number of options in a Hop-by-Hop or Destination options
65 * - Limit the byte length of a Hop-by-Hop or Destination options extension
67 * - Disallow unknown options
77 * options or Hop-by-Hop options. If the number is less than zero then unknown
82 * Hop-by-Hop options extension header. Setting the value to INT_MAX
89 /* Default limits for Hop-by-Hop and Destination options */
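The fragments above describe three configurable guards against abusive extension headers: a cap on the number of TLVs, a cap on the byte length of the header, and a way to reject unknown options (per the comment, a negative count limit disallows unknown TLVs, and INT_MAX for the length means no limit). A minimal user-space sketch of that policy check, using hypothetical names (check_opt_limits and its arguments) rather than the kernel's real parser in net/ipv6/exthdrs.c:

/* Sketch only: count the TLVs in a Hop-by-Hop/Destination options header and
 * apply a count limit plus a byte-length limit.  Distinguishing known from
 * unknown option types is left out for brevity.
 */
static int check_opt_limits(const unsigned char *opts, int len,
			    int max_cnt, int max_len)
{
	int off = 2, cnt = 0;			/* skip nexthdr and hdrlen */

	if (len > max_len)
		return -1;			/* extension header too long */

	while (off < len) {
		if (opts[off] == 0) {		/* Pad1: a single padding byte */
			off++;
			continue;
		}
		if (off + 2 > len || off + 2 + opts[off + 1] > len)
			return -1;		/* truncated TLV */
		if (++cnt > max_cnt)
			return -1;		/* too many options */
		off += 2 + opts[off + 1];	/* type, length, data */
	}
	return 0;
}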
98 * type - unicast | multicast
99 * scope - local | site | global
100 * v4 - compat
125 ((a)->s6_addr[1] & 0x0f) /* nonstandard */
126 #define __IPV6_ADDR_SCOPE_INVALID -1
137 ((a)->s6_addr[1] & 0x10)
139 ((a)->s6_addr[1] & 0x20)
141 ((a)->s6_addr[1] & 0x40)
184 struct sk_buff *skb = iter->frag; in ip6_fraglist_next()
186 iter->frag = skb->next; in ip6_fraglist_next()
212 ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
224 mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
225 mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
233 SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
234 mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
242 SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
243 SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
250 mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
251 mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
258 mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
259 mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
289 int sel; member
369 .hlimit = -1, in ipcm6_init()
370 .tclass = -1, in ipcm6_init()
371 .dontfrag = -1, in ipcm6_init()
379 .hlimit = -1, in ipcm6_init_sk()
380 .tclass = inet6_sk(sk)->tclass, in ipcm6_init_sk()
390 opt = rcu_dereference(np->opt); in txopt_get()
392 if (!refcount_inc_not_zero(&opt->refcnt)) in txopt_get()
403 if (opt && refcount_dec_and_test(&opt->refcnt)) in txopt_put()
415 READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl)) in fl6_sock_lookup()
416 return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT); in fl6_sock_lookup()
436 atomic_dec(&fl->users); in fl6_sock_release()
445 int ip6_ra_control(struct sock *sk, int sel);
473 * It assumes headers are already in skb->head.
481 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) in ipv6_has_hopopt_jumbo()
484 if (skb->protocol != htons(ETH_P_IPV6)) in ipv6_has_hopopt_jumbo()
494 if (nhdr->nexthdr != NEXTHDR_HOP) in ipv6_has_hopopt_jumbo()
498 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || in ipv6_has_hopopt_jumbo()
499 jhdr->nexthdr != IPPROTO_TCP) in ipv6_has_hopopt_jumbo()
501 return jhdr->nexthdr; in ipv6_has_hopopt_jumbo()
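The checks above come from the oversized-packet (BIG TCP) path: they only accept a Hop-by-Hop header whose single TLV is the jumbogram option and whose next header is TCP. For orientation, the layout being tested looks roughly like this (field names are illustrative; the kernel has its own struct for this):

/* IPv6 header, then an 8-byte Hop-by-Hop header carrying one jumbo TLV */
struct jumbo_hbh_example {
	unsigned char nexthdr;		/* must be IPPROTO_TCP per the check   */
	unsigned char hdrlen;		/* 0: the header is 8 bytes in total   */
	unsigned char tlv_type;		/* IPV6_TLV_JUMBO (0xc2)               */
	unsigned char tlv_len;		/* 4: size of the payload-length field */
	unsigned int  jumbo_payload_len;	/* big-endian on the wire      */
};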
518 return -1; in ipv6_hopopt_jumbo_remove()
524 skb_network_header(skb) - skb_mac_header(skb) + in ipv6_hopopt_jumbo_remove()
528 skb->network_header += hophdr_len; in ipv6_hopopt_jumbo_remove()
529 skb->mac_header += hophdr_len; in ipv6_hopopt_jumbo_remove()
532 h6->nexthdr = nexthdr; in ipv6_hopopt_jumbo_remove()
539 s32 accept_ra = READ_ONCE(idev->cnf.accept_ra); in ipv6_accept_ra()
544 return READ_ONCE(idev->cnf.forwarding) ? accept_ra == 2 : in ipv6_accept_ra()
602 return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | in ipv6_masked_addr_cmp()
603 ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | in ipv6_masked_addr_cmp()
604 ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | in ipv6_masked_addr_cmp()
605 ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); in ipv6_masked_addr_cmp()
617 memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr)); in ipv6_addr_prefix()
618 memcpy(pfx->s6_addr, addr, o); in ipv6_addr_prefix()
620 pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b); in ipv6_addr_prefix()
631 memcpy(addr->s6_addr, pfx, o); in ipv6_addr_prefix_copy()
633 addr->s6_addr[o] &= ~(0xff00 >> b); in ipv6_addr_prefix_copy()
634 addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b)); in ipv6_addr_prefix_copy()
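The o/b split used by both helpers is simply plen broken into whole bytes (o = plen >> 3) and leftover bits (b = plen & 7); the partial byte is kept with the mask 0xff00 >> b. A standalone user-space copy of the same masking logic, assuming struct in6_addr from <netinet/in.h>:

#include <string.h>
#include <netinet/in.h>

/* Keep the first plen bits of addr in pfx and zero the rest (sketch of the
 * ipv6_addr_prefix() logic above).  For plen = 61: o = 7, b = 5, and byte 7
 * is masked with 0xf8.
 */
static void addr_prefix(struct in6_addr *pfx, const struct in6_addr *addr,
			int plen)
{
	int o = plen >> 3;		/* whole bytes covered by the prefix */
	int b = plen & 0x7;		/* remaining bits in the next byte   */

	memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr));
	memcpy(pfx->s6_addr, addr->s6_addr, o);
	if (b != 0)
		pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}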
662 __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2); in ipv6_addr_set()
663 __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4); in ipv6_addr_set()
675 return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | in ipv6_addr_equal()
676 (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | in ipv6_addr_equal()
677 (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | in ipv6_addr_equal()
678 (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; in ipv6_addr_equal()
687 if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len)))) in __ipv6_prefix_equal64_half()
702 return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64); in ipv6_prefix_equal()
711 const __be32 *a1 = addr1->s6_addr32; in ipv6_prefix_equal()
712 const __be32 *a2 = addr2->s6_addr32; in ipv6_prefix_equal()
722 if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi)))) in ipv6_prefix_equal()
736 return (a->s6_addr32[0] | a->s6_addr32[1] | in ipv6_addr_any()
737 a->s6_addr32[2] | a->s6_addr32[3]) == 0; in ipv6_addr_any()
749 return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^ in ipv6_addr_hash()
750 a->s6_addr32[2] ^ a->s6_addr32[3]); in ipv6_addr_hash()
757 return jhash2((__force const u32 *)a->s6_addr32, in __ipv6_addr_jhash()
758 ARRAY_SIZE(a->s6_addr32), initval); in __ipv6_addr_jhash()
768 return (a->s6_addr32[0] | a->s6_addr32[1] | in ipv6_addr_loopback()
769 a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0; in ipv6_addr_loopback()
775 * since all of the endian-annotated types are fixed size regardless of arch.
783 (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) | in ipv6_addr_v4mapped()
785 (__force unsigned long)(a->s6_addr32[2] ^ in ipv6_addr_v4mapped()
791 return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]); in ipv6_addr_v4mapped_loopback()
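The word-wise tests above are an optimization; the pattern being matched is the IPv4-mapped form ::ffff:a.b.c.d (80 zero bits, 16 one bits, then the IPv4 address). A byte-wise user-space equivalent, with is_v4mapped() as a hypothetical helper name:

#include <string.h>
#include <stdbool.h>
#include <netinet/in.h>

static bool is_v4mapped(const struct in6_addr *a)
{
	static const unsigned char pfx[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
	};

	return memcmp(a->s6_addr, pfx, sizeof(pfx)) == 0;
}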
803 hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); in ipv6_portaddr_hash()
805 hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); in ipv6_portaddr_hash()
816 return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010); in ipv6_addr_orchid()
821 return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000); in ipv6_addr_is_multicast()
847 return i * 32 + 31 - __fls(ntohl(xb)); in __ipv6_addr_diff32()
864 * --ANK (980803) in __ipv6_addr_diff32()
880 return i * 64 + 63 - __fls(be64_to_cpu(xb)); in __ipv6_addr_diff64()
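Both helpers return the length of the longest common prefix of two addresses: scan for the first differing word, then add the number of leading bits that still match inside it (31 - __fls(x) is the count of leading zero bits of x). An illustrative 32-bit-word version, assuming the widely available s6_addr32 accessor and GCC/Clang's __builtin_clz; for example, 2001:db8:: and 2001:db8:8000:: share exactly 32 bits:

#include <arpa/inet.h>
#include <netinet/in.h>

/* Length in bits of the common prefix of two IPv6 addresses (sketch). */
static int addr_common_prefix(const struct in6_addr *a1,
			      const struct in6_addr *a2)
{
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int xb = ntohl(a1->s6_addr32[i] ^ a2->s6_addr32[i]);

		if (xb)
			return i * 32 + __builtin_clz(xb);
	}
	return 128;	/* identical addresses */
}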
913 if (ipv6_addr_is_multicast(&fl6->daddr)) in ip6_sk_dst_hoplimit()
914 hlimit = READ_ONCE(np->mcast_hops); in ip6_sk_dst_hoplimit()
916 hlimit = READ_ONCE(np->hop_limit); in ip6_sk_dst_hoplimit()
923 * Equivalent to : flow->v6addrs.src = iph->saddr;
924 * flow->v6addrs.dst = iph->daddr;
929 BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) != in iph_to_flow_copy_v6addrs()
930 offsetof(typeof(flow->addrs), v6addrs.src) + in iph_to_flow_copy_v6addrs()
931 sizeof(flow->addrs.v6addrs.src)); in iph_to_flow_copy_v6addrs()
932 memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs)); in iph_to_flow_copy_v6addrs()
933 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; in iph_to_flow_copy_v6addrs()
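The BUILD_BUG_ON above is what makes the single memcpy() legal: it asserts at compile time that the destination address immediately follows the source address in the flow key, so copying 2 * sizeof(struct in6_addr) bytes starting at the IPv6 header's source address fills both fields, exactly the two assignments spelled out in the "Equivalent to" comment. In picture form (illustrative):

/* The flow key layout the assertion guarantees:
 *
 *	struct in6_addr src;	// offset 0
 *	struct in6_addr dst;	// offset 16, directly after src
 *
 * saddr and daddr are likewise adjacent in the IPv6 header, so one 32-byte
 * copy moves both addresses at once.
 */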
941 return net->ipv6.sysctl.ip_nonlocal_bind || in ipv6_can_nonlocal_bind()
942 test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) || in ipv6_can_nonlocal_bind()
943 test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags); in ipv6_can_nonlocal_bind()
968 net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || in ip6_make_flowlabel()
970 net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED)) in ip6_make_flowlabel()
983 if (net->ipv6.sysctl.flowlabel_state_ranges) in ip6_make_flowlabel()
991 switch (net->ipv6.sysctl.auto_flowlabels) { in ip6_default_np_autolabel()
1017 return net->ipv6.sysctl.multipath_hash_policy; in ip6_multipath_hash_policy()
1021 return net->ipv6.sysctl.multipath_hash_fields; in ip6_multipath_hash_fields()
1070 return fl6->flowlabel & IPV6_FLOWLABEL_MASK; in flowi6_get_flowlabel()
1078 * rcv function (called from netdevice level)
1089 * upper-layer output functions
1122 return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork, in ip6_finish_skb()
1123 &inet6_sk(sk)->cork); in ip6_finish_skb()
1287 if (inet_sk(sk)->inet_num) in ip6_sock_set_v6only()
1288 return -EINVAL; in ip6_sock_set_v6only()
1290 sk->sk_ipv6only = true; in ip6_sock_set_v6only()
1329 return -EINVAL; in ip6_sock_set_addr_preferences()
1343 return -EINVAL; in ip6_sock_set_addr_preferences()
1353 return -EINVAL; in ip6_sock_set_addr_preferences()
1356 WRITE_ONCE(inet6_sk(sk)->srcprefs, in ip6_sock_set_addr_preferences()
1357 (READ_ONCE(inet6_sk(sk)->srcprefs) & prefmask) | pref); in ip6_sock_set_addr_preferences()
1364 inet6_sk(sk)->rxopt.bits.rxinfo = true; in ip6_sock_set_recvpktinfo()
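These ip6_sock_set_*() helpers are the in-kernel counterparts of ordinary socket options; from user space the same settings are reached with setsockopt(). A minimal sketch (error handling omitted; the IPV6_ADDR_PREFERENCES flag values come from RFC 5014 and, depending on the libc, may need <linux/in6.h>):

#include <sys/socket.h>
#include <netinet/in.h>

static void configure_v6_socket(int fd)
{
	int on = 1;
	int pref = IPV6_PREFER_SRC_PUBLIC;	/* RFC 5014 source selection */

	/* ip6_sock_set_v6only(): restrict the socket to IPv6 traffic only */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));

	/* ip6_sock_set_addr_preferences(): source address selection hint */
	setsockopt(fd, IPPROTO_IPV6, IPV6_ADDR_PREFERENCES, &pref, sizeof(pref));

	/* ip6_sock_set_recvpktinfo(): deliver IPV6_PKTINFO ancillary data */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
}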