/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET6 connection oriented protocols.
 *
 * Authors:	See the TCPv6 sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

/*
 * inet6_csk_bind_conflict - decide whether @sk may share @tb's local port
 * @sk:    the socket attempting to bind
 * @tb:    bind bucket holding every socket currently bound to the port
 * @relax: present for signature compatibility with the bind-conflict
 *         callback; not consulted by this IPv6 checker
 *
 * Walks all current owners of the port and reports a conflict unless
 * device bindings differ or SO_REUSEADDR/SO_REUSEPORT rules permit the
 * share.  Returns non-zero when a conflicting owner exists.
 */
int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb, bool relax)
{
	const struct sock *sk2;
	const struct hlist_node *node;
	/* Snapshot @sk's sharing options once, outside the loop. */
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/* We must walk the whole port owner list in this case. -DaveM */
	/*
	 * See comment in inet_csk_bind_conflict about sock lookup
	 * vs net namespaces issues.
	 */
	sk_for_each_bound(sk2, node, &tb->owners) {
		/*
		 * Only sockets on the same device (or with a wildcard
		 * device binding on either side) can conflict.
		 */
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			/*
			 * Sharing is allowed when both sides set
			 * SO_REUSEADDR and sk2 is not listening, or when
			 * both set SO_REUSEPORT and belong to the same
			 * user (TIME_WAIT peers never satisfy the
			 * reuseport branch).
			 */
			if ((!reuse || !sk2->sk_reuse ||
			     sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			     (sk2->sk_state != TCP_TIME_WAIT &&
			      !uid_eq(uid,
				      sock_i_uid((struct sock *)sk2))))) {
				/* Same source address => real conflict. */
				if (ipv6_rcv_saddr_equal(sk, sk2))
					break;
			}
		}
	}

	/*
	 * node is left non-NULL only when the walk was aborted by the
	 * break above, i.e. a conflicting owner was found.
	 */
	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

/*
 * inet6_csk_route_req - route the reply to a TCP connection request
 * @sk:  the listening socket
 * @fl6: caller-provided flow, filled in here from @req
 * @req: the pending connection request being answered
 *
 * Builds an IPv6 flow from the request's addresses/ports and looks up a
 * route for it.  fl6_update_dst() is called before ->saddr is set so a
 * routing-header option can redirect ->daddr via @final_p.
 *
 * Returns the route, or NULL (not an ERR_PTR) on lookup failure.
 */
struct dst_entry *inet6_csk_route_req(struct sock *sk,
				      struct flowi6 *fl6,
				      const struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = IPPROTO_TCP;
	fl6->daddr = treq->rmt_addr;
	/* May replace fl6->daddr with the first routing-header hop. */
	final_p = fl6_update_dst(fl6, np->opt, &final);
	fl6->saddr = treq->loc_addr;
	fl6->flowi6_oif = treq->iif;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_dport = inet_rsk(req)->rmt_port;
	fl6->fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(fl6));

	dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
	if (IS_ERR(dst))
		return NULL;

	return dst;
}

/*
 * request_sock (formerly open request) hash tables.
 */

/*
 * inet6_synq_hash - hash a peer (address, port) pair to a SYN-queue bucket
 * @raddr:      remote IPv6 address
 * @rport:      remote port (network byte order)
 * @rnd:        per-listener random seed mixed into the hash
 * @synq_hsize: number of buckets in the SYN table
 *
 * NOTE(review): the closing mask produces a valid index only when
 * @synq_hsize is a power of two, which listen_sock table sizing is
 * expected to guarantee — confirm against reqsk_queue_alloc().
 */
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u32 synq_hsize)
{
	u32 c;

	/* Fold the 128-bit address and the port into jhash in two steps. */
	c = jhash_3words((__force u32)raddr->s6_addr32[0],
			 (__force u32)raddr->s6_addr32[1],
			 (__force u32)raddr->s6_addr32[2],
			 rnd);

	c = jhash_2words((__force u32)raddr->s6_addr32[3],
			 (__force u32)rport,
			 c);

	return c & (synq_hsize - 1);
}

/*
 * inet6_csk_search_req - look up a pending request on a listening socket
 * @sk:    the listening socket
 * @prevp: out: pointer to the link that points at the returned request,
 *         so the caller can unlink it from the singly linked chain
 * @rport: remote port of the connection being matched
 * @raddr: remote IPv6 address
 * @laddr: local IPv6 address
 * @iif:   incoming interface index (matched only if the request recorded
 *         a specific interface)
 *
 * Returns the matching request_sock, or NULL if none is queued.
 */
struct request_sock *inet6_csk_search_req(const struct sock *sk,
					  struct request_sock ***prevp,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	/* Walk the one bucket this peer can hash to, tracking the link. */
	for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
						     lopt->hash_rnd,
						     lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet6_request_sock *treq = inet6_rsk(req);

		if (inet_rsk(req)->rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
		    (!treq->iif || treq->iif == iif)) {
			/* A queued request must not have a child socket yet. */
			WARN_ON(req->sk != NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_search_req);

/*
 * inet6_csk_reqsk_queue_hash_add - queue a new connection request
 * @sk:      the listening socket
 * @req:     the request to insert into the SYN table
 * @timeout: initial retransmit/expiry timeout for the request
 *
 * Hashes the request by its remote address/port and adds it to the
 * listener's accept queue accounting.
 */
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
				      inet_rsk(req)->rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

/*
 * inet6_csk_addr2sockaddr - report the peer's address as a sockaddr_in6
 * @sk:    connected socket
 * @uaddr: buffer to fill; must be large enough for a sockaddr_in6
 *
 * The scope id is populated only for link-local peers bound to a device.
 */
void
inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = np->daddr;
	sin6->sin6_port	= inet_sk(sk)->inet_dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = 0;
	if (sk->sk_bound_dev_if &&
	    ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
		sin6->sin6_scope_id = sk->sk_bound_dev_if;
}

EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

/* Thin wrapper: cache @dst on @sk via the generic IPv6 dst store. */
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   const struct in6_addr *daddr,
			   const struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);
}

/* Thin wrapper: revalidate the socket's cached route against @cookie. */
static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	return __sk_dst_check(sk, cookie);
}

/*
 * inet6_csk_route_socket - get a route for a connected IPv6 socket
 * @sk:  the connected socket
 * @fl6: caller-provided flow, filled in here from the socket state
 *
 * Builds the flow from the socket, then reuses the cached route if its
 * cookie is still valid; otherwise performs a fresh lookup and caches
 * the result (NULL address arguments leave the stored addresses as-is —
 * NOTE(review): confirm against __ip6_dst_store semantics).
 *
 * Returns a valid dst or an ERR_PTR from the routing lookup.
 */
static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
						struct flowi6 *fl6)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = sk->sk_protocol;
	fl6->daddr = np->daddr;
	fl6->saddr = np->saddr;
	fl6->flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
	fl6->flowi6_oif = sk->sk_bound_dev_if;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_sport = inet->inet_sport;
	fl6->fl6_dport = inet->inet_dport;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));

	/* May redirect fl6->daddr to a routing-header first hop. */
	final_p = fl6_update_dst(fl6, np->opt, &final);

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
	if (!dst) {
		dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);

		if (!IS_ERR(dst))
			__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}
	return dst;
}

/*
 * inet6_csk_xmit - transmit an skb on a connected IPv6 socket
 * @skb:       packet to send; freed here if routing fails
 * @fl_unused: unused, kept for the queue_xmit callback signature
 *
 * Routes via the socket, then restores the socket's destination into
 * the flow (routing may have substituted a routing-header hop) before
 * handing the packet to ip6_xmit() under RCU with a noref dst.
 *
 * Returns the negative routing error, or the ip6_xmit() result.
 */
int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
{
	struct sock *sk = skb->sk;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;
	struct dst_entry *dst;
	int res;

	dst = inet6_csk_route_socket(sk, &fl6);
	if (IS_ERR(dst)) {
		/* sk_err_soft holds the positive errno of the failure. */
		sk->sk_err_soft = -PTR_ERR(dst);
		sk->sk_route_caps = 0;
		kfree_skb(skb);
		return PTR_ERR(dst);
	}

	rcu_read_lock();
	skb_dst_set_noref(skb, dst);

	/* Restore final destination back after routing done */
	fl6.daddr = np->daddr;

	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);

/*
 * inet6_csk_update_pmtu - apply a new path MTU to the socket's route
 * @sk:  the connected socket
 * @mtu: the freshly learned path MTU
 *
 * Looks up the route, pushes @mtu into it, then re-resolves so the
 * caller sees a route reflecting the update.  Returns NULL on any
 * routing failure.
 */
struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct flowi6 fl6;
	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);

	if (IS_ERR(dst))
		return NULL;
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = inet6_csk_route_socket(sk, &fl6);
	return IS_ERR(dst) ? NULL : dst;
}
EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);