xref: /linux/net/ipv6/ip6_input.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 input
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *	Ian P. Morris		<I.P.Morris@soton.ac.uk>
9  *
10  *	Based in linux/net/ipv4/ip_input.c
11  */
12 /* Changes
13  *
14  *	Mitsuru KANDA @USAGI and
15  *	YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
16  */
17 
18 #include <linux/errno.h>
19 #include <linux/types.h>
20 #include <linux/socket.h>
21 #include <linux/sockios.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/in6.h>
25 #include <linux/icmpv6.h>
26 #include <linux/mroute6.h>
27 #include <linux/slab.h>
28 #include <linux/indirect_call_wrapper.h>
29 
30 #include <linux/netfilter.h>
31 #include <linux/netfilter_ipv6.h>
32 
33 #include <net/sock.h>
34 #include <net/snmp.h>
35 #include <net/udp.h>
36 
37 #include <net/ipv6.h>
38 #include <net/protocol.h>
39 #include <net/transp_v6.h>
40 #include <net/rawv6.h>
41 #include <net/ndisc.h>
42 #include <net/ip6_route.h>
43 #include <net/addrconf.h>
44 #include <net/xfrm.h>
45 #include <net/inet_ecn.h>
46 #include <net/dst_metadata.h>
47 #include <net/inet6_hashtables.h>
48 
/* Early demux for established TCPv6 sockets: look the socket up before
 * routing so the packet can reuse the socket's cached input dst and skip
 * a full route lookup.  Called from ip6_rcv_finish_core(); uses
 * dev_net_rcu(), so the caller path must hold RCU read-side protection.
 */
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net_rcu(skb->dev);
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	/* Only packets addressed to this host can match a local socket. */
	if (skb->pkt_type != PACKET_HOST)
		return;

	/* Make sure the base TCP header is in the linear area. */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
				sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	/* Reject a malformed data offset (less than the minimal header). */
	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(net, &hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		/* sock_edemux drops the reference taken by the lookup. */
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, sk->sk_rx_dst_cookie);
			/* Reuse the cached dst only when it is still valid
			 * and was learned on the same incoming interface.
			 */
			if (dst &&
			    sk->sk_rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
87 
88 static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
89 				struct sk_buff *skb)
90 {
91 	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
92 	    !skb_dst(skb) && !skb->sk) {
93 		switch (ipv6_hdr(skb)->nexthdr) {
94 		case IPPROTO_TCP:
95 			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
96 				tcp_v6_early_demux(skb);
97 			break;
98 		case IPPROTO_UDP:
99 			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
100 				udp_v6_early_demux(skb);
101 			break;
102 		}
103 	}
104 
105 	if (!skb_valid_dst(skb))
106 		ip6_route_input(skb);
107 }
108 
109 int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
110 {
111 	/* if ingress device is enslaved to an L3 master device pass the
112 	 * skb to its handler for processing
113 	 */
114 	skb = l3mdev_ip6_rcv(skb);
115 	if (!skb)
116 		return NET_RX_SUCCESS;
117 	ip6_rcv_finish_core(net, sk, skb);
118 
119 	return dst_input(skb);
120 }
121 
122 static void ip6_sublist_rcv_finish(struct list_head *head)
123 {
124 	struct sk_buff *skb, *next;
125 
126 	list_for_each_entry_safe(skb, next, head, list) {
127 		skb_list_del_init(skb);
128 		dst_input(skb);
129 	}
130 }
131 
132 static bool ip6_can_use_hint(const struct sk_buff *skb,
133 			     const struct sk_buff *hint)
134 {
135 	return hint && !skb_dst(skb) &&
136 	       ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr);
137 }
138 
139 static struct sk_buff *ip6_extract_route_hint(const struct net *net,
140 					      struct sk_buff *skb)
141 {
142 	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
143 	    IP6CB(skb)->flags & IP6SKB_MULTIPATH)
144 		return NULL;
145 
146 	return skb;
147 }
148 
/* Finish receive processing for a list of packets.  Each skb gets the
 * per-packet finish step (or a copy of a cached route hint), and
 * consecutive packets sharing the same dst are batched into sublists
 * before being dispatched to dst_input().
 */
static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
				struct list_head *head)
{
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_rcv(skb);
		if (!skb)
			continue;

		/* Same destination as the cached hint: reuse its route
		 * instead of doing a full lookup in ip6_rcv_finish_core().
		 */
		if (ip6_can_use_hint(skb, hint))
			skb_dst_copy(skb, hint);
		else
			ip6_rcv_finish_core(net, sk, skb);
		dst = skb_dst(skb);
		if (curr_dst != dst) {
			/* Route changed: this skb may seed a new hint. */
			hint = ip6_extract_route_hint(net, skb);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip6_sublist_rcv_finish(&sublist);
}
187 
/* Core validation of a received IPv6 packet: sanity checks, the
 * RFC-mandated address checks, SNMP accounting, trimming the skb to the
 * IPv6 payload length, and parsing of a leading Hop-by-Hop header.
 * Returns the (possibly reallocated) skb on success, or NULL after the
 * skb has been freed on error.
 */
static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
				    struct net *net)
{
	enum skb_drop_reason reason;
	const struct ipv6hdr *hdr;
	u32 pkt_len;
	struct inet6_dev *idev;

	/* Packets destined for another host (e.g. seen in promiscuous
	 * mode) are dropped immediately.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
		kfree_skb_reason(skb, SKB_DROP_REASON_OTHERHOST);
		return NULL;
	}

	rcu_read_lock();

	idev = __in6_dev_get(skb->dev);

	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);

	SKB_DR_SET(reason, NOT_SPECIFIED);
	/* Unshare the skb before modifying it, and discard when the device
	 * has no IPv6 state or IPv6 is administratively disabled on it.
	 */
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
	    !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
			SKB_DR_SET(reason, IPV6DISABLED);
		goto drop;
	}

	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

	/*
	 * Store incoming device index. When the packet will
	 * be queued, we cannot refer to skb->dev anymore.
	 *
	 * BTW, when we send a packet for our own local address on a
	 * non-loopback interface (e.g. ethX), it is being delivered
	 * via the loopback interface (lo) here; skb->dev = loopback_dev.
	 * It, however, should be considered as if it is being
	 * arrived via the sending interface (ethX), because of the
	 * nature of scoping architecture. --yoshfuji
	 */
	IP6CB(skb)->iif = skb_valid_dst(skb) ?
				ip6_dst_idev(skb_dst(skb))->dev->ifindex :
				dev->ifindex;

	/* The fixed 40-byte IPv6 header must be in the linear area. */
	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
		goto err;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6) {
		SKB_DR_SET(reason, UNHANDLED_PROTO);
		goto err;
	}

	/* Account the packet's ECN codepoint; GSO packets are counted
	 * once per segment.
	 */
	__IP6_ADD_STATS(net, idev,
			IPSTATS_MIB_NOECTPKTS +
				(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
	/*
	 * RFC4291 2.5.3
	 * The loopback address must not be used as the source address in IPv6
	 * packets that are sent outside of a single node. [..]
	 * A packet received on an interface with a destination address
	 * of loopback must be dropped.
	 */
	if ((ipv6_addr_loopback(&hdr->saddr) ||
	     ipv6_addr_loopback(&hdr->daddr)) &&
	    !(dev->flags & IFF_LOOPBACK) &&
	    !netif_is_l3_master(dev))
		goto err;

	/* RFC4291 Errata ID: 3480
	 * Interface-Local scope spans only a single interface on a
	 * node and is useful only for loopback transmission of
	 * multicast.  Packets with interface-local scope received
	 * from another node must be discarded.
	 */
	if (!(skb->pkt_type == PACKET_LOOPBACK ||
	      dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
		goto err;

	/* If enabled, drop unicast packets that were encapsulated in link-layer
	 * multicast or broadcast to protected against the so-called "hole-196"
	 * attack in 802.11 wireless.
	 */
	if (!ipv6_addr_is_multicast(&hdr->daddr) &&
	    (skb->pkt_type == PACKET_BROADCAST ||
	     skb->pkt_type == PACKET_MULTICAST) &&
	    READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) {
		SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
		goto err;
	}

	/* RFC4291 2.7
	 * Nodes must not originate a packet to a multicast address whose scope
	 * field contains the reserved value 0; if such a packet is received, it
	 * must be silently dropped.
	 */
	if (ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
		goto err;

	/*
	 * RFC4291 2.7
	 * Multicast addresses must not be used as source addresses in IPv6
	 * packets or appear in any Routing header.
	 */
	if (ipv6_addr_is_multicast(&hdr->saddr))
		goto err;

	/* Transport header starts right after the fixed header; extension
	 * header parsing advances it later.
	 */
	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	pkt_len = ipv6_payload_len(skb, hdr);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
			__IP6_INC_STATS(net,
					idev, IPSTATS_MIB_INTRUNCATEDPKTS);
			SKB_DR_SET(reason, PKT_TOO_SMALL);
			goto drop;
		}
		/* Trim link-layer padding past the IPv6 payload, keeping
		 * the receive checksum consistent.
		 */
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			goto err;
		/* Trimming may have reallocated head; reload the header. */
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		/* ipv6_parse_hopopts() frees the skb itself on error, so
		 * only the stats update and unlock happen here.
		 */
		if (ipv6_parse_hopopts(skb) < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			rcu_read_unlock();
			return NULL;
		}
	}

	rcu_read_unlock();

	/* Must drop socket now because of tproxy. */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;
err:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	SKB_DR_OR(reason, IP_INHDR);
drop:
	rcu_read_unlock();
	kfree_skb_reason(skb, reason);
	return NULL;
}
343 
344 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
345 {
346 	struct net *net = dev_net(skb->dev);
347 
348 	skb = ip6_rcv_core(skb, dev, net);
349 	if (skb == NULL)
350 		return NET_RX_DROP;
351 	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
352 		       net, NULL, skb, dev, NULL,
353 		       ip6_rcv_finish);
354 }
355 
356 static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
357 			    struct net *net)
358 {
359 	NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
360 		     head, dev, NULL, ip6_rcv_finish);
361 	ip6_list_rcv_finish(net, NULL, head);
362 }
363 
364 /* Receive a list of IPv6 packets */
365 void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
366 		   struct net_device *orig_dev)
367 {
368 	struct net_device *curr_dev = NULL;
369 	struct net *curr_net = NULL;
370 	struct sk_buff *skb, *next;
371 	LIST_HEAD(sublist);
372 
373 	list_for_each_entry_safe(skb, next, head, list) {
374 		struct net_device *dev = skb->dev;
375 		struct net *net = dev_net(dev);
376 
377 		skb_list_del_init(skb);
378 		skb = ip6_rcv_core(skb, dev, net);
379 		if (skb == NULL)
380 			continue;
381 
382 		if (curr_dev != dev || curr_net != net) {
383 			/* dispatch old sublist */
384 			if (!list_empty(&sublist))
385 				ip6_sublist_rcv(&sublist, curr_dev, curr_net);
386 			/* start new sublist */
387 			INIT_LIST_HEAD(&sublist);
388 			curr_dev = dev;
389 			curr_net = net;
390 		}
391 		list_add_tail(&skb->list, &sublist);
392 	}
393 	/* dispatch final sublist */
394 	if (!list_empty(&sublist))
395 		ip6_sublist_rcv(&sublist, curr_dev, curr_net);
396 }
397 
398 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *));
399 
/*
 *	Deliver the packet to the host: walk the extension-header chain
 *	(via the resubmit label), give raw sockets a chance at every
 *	protocol number, and finally invoke the registered inet6_protocol
 *	handler.  Must be called under rcu_read_lock() (protocol table and
 *	dst idev are RCU-protected).
 */
void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
			      bool have_final)
{
	const struct inet6_protocol *ipprot;
	struct inet6_dev *idev;
	unsigned int nhoff;
	SKB_DR(reason);
	bool raw;

	/*
	 *	Parse extension headers
	 */

resubmit:
	idev = ip6_dst_idev(skb_dst(skb));
	nhoff = IP6CB(skb)->nhoff;
	if (!have_final) {
		/* Advance past the current header and fetch the next
		 * protocol number from the byte stored at nhoff.
		 */
		if (!pskb_pull(skb, skb_transport_offset(skb)))
			goto discard;
		nexthdr = skb_network_header(skb)[nhoff];
	}

resubmit_final:
	/* Deliver clones to any matching raw sockets first. */
	raw = raw6_local_deliver(skb, nexthdr);
	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot) {
		int ret;

		if (have_final) {
			if (!(ipprot->flags & INET6_PROTO_FINAL)) {
				/* Once we've seen a final protocol don't
				 * allow encapsulation on any non-final
				 * ones. This allows foo in UDP encapsulation
				 * to work.
				 */
				goto discard;
			}
		} else if (ipprot->flags & INET6_PROTO_FINAL) {
			const struct ipv6hdr *hdr;
			int sdif = inet6_sdif(skb);
			struct net_device *dev;

			/* Only do this once for first final protocol */
			have_final = true;


			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);

			/* skb->dev passed may be master dev for vrfs. */
			if (sdif) {
				dev = dev_get_by_index_rcu(net, sdif);
				if (!dev)
					goto discard;
			} else {
				dev = skb->dev;
			}

			/* A multicast packet must be addressed to a group
			 * joined on this device, unless it is an MLD
			 * message.
			 */
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(dev, &hdr->daddr,
						 &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) {
				SKB_DR_SET(reason, IP_INADDRERRORS);
				goto discard;
			}
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) {
			if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				SKB_DR_SET(reason, XFRM_POLICY);
				goto discard;
			}
			nf_reset_ct(skb);
		}

		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
				      skb);
		if (ret > 0) {
			if (ipprot->flags & INET6_PROTO_FINAL) {
				/* Not an extension header, most likely UDP
				 * encapsulation. Use return value as nexthdr
				 * protocol not nhoff (which presumably is
				 * not set by handler).
				 */
				nexthdr = ret;
				goto resubmit_final;
			} else {
				goto resubmit;
			}
		} else if (ret == 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
		}
	} else {
		/* No kernel handler is registered for this protocol. */
		if (!raw) {
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP6_INC_STATS(net, idev,
						IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff);
				SKB_DR_SET(reason, IP_NOPROTO);
			} else {
				SKB_DR_SET(reason, XFRM_POLICY);
			}
			kfree_skb_reason(skb, reason);
		} else {
			/* A raw socket consumed it; delivery succeeded. */
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
	return;

discard:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
	kfree_skb_reason(skb, reason);
}
518 
519 static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
520 {
521 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
522 		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
523 				IPSTATS_MIB_INDISCARDS);
524 		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
525 		return 0;
526 	}
527 
528 	skb_clear_delivery_time(skb);
529 	ip6_protocol_deliver_rcu(net, skb, 0, false);
530 
531 	return 0;
532 }
533 
534 
535 int ip6_input(struct sk_buff *skb)
536 {
537 	int res;
538 
539 	rcu_read_lock();
540 	res = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
541 		      dev_net_rcu(skb->dev), NULL, skb, skb->dev, NULL,
542 		      ip6_input_finish);
543 	rcu_read_unlock();
544 
545 	return res;
546 }
547 EXPORT_SYMBOL_GPL(ip6_input);
548 
/* dst_input() target for multicast packets: decide between local
 * delivery and, when multicast forwarding is enabled, handing the
 * packet to the multicast routing engine - possibly both, via a clone.
 */
int ip6_mc_input(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int sdif = inet6_sdif(skb);
	const struct ipv6hdr *hdr;
	bool deliver;

	__IP6_UPD_PO_STATS(skb_dst_dev_net_rcu(skb),
			   __in6_dev_get_safely(dev), IPSTATS_MIB_INMCAST,
			   skb->len);

	/* skb->dev passed may be master dev for vrfs. */
	if (sdif) {
		dev = dev_get_by_index_rcu(dev_net_rcu(dev), sdif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	hdr = ipv6_hdr(skb);
	/* Deliver locally only if this host joined the destination group
	 * on the receiving device.
	 */
	deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL);

#ifdef CONFIG_IPV6_MROUTE
	/*
	 *      IPv6 multicast router mode is now supported ;)
	 */
	if (atomic_read(&dev_net_rcu(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
	    !(ipv6_addr_type(&hdr->daddr) &
	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
		/*
		 * Okay, we try to forward - split and duplicate
		 * packets.
		 */
		struct sk_buff *skb2;
		struct inet6_skb_parm *opt = IP6CB(skb);

		/* Check for MLD */
		if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
			/* Check if this is a mld message */
			u8 nexthdr = hdr->nexthdr;
			__be16 frag_off;
			int offset;

			/* Check if the value of Router Alert
			 * is for MLD (0x0000).
			 */
			if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
				deliver = false;

				if (!ipv6_ext_hdr(nexthdr)) {
					/* BUG */
					goto out;
				}
				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
							  &nexthdr, &frag_off);
				if (offset < 0)
					goto out;

				/* Only genuine MLD messages get delivered
				 * locally; other Router-Alert-MLD packets
				 * are dropped (deliver stays false), and
				 * none of them are forwarded (goto out).
				 */
				if (ipv6_is_mld(skb, nexthdr, offset))
					deliver = true;

				goto out;
			}
			/* unknown RA - process it normally */
		}

		/* Forward a clone when the packet is also delivered
		 * locally; otherwise hand the original to ip6_mr_input()
		 * and set skb to NULL so the tail of this function frees
		 * nothing (kfree_skb(NULL) is a no-op).
		 */
		if (deliver) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
		} else {
			skb2 = skb;
			skb = NULL;
		}

		if (skb2)
			ip6_mr_input(skb2);
	}
out:
#endif
	if (likely(deliver)) {
		ip6_input(skb);
	} else {
		/* discard */
		kfree_skb(skb);
	}

	return 0;
}
638